content_type stringclasses 8 values | main_lang stringclasses 7 values | message stringlengths 1 50 | sha stringlengths 40 40 | patch stringlengths 52 962k | file_count int64 1 300 |
|---|---|---|---|---|---|
Python | Python | add tests for chebfit with deg specified as list | 1a9fb061bb4f217f335616c65abd36644c2f2ac7 | <ide><path>numpy/polynomial/tests/test_chebyshev.py
<ide> def test_chebfit(self):
<ide> def f(x):
<ide> return x*(x - 1)*(x - 2)
<ide>
<add> def f2(x):
<add> return x**4 + x**2 + 1
<add>
<ide> # Test exceptions
<ide> assert_raises(ValueError, cheb.chebfit, [1], [1], -1)
<ide> assert_raises(TypeError, cheb.chebfit, [[1]], [1], 0)
<ide> def f(x):
<ide> assert_raises(TypeError, cheb.chebfit, [1], [1, 2], 0)
<ide> assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[[1]])
<ide> assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[1, 1])
<add> assert_raises(ValueError, cheb.chebfit, [1], [1], [-1,])
<add> assert_raises(ValueError, cheb.chebfit, [1], [1], [2, -1, 6])
<add> assert_raises(TypeError, cheb.chebfit, [1], [1], [])
<ide>
<ide> # Test fit
<ide> x = np.linspace(0, 2)
<ide> def f(x):
<ide> coef3 = cheb.chebfit(x, y, 3)
<ide> assert_equal(len(coef3), 4)
<ide> assert_almost_equal(cheb.chebval(x, coef3), y)
<add> coef3 = cheb.chebfit(x, y, [0, 1, 2, 3])
<add> assert_equal(len(coef3), 4)
<add> assert_almost_equal(cheb.chebval(x, coef3), y)
<ide> #
<ide> coef4 = cheb.chebfit(x, y, 4)
<ide> assert_equal(len(coef4), 5)
<ide> assert_almost_equal(cheb.chebval(x, coef4), y)
<add> coef4 = cheb.chebfit(x, y, [0, 1, 2, 3, 4])
<add> assert_equal(len(coef4), 5)
<add> assert_almost_equal(cheb.chebval(x, coef4), y)
<add> # check things still work if deg is not in strict increasing
<add> coef4 = cheb.chebfit(x, y, [2, 3, 4, 1, 0])
<add> assert_equal(len(coef4), 5)
<add> assert_almost_equal(cheb.chebval(x, coef4), y)
<ide> #
<ide> coef2d = cheb.chebfit(x, np.array([y, y]).T, 3)
<ide> assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
<add> coef2d = cheb.chebfit(x, np.array([y, y]).T, [0, 1, 2, 3])
<add> assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
<ide> # test weighting
<ide> w = np.zeros_like(x)
<ide> yw = y.copy()
<ide> w[1::2] = 1
<ide> y[0::2] = 0
<ide> wcoef3 = cheb.chebfit(x, yw, 3, w=w)
<ide> assert_almost_equal(wcoef3, coef3)
<add> wcoef3 = cheb.chebfit(x, yw, [0, 1, 2, 3], w=w)
<add> assert_almost_equal(wcoef3, coef3)
<ide> #
<ide> wcoef2d = cheb.chebfit(x, np.array([yw, yw]).T, 3, w=w)
<ide> assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
<add> wcoef2d = cheb.chebfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w)
<add> assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
<ide> # test scaling with complex values x points whose square
<ide> # is zero when summed.
<ide> x = [1, 1j, -1, -1j]
<ide> assert_almost_equal(cheb.chebfit(x, x, 1), [0, 1])
<add> assert_almost_equal(cheb.chebfit(x, x, [0, 1]), [0, 1])
<add> # test fitting only even polynomials
<add> x = np.linspace(-1, 1)
<add> y = f2(x)
<add> coef1 = cheb.chebfit(x, y, 4)
<add> assert_almost_equal(cheb.chebval(x, coef1), y)
<add> coef2 = cheb.chebfit(x, y, [0, 2, 4])
<add> assert_almost_equal(cheb.chebval(x, coef2), y)
<add> assert_almost_equal(coef1, coef2)
<ide>
<ide>
<ide> class TestCompanion(TestCase): | 1 |
Javascript | Javascript | fix failing test | 43f4cc160811657135508cdefe6aed2b3522a905 | <ide><path>packages/react-reconciler/src/__tests__/ReactSuspense-test.internal.js
<ide> describe('ReactSuspense', () => {
<ide> expect(Scheduler).toFlushAndYield(['Child 1', 'create layout']);
<ide> expect(root).toMatchRenderedOutput('Child 1');
<ide>
<del> ReactTestRenderer.act(() => {
<add> act(() => {
<ide> _setShow(true);
<ide> });
<del> expect(Scheduler).toHaveYielded(
<del> // DEV behavior differs from prod
<del> // In DEV sometimes the work loop sync-flushes the commit
<del> // where as in production, it schedules it behind a timeout.
<del> // See shouldForceFlushFallbacksInDEV() usage
<del> __DEV__
<del> ? ['Child 1', 'Suspend! [Child 2]', 'Loading...', 'destroy layout']
<del> : ['Child 1', 'Suspend! [Child 2]', 'Loading...'],
<del> );
<add> expect(Scheduler).toHaveYielded([
<add> 'Child 1',
<add> 'Suspend! [Child 2]',
<add> 'Loading...',
<add> ]);
<ide> jest.advanceTimersByTime(1000);
<del> expect(Scheduler).toHaveYielded(
<del> // DEV behavior differs from prod
<del> // In DEV sometimes the work loop sync-flushes the commit
<del> // where as in production, it schedules it behind a timeout.
<del> // See shouldForceFlushFallbacksInDEV() usage
<del> __DEV__
<del> ? ['Promise resolved [Child 2]']
<del> : ['destroy layout', 'Promise resolved [Child 2]'],
<del> );
<add> expect(Scheduler).toHaveYielded([
<add> 'destroy layout',
<add> 'Promise resolved [Child 2]',
<add> ]);
<ide> expect(Scheduler).toFlushAndYield(['Child 1', 'Child 2', 'create layout']);
<ide> expect(root).toMatchRenderedOutput(['Child 1', 'Child 2'].join(''));
<ide> }); | 1 |
Ruby | Ruby | copy hidden files from bottles | 3cb2d62a1b13d4e71c8c5deeb6250fc594f14ec8 | <ide><path>Library/Homebrew/formula.rb
<ide> require "language/python"
<ide> require "tab"
<ide> require "mktemp"
<add>require "find"
<ide>
<ide> # A formula provides instructions and metadata for Homebrew to install a piece
<ide> # of software. Every Homebrew formula is a {Formula}.
<ide> def run_post_install
<ide> with_env(new_env) do
<ide> ENV.clear_sensitive_environment!
<ide>
<del> Pathname.glob("#{bottle_prefix}/{etc,var}/**/*") do |path|
<add> etc_var_dirs = [bottle_prefix/"etc", bottle_prefix/"var"]
<add> Find.find(*etc_var_dirs.select(&:directory?)) do |path|
<add> path = Pathname.new(path)
<ide> path.extend(InstallRenamed)
<ide> path.cp_path_sub(bottle_prefix, HOMEBREW_PREFIX)
<ide> end | 1 |
Ruby | Ruby | fix reference to issues_url | 1e184138489707dcbaef73dd23a600d63ab661cf | <ide><path>Library/brew.rb
<ide> def require? path
<ide> rescue Exception => e
<ide> onoe e
<ide> puts "#{Tty.white}Please report this bug:"
<del> puts " #{Tty.em}#{ISSUES_URL}#{Tty.reset}"
<add> puts " #{Tty.em}#{OS::ISSUES_URL}#{Tty.reset}"
<ide> puts e.backtrace
<ide> exit 1
<ide> else | 1 |
PHP | PHP | add missing docblock | 97b6deec70ed6592b529b29f13c1b10a05c2be5c | <ide><path>src/Illuminate/Routing/Console/ControllerMakeCommand.php
<ide> protected function buildModelReplacements(array $replace)
<ide> *
<ide> * @param string $model
<ide> * @return string
<add> *
<add> * @throws \InvalidArgumentException
<ide> */
<ide> protected function parseModel($model)
<ide> { | 1 |
Text | Text | clarify case conventions in formula naming | 57a81f9e22ef2db7fefc432adafe2d63a2206bc8 | <ide><path>share/doc/homebrew/Formula-Cookbook.md
<ide> When importing classes, Homebrew will require the formula and then create an ins
<ide> * `foo-bar.rb` => `FooBar`
<ide> * `foobar.rb` => `Foobar`
<ide>
<del>Thus, if you change the name of the class, you must also rename the file. Filenames should be all lowercase.
<add>Thus, if you change the name of the class, you must also rename the file. Filenames should be all lowercase, and class names should be the strict CamelCase equivalent, e.g. formulae `gnu-go` and `sdl_mixer` become classes `GnuGo` and `SdlMixer`, even if part of their name is an acronym.
<ide>
<ide> Add aliases by creating symlinks in `Library/Aliases`.
<ide> | 1 |
Javascript | Javascript | add logging level for rnpm previous linking | 85a2d6a65d373b0f3cc94d1ad2ca1dc9b7717809 | <ide><path>local-cli/link/link.js
<ide> const log = require('npmlog');
<ide> const path = require('path');
<ide> const uniq = require('lodash').uniq;
<ide> const flatten = require('lodash').flatten;
<add>const chalk = require('chalk');
<ide>
<ide> const isEmpty = require('lodash').isEmpty;
<ide> const promiseWaterfall = require('./promiseWaterfall');
<ide> const linkDependencyAndroid = (androidProject, dependency) => {
<ide> const isInstalled = isInstalledAndroid(androidProject, dependency.name);
<ide>
<ide> if (isInstalled) {
<del> log.info(`Android module ${dependency.name} is already linked`);
<add> log.info(chalk.grey(`Android module ${dependency.name} is already linked`));
<ide> return null;
<ide> }
<ide>
<ide> const linkDependencyIOS = (iOSProject, dependency) => {
<ide> const isInstalled = isInstalledIOS(iOSProject, dependency.config.ios);
<ide>
<ide> if (isInstalled) {
<del> log.info(`iOS module ${dependency.name} is already linked`);
<add> log.info(chalk.grey(`iOS module ${dependency.name} is already linked`));
<ide> return;
<ide> }
<ide> | 1 |
Text | Text | fix callbacks doc | 6c55dbd6d55f7882334d1bb9cc1f08b17abfe677 | <ide><path>docs/sources/callbacks.md
<ide> Save the model after every epoch. If `save_best_only=True`, the latest best mode
<ide>
<ide>
<ide> ```python
<del>keras.callbacks.EarlyStopping(patience=0, verbose=0)
<add>keras.callbacks.EarlyStopping(monitor='val_loss', patience=0, verbose=0)
<ide> ```
<ide>
<del>Stop training after no improvement of the validation loss is seen for `patience` epochs.
<add>Stop training after no improvement of the metric `monitor` is seen for `patience` epochs.
<ide>
<ide> ---
<ide> | 1 |
Ruby | Ruby | fix simple_format output example ending tag | 7e1cb39f7257e24672761e4e309330bf85d7c270 | <ide><path>actionview/lib/action_view/helpers/text_helper.rb
<ide> def word_wrap(text, options = {})
<ide> # # => "<p>Unblinkable.</p>"
<ide> #
<ide> # simple_format("<blink>Blinkable!</blink> It's true.", {}, sanitize: false)
<del> # # => "<p><blink>Blinkable!</span> It's true.</p>"
<add> # # => "<p><blink>Blinkable!</blink> It's true.</p>"
<ide> def simple_format(text, html_options = {}, options = {})
<ide> wrapper_tag = options.fetch(:wrapper_tag, :p)
<ide> | 1 |
Text | Text | move getconfig message to the line above in readme | 341c34d4bfc0b001b2dc018c44676368a289be84 | <ide><path>readme.md
<ide> module.exports = {
<ide> ```js
<ide> // pages/index.js
<ide> import getConfig from 'next/config'
<del>const {serverRuntimeConfig, publicRuntimeConfig} = getConfig() // Only holds serverRuntimeConfig and publicRuntimeConfig from next.config.js nothing else.
<add>// Only holds serverRuntimeConfig and publicRuntimeConfig from next.config.js nothing else.
<add>const {serverRuntimeConfig, publicRuntimeConfig} = getConfig()
<ide>
<ide> console.log(serverRuntimeConfig.mySecret) // Will only be available on the server side
<ide> console.log(publicRuntimeConfig.staticFolder) // Will be available on both server and client | 1 |
Javascript | Javascript | add missing copyright headers | 220288725fc05f9838ec0a77876d9d9f1491da5a | <ide><path>test/simple/test-eio-limit.js
<add>// Copyright Joyent, Inc. and other Node contributors.
<add>//
<add>// Permission is hereby granted, free of charge, to any person obtaining a
<add>// copy of this software and associated documentation files (the
<add>// "Software"), to deal in the Software without restriction, including
<add>// without limitation the rights to use, copy, modify, merge, publish,
<add>// distribute, sublicense, and/or sell copies of the Software, and to permit
<add>// persons to whom the Software is furnished to do so, subject to the
<add>// following conditions:
<add>//
<add>// The above copyright notice and this permission notice shall be included
<add>// in all copies or substantial portions of the Software.
<add>//
<add>// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
<add>// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
<add>// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
<add>// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
<add>// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
<add>// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
<add>// USE OR OTHER DEALINGS IN THE SOFTWARE.
<add>
<ide> var assert = require('assert'),
<ide> zlib = require('zlib'),
<ide> started = 0, | 1 |
Javascript | Javascript | remove unnecessary comma in transformcontrols | a2a0c8879d63abd2697d80a396d842f343875542 | <ide><path>examples/js/controls/TransformControls.js
<ide> var group = {
<ide>
<ide> handles: this[ "handles" ],
<del> pickers: this[ "pickers" ],
<add> pickers: this[ "pickers" ]
<ide>
<ide> };
<ide> | 1 |
Ruby | Ruby | remove test file | 019c8ae814d0e89af3da543a956f22a4db92c5a3 | <ide><path>test.rb
<del># frozen_string_literal: true
<del>begin
<del> require "bundler/inline"
<del>rescue LoadError => e
<del> $stderr.puts "Bundler version 1.10 or later is required. Please update
<del> your Bundler"
<del> raise e
<del>end
<del>
<del>gemfile(true) do
<del> source "https://rubygems.org"
<del>
<del> gem "benchmark-ips"
<del> gem "rails"
<del>end
<del>
<del>def allocate_count
<del> GC.disable
<del> before = ObjectSpace.count_objects
<del> yield
<del> after = ObjectSpace.count_objects
<del> after.each { |k,v| after[k] = v - before[k] }
<del> after[:T_HASH] -= 1 # probe effect - we created the before hash.
<del> GC.enable
<del> result = after.reject { |k,v| v == 0 }
<del> GC.start
<del> result
<del>end
<del>
<del>@hash = {}
<del>
<del>def master_version
<del> "#{@hash["rel"]} nofollow".lstrip
<del>end
<del>
<del>def key_version
<del> if @hash.key?("rel")
<del> "#{@hash["rel"]} nofollow".lstrip
<del> else
<del> "nofollow"
<del> end
<del>end
<del>
<del>def present_version
<del> if @hash["rel"].present?
<del> "#{@hash["rel"]} nofollow"
<del> else
<del> "nofollow".freeze
<del> end
<del>end
<del>
<del>def nil_version
<del> if @hash["rel"].nil?
<del> "nofollow".freeze
<del> else
<del> "#{@hash["rel"]} nofollow"
<del> end
<del>end
<del>
<del>def blank_version
<del> if @hash["rel"].blank?
<del> "nofollow".freeze
<del> else
<del> "#{@hash["rel"]} nofollow"
<del> end
<del>end
<del>
<del>def test
<del> puts "master_version"
<del> puts allocate_count { 1000.times { master_version } }
<del> puts "key_version"
<del> puts allocate_count { 1000.times { key_version } }
<del> puts "present_version"
<del> puts allocate_count { 1000.times { present_version } }
<del> puts "nil_version"
<del> puts allocate_count { 1000.times { nil_version } }
<del> puts "blank_version"
<del> puts allocate_count { 1000.times { blank_version } }
<del>
<del> Benchmark.ips do |x|
<del> x.report("master_version") { master_version }
<del> x.report("key_version") { key_version }
<del> x.report("present_version") { present_version }
<del> x.report("nil_version") { nil_version }
<del> x.report("blank_version") { blank_version }
<del> x.compare!
<del> end
<del>end
<del>
<del>puts 'no rel key'
<del>
<del>test
<del>
<del>puts 'rel key with real stuff'
<del>
<del>@hash['rel'] = 'hi'.freeze
<del>
<del>test
<del>
<del>puts 'rel key with nil'
<del>
<del>@hash['rel'] = nil
<del>
<del>test
<del>
<del>puts 'rel key with ""'
<del>
<del>@hash['rel'] = ""
<del>
<del>test | 1 |
Javascript | Javascript | add withcredentials config option | 86182a9415b9209662b16c25c180b958ba7e6cf9 | <ide><path>src/ng/http.js
<ide> function $HttpProvider() {
<ide> * {@link angular.module.ng.$cacheFactory $cacheFactory}, this cache will be used for
<ide> * caching.
<ide> * - **timeout** – `{number}` – timeout in milliseconds.
<add> * - **withCredentials** - `{boolean}` - whether to to set the `withCredentials` flag on the
<add> * XHR object. See {@link https://developer.mozilla.org/en/http_access_control#section_5
<add> * requests with credentials} for more information.
<ide> *
<ide> * @returns {HttpPromise} Returns a {@link angular.module.ng.$q promise} object with the
<ide> * standard `then` method and two http specific methods: `success` and `error`. The `then`
<ide> function $HttpProvider() {
<ide>
<ide> // if we won't have the response in cache, send the request to the backend
<ide> if (!cachedResp) {
<del> $httpBackend(config.method, url, reqData, done, reqHeaders, config.timeout);
<add> $httpBackend(config.method, url, reqData, done, reqHeaders, config.timeout,
<add> config.withCredentials);
<ide> }
<ide>
<ide> return promise;
<ide><path>src/ng/httpBackend.js
<ide> function $HttpBackendProvider() {
<ide>
<ide> function createHttpBackend($browser, XHR, $browserDefer, callbacks, body, locationProtocol) {
<ide> // TODO(vojta): fix the signature
<del> return function(method, url, post, callback, headers, timeout) {
<add> return function(method, url, post, callback, headers, timeout, withCredentials) {
<ide> $browser.$$incOutstandingRequestCount();
<ide> url = url || $browser.url();
<ide>
<ide> function createHttpBackend($browser, XHR, $browserDefer, callbacks, body, locati
<ide> }
<ide> };
<ide>
<add> if (withCredentials) {
<add> xhr.withCredentials = true;
<add> }
<add>
<ide> xhr.send(post || '');
<ide>
<ide> if (timeout > 0) {
<ide><path>test/ng/httpBackendSpec.js
<ide> describe('$httpBackend', function() {
<ide> });
<ide>
<ide>
<add> it('should set withCredentials', function() {
<add> $backend('GET', '/some.url', null, callback, {}, null, true);
<add> expect(MockXhr.$$lastInstance.withCredentials).toBe(true);
<add> });
<add>
<add>
<ide> describe('JSONP', function() {
<ide>
<ide> var SCRIPT_URL = /([^\?]*)\?cb=angular\.callbacks\.(.*)/;
<ide><path>test/ng/httpSpec.js
<ide> describe('$http', function() {
<ide> beforeEach(function() {
<ide> callback = jasmine.createSpy('done');
<ide> });
<add>
<ide> beforeEach(module(function($exceptionHandlerProvider) {
<ide> $exceptionHandlerProvider.mode('log');
<ide> }));
<ide> describe('$http', function() {
<ide> }));
<ide>
<ide>
<del> // TODO(vojta): test passing timeout
<del>
<del>
<ide> describe('params', function() {
<ide> it('should do basic request with params and encode', inject(function($httpBackend, $http) {
<ide> $httpBackend.expect('GET', '/url?a%3D=%3F%26&b=2').respond('');
<ide> describe('$http', function() {
<ide> });
<ide> });
<ide> });
<add>
<add>
<add> it('should pass timeout and withCredentials', function() {
<add> var $httpBackend = jasmine.createSpy('$httpBackend');
<add>
<add> $httpBackend.andCallFake(function(m, u, d, c, h, timeout, withCredentials) {
<add> expect(timeout).toBe(12345);
<add> expect(withCredentials).toBe(true);
<add> });
<add>
<add> module(function($provide) {
<add> $provide.value('$httpBackend', $httpBackend);
<add> });
<add>
<add> inject(function($http) {
<add> $http({method: 'GET', url: 'some.html', timeout: 12345, withCredentials: true});
<add> expect($httpBackend).toHaveBeenCalledOnce();
<add> });
<add>
<add> $httpBackend.verifyNoOutstandingExpectation = noop;
<add> });
<ide> }); | 4 |
PHP | PHP | respect them when previewing notification | ed4411d310f259f75e95e882b748ba9d76d7cfad | <ide><path>src/Illuminate/Notifications/Messages/MailMessage.php
<ide> public function render()
<ide>
<ide> return Container::getInstance()
<ide> ->make(Markdown::class)
<add> ->theme($this->theme ?: 'default')
<ide> ->render($this->markdown, $this->data());
<ide> }
<ide> | 1 |
Ruby | Ruby | use enumerable#with_index. we're on ruby > 1.8.7 | 49bf8597e674eafc2f180360e0c3c52f2b343a72 | <ide><path>activerecord/lib/active_record/relation.rb
<ide> def update_all(updates, conditions = nil, options = {})
<ide> # Person.update(people.keys, people.values)
<ide> def update(id, attributes)
<ide> if id.is_a?(Array)
<del> idx = -1
<del> id.collect { |one_id| idx += 1; update(one_id, attributes[idx]) }
<add> id.each.with_index.map {|one_id, idx| update(one_id, attributes[idx])}
<ide> else
<ide> object = find(id)
<ide> object.update_attributes(attributes) | 1 |
Text | Text | update changelog for 16.7 | 84b86471ea12bd1f5994d878a318dcdead1137e9 | <ide><path>CHANGELOG.md
<ide> </summary>
<ide> </details>
<ide>
<add>## 16.7.0 (December 19, 2018)
<add>
<add>### React DOM
<add>
<add>* Fix performance of `React.lazy` for large numbers of lazily-loaded components. ([@acdlite](http://github.com/acdlite) in [#14429](https://github.com/facebook/react/pull/14429))
<add>* Clear fields on unmount to avoid memory leaks. ([@trueadm](http://github.com/trueadm) in [#14276](https://github.com/facebook/react/pull/14276))
<add>* Fix bug with SSR and context when mixing `react-dom/server@16.6` and `react@<16.6`. ([@gaearon](http://github.com/gaearon) in [#14291](https://github.com/facebook/react/pull/14291))
<add>* Fix a performance regression in profiling mode. ([@bvaughn](http://github.com/bvaughn) in [#14383](https://github.com/facebook/react/pull/14383))
<add>
<add>### Scheduler (Experimental)
<add>
<add>* Post to MessageChannel instead of window. ([@acdlite](http://github.com/acdlite) in [#14234](https://github.com/facebook/react/pull/14234))
<add>* Reduce serialization overhead. ([@developit](http://github.com/developit) in [#14249](https://github.com/facebook/react/pull/14249))
<add>* Fix fallback to `setTimeout` in testing environments. ([@bvaughn](http://github.com/bvaughn) in [#14358](https://github.com/facebook/react/pull/14358))
<add>* Add methods for debugging. ([@mrkev](http://github.com/mrkev) in [#14053](https://github.com/facebook/react/pull/14053))
<add>
<add>
<ide> ## 16.6.3 (November 12, 2018)
<ide>
<ide> ### React DOM
<ide> This release was published in a broken state and should be skipped.
<ide> * Fix incorrect data in `compositionend` event when typing Korean on IE11 ([@crux153](https://github.com/crux153) in [#12563](https://github.com/facebook/react/issues/12563))
<ide> * Fix a crash when using dynamic `children` in the `<option>` tag ([@Slowyn](https://github.com/Slowyn) in [#13261](https://github.com/facebook/react/issues/13261), [@gaearon](https://github.com/gaearon) in [#13465](https://github.com/facebook/react/pull/13465))
<ide> * Fix the `checked` attribute not getting initially set on the `input` ([@dilidili](https://github.com/dilidili) in [#13114](https://github.com/facebook/react/issues/13114))
<del>* Fix hydration of `dangerouslySetInnerHTML` when `__html` is not a string ([@gaearon](https://github.com/gaearon) in [#13353](https://github.com/facebook/react/issues/13353))
<add>* Fix hydration of `dangerouslySetInnerHTML` when `__html` is not a string ([@gaearon](https://github.com/gaearon) in [#13353](https://github.com/facebook/react/issues/13353))
<ide> * Fix a warning about missing controlled `onChange` to fire on falsy values too ([@nicolevy](https://github.com/nicolevy) in [#12628](https://github.com/facebook/react/issues/12628))
<ide> * Fix `submit` and `reset` buttons getting an empty label ([@ellsclytn](https://github.com/ellsclytn) in [#12780](https://github.com/facebook/react/issues/12780))
<ide> * Fix the `onSelect` event not being triggered after drag and drop ([@gaearon](https://github.com/gaearon) in [#13422](https://github.com/facebook/react/issues/13422))
<ide> This release was published in a broken state and should be skipped.
<ide> * Deduplicate warning messages about invalid callback. ([@yenshih](https://github.com/yenshih) in [#11833](https://github.com/facebook/react/pull/11833))
<ide> * Deprecate `ReactDOM.unstable_createPortal()` in favor of `ReactDOM.createPortal()`. ([@prometheansacrifice](https://github.com/prometheansacrifice) in [#11747](https://github.com/facebook/react/pull/11747))
<ide> * Don't emit User Timing entries for context types. ([@abhaynikam](https://github.com/abhaynikam) in [#12250](https://github.com/facebook/react/pull/12250))
<del>* Improve the error message when context consumer child isn't a function. ([@raunofreiberg](https://github.com/raunofreiberg) in [#12267](https://github.com/facebook/react/pull/12267))
<add>* Improve the error message when context consumer child isn't a function. ([@raunofreiberg](https://github.com/raunofreiberg) in [#12267](https://github.com/facebook/react/pull/12267))
<ide> * Improve the error message when adding a ref to a functional component. ([@skiritsis](https://github.com/skiritsis) in [#11782](https://github.com/facebook/react/pull/11782))
<ide>
<ide> ### React DOM Server | 1 |
Text | Text | add note about polyfilling css grid for ie11. | 16ee0fe2dcf9fe1ad6ff28b1b93a709aa0cc16e7 | <ide><path>docs/advanced-features/customizing-postcss-config.md
<ide> Out of the box, with no configuration, Next.js compiles CSS with the following t
<ide> - [Break Properties](https://developer.mozilla.org/en-US/docs/Web/CSS/break-after)
<ide> - [`font-variant` Property](https://developer.mozilla.org/en-US/docs/Web/CSS/font-variant)
<ide> - [Gap Properties](https://developer.mozilla.org/en-US/docs/Web/CSS/gap)
<del> - [Grid Layout](https://developer.mozilla.org/en-US/docs/Web/CSS/grid)
<ide> - [Media Query Ranges](https://developer.mozilla.org/en-US/docs/Web/CSS/Media_Queries/Using_media_queries#Syntax_improvements_in_Level_4)
<ide>
<ide> By default, [Custom Properties](https://developer.mozilla.org/en-US/docs/Web/CSS/var) (CSS variables) are **not compiled** for IE11 support.
<ide>
<ide> CSS variables are not compiled because it is [not possible to safely do so](https://github.com/MadLittleMods/postcss-css-variables#caveats).
<ide> If you must use variables, consider using something like [Sass variables](https://sass-lang.com/documentation/variables) which are compiled away by [Sass](https://sass-lang.com/).
<ide>
<add>> **Note**: To support [Grid Layout](https://developer.mozilla.org/en-US/docs/Web/CSS/grid), you need to enable `grid: "autoplace"` for Autoprefixer. See "Customizing Plugins" below.
<add>
<ide> ## Customizing Target Browsers
<ide>
<ide> Next.js allows you to configure the target browsers (for [Autoprefixer](https://github.com/postcss/autoprefixer) and compiled css features) through [Browserslist](https://github.com/browserslist/browserslist). | 1 |
Javascript | Javascript | name anonymous function for debugging | 65dbc52a9beacb23a45ed5d7b1fa4bce792e8142 | <ide><path>lib/internal/modules/cjs/loader.js
<ide> Module._initPaths = function() {
<ide> }
<ide>
<ide> if (nodePath) {
<del> paths = nodePath.split(path.delimiter).filter(function(path) {
<add> paths = nodePath.split(path.delimiter).filter(function pathsFilterCB(path) {
<ide> return !!path;
<ide> }).concat(paths);
<ide> } | 1 |
PHP | PHP | refresh the cache event dispatcher when faked | d5c46e5f7b07fb6524a16f88c809ca9f2f7789f3 | <ide><path>src/Illuminate/Cache/CacheManager.php
<ide> protected function setEventDispatcher(Repository $repository): void
<ide> );
<ide> }
<ide>
<add> /**
<add> * Refreshes the event dispatcher of all resolved repositories
<add> * with the currently bound event dispatcher implementation.
<add> */
<add> public function refreshEventDispatcher()
<add> {
<add> array_map([$this, 'setEventDispatcher'], $this->stores);
<add> }
<add>
<ide> /**
<ide> * Get the cache prefix.
<ide> *
<ide><path>src/Illuminate/Support/Facades/Event.php
<ide> public static function fake($eventsToFake = [])
<ide> static::swap($fake = new EventFake(static::getFacadeRoot(), $eventsToFake));
<ide>
<ide> Model::setEventDispatcher($fake);
<add> Cache::refreshEventDispatcher();
<ide>
<ide> return $fake;
<ide> }
<ide><path>tests/Support/SupportFacadesEventTest.php
<ide>
<ide> namespace Illuminate\Tests\Support;
<ide>
<add>use Illuminate\Cache\CacheManager;
<add>use Illuminate\Cache\Events\CacheMissed;
<add>use Illuminate\Config\Repository as ConfigRepository;
<ide> use Illuminate\Container\Container;
<add>use Illuminate\Contracts\Events\Dispatcher as DispatcherContract;
<ide> use Illuminate\Database\Eloquent\Model;
<ide> use Illuminate\Events\Dispatcher;
<add>use Illuminate\Support\Facades\Cache;
<ide> use Illuminate\Support\Facades\Event;
<ide> use Illuminate\Support\Facades\Facade;
<ide> use Illuminate\Support\Testing\Fakes\EventFake;
<ide> protected function setUp(): void
<ide>
<ide> $container = new Container;
<ide> $container->instance('events', $this->events);
<add> $container->alias('events', DispatcherContract::class);
<add> $container->instance('cache', new CacheManager($container));
<add> $container->instance('config', new ConfigRepository($this->getCacheConfig()));
<ide>
<ide> Facade::setFacadeApplication($container);
<ide> }
<ide> public function testFakeForSwapsDispatchers()
<ide> $this->assertSame($this->events, Event::getFacadeRoot());
<ide> $this->assertSame($this->events, Model::getEventDispatcher());
<ide> }
<add>
<add> public function testFakeSwapsDispatchersInResolvedCacheRepositories()
<add> {
<add> $arrayRepository = Cache::store('array');
<add>
<add> $this->events->shouldReceive('dispatch')->once();
<add> $arrayRepository->get('foo');
<add>
<add> Event::fake();
<add>
<add> $arrayRepository->get('bar');
<add>
<add> Event::assertDispatched(CacheMissed::class);
<add> }
<add>
<add> protected function getCacheConfig()
<add> {
<add> return [
<add> 'cache' => [
<add> 'stores' => [
<add> 'array' => [
<add> 'driver' => 'array'
<add> ]
<add> ]
<add> ]
<add> ];
<add> }
<ide> }
<ide>
<ide> class FakeForStub | 3 |
Javascript | Javascript | add matrix calcs to direction shadow class | 0442b430be43b04580abcdcc7cdbbbaed5540c44 | <ide><path>src/lights/DirectionalLightShadow.js
<ide> import { OrthographicCamera } from '../cameras/OrthographicCamera.js';
<ide> * @author mrdoob / http://mrdoob.com/
<ide> */
<ide>
<del>function DirectionalLightShadow( ) {
<add>function DirectionalLightShadow() {
<ide>
<ide> LightShadow.call( this, new OrthographicCamera( - 5, 5, 5, - 5, 0.5, 500 ) );
<ide>
<ide> }
<ide>
<ide> DirectionalLightShadow.prototype = Object.assign( Object.create( LightShadow.prototype ), {
<ide>
<del> constructor: DirectionalLightShadow
<add> constructor: DirectionalLightShadow,
<add>
<add> isDirectionalLightShadow: true,
<add>
<add> updateMatrices: function ( light, viewCamera, viewportIndex ) {
<add>
<add> var camera = this.camera,
<add> lightPositionWorld = this._lightPositionWorld,
<add> lookTarget = this._lookTarget;
<add>
<add> lightPositionWorld.setFromMatrixPosition( light.matrixWorld );
<add> camera.position.copy( lightPositionWorld );
<add>
<add> lookTarget.setFromMatrixPosition( light.target.matrixWorld );
<add> camera.lookAt( lookTarget );
<add> camera.updateMatrixWorld();
<add>
<add> LightShadow.prototype.updateMatrices.call( this, light, viewCamera, viewportIndex );
<add>
<add> }
<ide>
<ide> } );
<ide> | 1 |
Javascript | Javascript | fix tests after reverting commits | e86de0db56e156d6c603584c440026effaf19b82 | <ide><path>test/ng/rootScopeSpec.js
<ide> describe('Scope', function() {
<ide> $browser.defer.flush(0);
<ide> expect(log).toEqual(['eval-ed 1!', 'eval-ed 2!']);
<ide>
<del> expect(function() {
<del> $browser.defer.flush(0);
<del> }).toThrow('No deferred tasks with delay up to 0ms to be flushed!');
<add> $browser.defer.flush(100000);
<add> expect(log).toEqual(['eval-ed 1!', 'eval-ed 2!']);
<ide> });
<ide> });
<ide> }); | 1 |
Javascript | Javascript | avoid non-alphanumeric chars in expando properties | 0cdec797de23555c95a70978f4d9e06f3b041330 | <ide><path>src/data/Data.js
<ide> function Data() {
<ide> }
<ide> });
<ide>
<del> this.expando = jQuery.expando + Math.random();
<add> this.expando = jQuery.expando + Data.uid++;
<ide> }
<ide>
<ide> Data.uid = 1; | 1 |
Python | Python | fix message formatting | e2f75eb492c8f8747d8c752f3869f329dfcd3086 | <ide><path>spacy/cli/download.py
<ide> def download(model, direct=False, *pip_args):
<ide> # Dirty, but since spacy.download and the auto-linking is
<ide> # mostly a convenience wrapper, it's best to show a success
<ide> # message and loading instructions, even if linking fails.
<del> prints(Messages.M001.format(name=model_name), title=Messages.M002)
<add> prints(Messages.M001, title=Messages.M002.format(name=model_name))
<ide>
<ide>
<ide> def get_json(url, desc): | 1 |
Ruby | Ruby | remove unnecessary `assert_valid_default` | 59c218ea9b41e3ef5cef571f31fb83a7ec72e9a4 | <ide><path>activerecord/lib/active_record/connection_adapters/mysql/column.rb
<ide> class Column < ConnectionAdapters::Column # :nodoc:
<ide>
<ide> def initialize(*)
<ide> super
<del> assert_valid_default
<ide> extract_default
<ide> end
<ide>
<ide> def extract_default
<ide> @default = null || strict ? nil : ''
<ide> end
<ide> end
<del>
<del> def assert_valid_default
<del> if blob_or_text_column? && default.present?
<del> raise ArgumentError, "#{type} columns cannot have a default value: #{default.inspect}"
<del> end
<del> end
<ide> end
<ide> end
<ide> end
<ide><path>activerecord/test/cases/column_definition_test.rb
<ide> def test_should_be_empty_string_default_for_mysql_binary_data_types
<ide> end
<ide>
<ide> def test_should_not_set_default_for_blob_and_text_data_types
<del> assert_raise ArgumentError do
<del> MySQL::Column.new("title", "a", SqlTypeMetadata.new(sql_type: "blob"))
<del> end
<del>
<ide> text_type = MySQL::TypeMetadata.new(
<ide> SqlTypeMetadata.new(type: :text))
<del> assert_raise ArgumentError do
<del> MySQL::Column.new("title", "Hello", text_type)
<del> end
<ide>
<ide> text_column = MySQL::Column.new("title", nil, text_type)
<ide> assert_equal nil, text_column.default | 2 |
Python | Python | remove numpy.compat._pep440 from default imports | 4c986e7cedde18530745dca072e06659f1fb20a9 | <ide><path>numpy/compat/__init__.py
<ide>
<ide> """
<ide> from . import _inspect
<del>from . import _pep440
<ide> from . import py3k
<ide> from ._inspect import getargspec, formatargspec
<ide> from .py3k import * | 1 |
Javascript | Javascript | add touch (pan) support to d3.behavior.zoom | 2d98bf10a4b4e41c5df54e0a1febb22ab687c48f | <ide><path>d3.behavior.js
<ide> d3.behavior.zoom = function() {
<ide> .on("mousedown", mousedown)
<ide> .on("mousewheel", mousewheel)
<ide> .on("DOMMouseScroll", mousewheel)
<del> .on("dblclick", mousewheel);
<add> .on("dblclick", mousewheel)
<add> .on("touchstart", mousedown);
<ide>
<ide> d3.select(window)
<ide> .on("mousemove", mousemove)
<del> .on("mouseup", mouseup);
<add> .on("mouseup", mouseup)
<add> .on("touchmove", mousemove)
<add> .on("touchend", mouseup)
<add> .on("touchcancel", mouseup);
<ide> }
<ide>
<ide> function mousedown(d, i) {
<add> var touches = d3.event.touches,
<add> e = touches ? touches[0] : d3.event;
<ide> pan = {
<del> x0: x - d3.event.clientX,
<del> y0: y - d3.event.clientY,
<add> x0: x - e.clientX,
<add> y0: y - e.clientY,
<ide> target: this,
<ide> data: d,
<ide> index: i
<ide> d3.behavior.zoom = function() {
<ide> }
<ide>
<ide> function mousemove() {
<add> var touches = d3.event.touches,
<add> e = touches ? touches[0] : d3.event;
<ide> zoom = null;
<ide> if (pan) {
<del> x = d3.event.clientX + pan.x0;
<del> y = d3.event.clientY + pan.y0;
<add> x = e.clientX + pan.x0;
<add> y = e.clientY + pan.y0;
<ide> dispatch.call(pan.target, pan.data, pan.index);
<ide> }
<ide> }
<ide><path>d3.behavior.min.js
<del>(function(){d3.behavior={},d3.behavior.zoom=function(){function m(a,b){function i(a,b){var c=a.__domain||(a.__domain=a.domain()),d=a.range().map(function(a){return(a-b)/h});a.domain(c).domain(d.map(a.invert))}var g=d3.event,h=Math.pow(2,e);d3.event={scale:h,translate:[c,d],transform:function(a,b){a&&i(a,c),b&&i(b,d)}};try{for(var j=0,k=f.length;j<k;j++)f[j].call(this,a,b)}finally{d3.event=g}}function l(f,g){var i=d3.event;if(!h){var j=d3.svg.mouse(this.nearestViewportElement||this);h={x0:c,y0:d,z0:e,x1:c-j[0],y1:d-j[1]}}if(i.type==="dblclick")e=i.shiftKey?Math.ceil(e-1):Math.floor(e+1);else{var k=(i.wheelDelta/120||-i.detail)*.1;if(a<0){var l=Date.now(),n=l-b;n>9&&Math.abs(i.wheelDelta)/n>=50&&(a=1),b=l}a===1&&(k*=.03),e+=k}var o=Math.pow(2,e-h.z0)-1;c=h.x0+h.x1*o,d=h.y0+h.y1*o,m.call(this,f,g)}function k(){g&&(j(),g=null)}function j(){h=null,g&&(c=d3.event.clientX+g.x0,d=d3.event.clientY+g.y0,m.call(g.target,g.data,g.index))}function i(a,b){g={x0:c-d3.event.clientX,y0:d-d3.event.clientY,target:this,data:a,index:b},d3.event.preventDefault(),window.focus()}function h(){var a=this.on("mousedown",i).on("mousewheel",l).on("DOMMouseScroll",l).on("dblclick",l);d3.select(window).on("mousemove",j).on("mouseup",k)}var a=/WebKit\/533/.test(navigator.userAgent)?-1:0,b=0,c=0,d=0,e=0,f=[],g,h;h.on=function(a,b){a=="zoom"&&f.push(b);return h};return h}})()
<ide>\ No newline at end of file
<add>(function(){d3.behavior={},d3.behavior.zoom=function(){function m(a,b){function i(a,b){var c=a.__domain||(a.__domain=a.domain()),d=a.range().map(function(a){return(a-b)/h});a.domain(c).domain(d.map(a.invert))}var g=d3.event,h=Math.pow(2,e);d3.event={scale:h,translate:[c,d],transform:function(a,b){a&&i(a,c),b&&i(b,d)}};try{for(var j=0,k=f.length;j<k;j++)f[j].call(this,a,b)}finally{d3.event=g}}function l(f,g){var i=d3.event;if(!h){var j=d3.svg.mouse(this.nearestViewportElement||this);h={x0:c,y0:d,z0:e,x1:c-j[0],y1:d-j[1]}}if(i.type==="dblclick")e=i.shiftKey?Math.ceil(e-1):Math.floor(e+1);else{var k=(i.wheelDelta/120||-i.detail)*.1;if(a<0){var l=Date.now(),n=l-b;n>9&&Math.abs(i.wheelDelta)/n>=50&&(a=1),b=l}a===1&&(k*=.03),e+=k}var o=Math.pow(2,e-h.z0)-1;c=h.x0+h.x1*o,d=h.y0+h.y1*o,m.call(this,f,g)}function k(){g&&(j(),g=null)}function j(){var a=d3.event.touches,b=a?a[0]:d3.event;h=null,g&&(c=b.clientX+g.x0,d=b.clientY+g.y0,m.call(g.target,g.data,g.index))}function i(a,b){var e=d3.event.touches,f=e?e[0]:d3.event;g={x0:c-f.clientX,y0:d-f.clientY,target:this,data:a,index:b},d3.event.preventDefault(),window.focus()}function h(){var a=this.on("mousedown",i).on("mousewheel",l).on("DOMMouseScroll",l).on("dblclick",l).on("touchstart",i);d3.select(window).on("mousemove",j).on("mouseup",k).on("touchmove",j).on("touchend",k).on("touchcancel",k)}var a=/WebKit\/533/.test(navigator.userAgent)?-1:0,b=0,c=0,d=0,e=0,f=[],g,h;h.on=function(a,b){a=="zoom"&&f.push(b);return h};return h}})()
<ide>\ No newline at end of file
<ide><path>src/behavior/zoom.js
<ide> d3.behavior.zoom = function() {
<ide> .on("mousedown", mousedown)
<ide> .on("mousewheel", mousewheel)
<ide> .on("DOMMouseScroll", mousewheel)
<del> .on("dblclick", mousewheel);
<add> .on("dblclick", mousewheel)
<add> .on("touchstart", mousedown);
<ide>
<ide> d3.select(window)
<ide> .on("mousemove", mousemove)
<del> .on("mouseup", mouseup);
<add> .on("mouseup", mouseup)
<add> .on("touchmove", mousemove)
<add> .on("touchend", mouseup)
<add> .on("touchcancel", mouseup);
<ide> }
<ide>
<ide> function mousedown(d, i) {
<add> var touches = d3.event.touches,
<add> e = touches ? touches[0] : d3.event;
<ide> pan = {
<del> x0: x - d3.event.clientX,
<del> y0: y - d3.event.clientY,
<add> x0: x - e.clientX,
<add> y0: y - e.clientY,
<ide> target: this,
<ide> data: d,
<ide> index: i
<ide> d3.behavior.zoom = function() {
<ide> }
<ide>
<ide> function mousemove() {
<add> var touches = d3.event.touches,
<add> e = touches ? touches[0] : d3.event;
<ide> zoom = null;
<ide> if (pan) {
<del> x = d3.event.clientX + pan.x0;
<del> y = d3.event.clientY + pan.y0;
<add> x = e.clientX + pan.x0;
<add> y = e.clientY + pan.y0;
<ide> dispatch.call(pan.target, pan.data, pan.index);
<ide> }
<ide> } | 3 |
Python | Python | replace instance of md5.new() with hashlib.md5() | b27c8683f0ed046bf78f465cf15c52a7df0681cd | <ide><path>py/libcloud/drivers/gogrid.py
<ide> from libcloud.interface import INodeDriver
<ide> from zope.interface import implements
<ide> import httplib
<del>import md5
<ide> import time
<ide> import urllib
<ide> import hashlib
<ide> def make_request(self, action, params, data=''):
<ide>
<ide> def get_signature(self, key, secret):
<ide> """ create sig from md5 of key + secret + time """
<del> m = md5.new(key + secret + str(int(time.time())))
<del> return m.hexdigest()
<add> return hashlib.md5(key + secret + str(int(time.time()))).hexdigest()
<ide>
<ide> def describe_servers(self):
<ide> return Response(self.make_request("/grid/server/list", {})) | 1 |
PHP | PHP | shorten a long exception message | 2a1b5d55ae88d8edd3a56a6eac3f61ce74668548 | <ide><path>laravel/lang.php
<ide> protected function parse($key)
<ide> return array($segments[0], implode('.', array_slice($segments, 1)));
<ide> }
<ide>
<del> throw new \InvalidArgumentException("Invalid language line [$key]. A specific line must be specified.");
<add> throw new \InvalidArgumentException("Invalid language line [$key].");
<ide> }
<ide>
<ide> /**
<ide> public function __toString()
<ide> return $this->get();
<ide> }
<ide>
<del>}
<add>}
<ide>\ No newline at end of file | 1 |
Javascript | Javascript | resolve merge conflict in unknown property test | 1cc312437c249fcae4d4e25dd8b18b1eb562d9ba | <ide><path>packages/sproutcore-metal/lib/properties.js
<ide> var WATCHED_DESC = {
<ide>
<ide> function w_get(obj, keyName) {
<ide> var m = meta(obj, false);
<del> return m.values ? m.values[keyName] : undefined;
<add> if (m.values) {
<add> if (m.values[keyName] !== undefined) { return m.values[keyName]; }
<add> obj.unknownProperty(keyName);
<add> };
<add> return undefined;
<ide> }
<ide>
<ide> function w_set(obj, keyName, value) {
<ide><path>packages/sproutcore-metal/tests/watching/watch_test.js
<ide> test("watching an object THEN defining it should work also", function() {
<ide>
<ide> });
<ide>
<add>test("accessing a watched unknown property triggers call to unknownProperty", function(){
<add> var unknownPropertyWasCalled = false;
<add> var watchedPropertyName = 'foo'
<add>
<add> var obj = {
<add> unknownProperty: function(propName){
<add> if (propName === watchedPropertyName) { unknownPropertyWasCalled = true };
<add> }
<add> };
<add> SC.watch(obj, watchedPropertyName);
<add> SC.get(obj, watchedPropertyName)
<add>
<add> ok(unknownPropertyWasCalled);
<add>});
<add>
<ide> testBoth('watching an object value then unwatching should restore old value', function(get, set) {
<ide>
<ide> var obj = { foo: { bar: { baz: { biff: 'BIFF' } } } }; | 2 |
Ruby | Ruby | disambiguate uses of post/post by using "article" | 9a2b18cfd34dedcd73d3f03e681272366708740b | <ide><path>actionpack/lib/action_view/helpers/date_helper.rb
<ide> def time_ago_in_words(from_time, include_seconds = false)
<ide> # NOTE: Discarded selects will default to 1. So if no month select is available, January will be assumed.
<ide> #
<ide> # ==== Examples
<del> # # Generates a date select that when POSTed is stored in the post variable, in the written_on attribute.
<del> # date_select("post", "written_on")
<add> # # Generates a date select that when POSTed is stored in the article variable, in the written_on attribute.
<add> # date_select("article", "written_on")
<ide> #
<del> # # Generates a date select that when POSTed is stored in the post variable, in the written_on attribute,
<add> # # Generates a date select that when POSTed is stored in the article variable, in the written_on attribute,
<ide> # # with the year in the year drop down box starting at 1995.
<del> # date_select("post", "written_on", :start_year => 1995)
<add> # date_select("article", "written_on", :start_year => 1995)
<ide> #
<del> # # Generates a date select that when POSTed is stored in the post variable, in the written_on attribute,
<add> # # Generates a date select that when POSTed is stored in the article variable, in the written_on attribute,
<ide> # # with the year in the year drop down box starting at 1995, numbers used for months instead of words,
<ide> # # and without a day select box.
<del> # date_select("post", "written_on", :start_year => 1995, :use_month_numbers => true,
<add> # date_select("article", "written_on", :start_year => 1995, :use_month_numbers => true,
<ide> # :discard_day => true, :include_blank => true)
<ide> #
<del> # # Generates a date select that when POSTed is stored in the post variable, in the written_on attribute
<add> # # Generates a date select that when POSTed is stored in the article variable, in the written_on attribute
<ide> # # with the fields ordered as day, month, year rather than month, day, year.
<del> # date_select("post", "written_on", :order => [:day, :month, :year])
<add> # date_select("article", "written_on", :order => [:day, :month, :year])
<ide> #
<ide> # # Generates a date select that when POSTed is stored in the user variable, in the birthday attribute
<ide> # # lacking a year field.
<ide> # date_select("user", "birthday", :order => [:month, :day])
<ide> #
<del> # # Generates a date select that when POSTed is stored in the post variable, in the written_on attribute
<add> # # Generates a date select that when POSTed is stored in the article variable, in the written_on attribute
<ide> # # which is initially set to the date 3 days from the current date
<del> # date_select("post", "written_on", :default => 3.days.from_now)
<add> # date_select("article", "written_on", :default => 3.days.from_now)
<ide> #
<ide> # # Generates a date select that when POSTed is stored in the credit_card variable, in the bill_due attribute
<ide> # # that will have a default day of 20.
<ide> # date_select("credit_card", "bill_due", :default => { :day => 20 })
<ide> #
<ide> # # Generates a date select with custom prompts.
<del> # date_select("post", "written_on", :prompt => { :day => 'Select day', :month => 'Select month', :year => 'Select year' })
<add> # date_select("article", "written_on", :prompt => { :day => 'Select day', :month => 'Select month', :year => 'Select year' })
<ide> #
<ide> # The selects are prepared for multi-parameter assignment to an Active Record object.
<ide> #
<ide> def date_select(object_name, method, options = {}, html_options = {})
<ide> # If anything is passed in the html_options hash it will be applied to every select tag in the set.
<ide> #
<ide> # ==== Examples
<del> # # Creates a time select tag that, when POSTed, will be stored in the post variable in the sunrise attribute.
<del> # time_select("post", "sunrise")
<add> # # Creates a time select tag that, when POSTed, will be stored in the article variable in the sunrise attribute.
<add> # time_select("article", "sunrise")
<ide> #
<del> # # Creates a time select tag with a seconds field that, when POSTed, will be stored in the post variables in
<add> # # Creates a time select tag with a seconds field that, when POSTed, will be stored in the article variables in
<ide> # # the sunrise attribute.
<del> # time_select("post", "start_time", :include_seconds => true)
<add> # time_select("article", "start_time", :include_seconds => true)
<ide> #
<ide> # # You can set the <tt>:minute_step</tt> to 15 which will give you: 00, 15, 30 and 45.
<ide> # time_select 'game', 'game_time', {:minute_step => 15}
<ide> #
<ide> # # Creates a time select tag with a custom prompt. Use <tt>:prompt => true</tt> for generic prompts.
<del> # time_select("post", "written_on", :prompt => {:hour => 'Choose hour', :minute => 'Choose minute', :second => 'Choose seconds'})
<del> # time_select("post", "written_on", :prompt => {:hour => true}) # generic prompt for hours
<del> # time_select("post", "written_on", :prompt => true) # generic prompts for all
<add> # time_select("article", "written_on", :prompt => {:hour => 'Choose hour', :minute => 'Choose minute', :second => 'Choose seconds'})
<add> # time_select("article", "written_on", :prompt => {:hour => true}) # generic prompt for hours
<add> # time_select("article", "written_on", :prompt => true) # generic prompts for all
<ide> #
<ide> # # You can set :ampm option to true which will show the hours as: 12 PM, 01 AM .. 11 PM.
<ide> # time_select 'game', 'game_time', {:ampm => true}
<ide> def time_select(object_name, method, options = {}, html_options = {})
<ide> # If anything is passed in the html_options hash it will be applied to every select tag in the set.
<ide> #
<ide> # ==== Examples
<del> # # Generates a datetime select that, when POSTed, will be stored in the post variable in the written_on
<add> # # Generates a datetime select that, when POSTed, will be stored in the article variable in the written_on
<ide> # # attribute.
<del> # datetime_select("post", "written_on")
<add> # datetime_select("article", "written_on")
<ide> #
<ide> # # Generates a datetime select with a year select that starts at 1995 that, when POSTed, will be stored in the
<del> # # post variable in the written_on attribute.
<del> # datetime_select("post", "written_on", :start_year => 1995)
<add> # # article variable in the written_on attribute.
<add> # datetime_select("article", "written_on", :start_year => 1995)
<ide> #
<ide> # # Generates a datetime select with a default value of 3 days from the current time that, when POSTed, will
<ide> # # be stored in the trip variable in the departing attribute.
<ide> # datetime_select("trip", "departing", :default => 3.days.from_now)
<ide> #
<ide> # # Generate a datetime select with hours in the AM/PM format
<del> # datetime_select("post", "written_on", :ampm => true)
<add> # datetime_select("article", "written_on", :ampm => true)
<ide> #
<del> # # Generates a datetime select that discards the type that, when POSTed, will be stored in the post variable
<add> # # Generates a datetime select that discards the type that, when POSTed, will be stored in the article variable
<ide> # # as the written_on attribute.
<del> # datetime_select("post", "written_on", :discard_type => true)
<add> # datetime_select("article", "written_on", :discard_type => true)
<ide> #
<ide> # # Generates a datetime select with a custom prompt. Use <tt>:prompt => true</tt> for generic prompts.
<del> # datetime_select("post", "written_on", :prompt => {:day => 'Choose day', :month => 'Choose month', :year => 'Choose year'})
<del> # datetime_select("post", "written_on", :prompt => {:hour => true}) # generic prompt for hours
<del> # datetime_select("post", "written_on", :prompt => true) # generic prompts for all
<add> # datetime_select("article", "written_on", :prompt => {:day => 'Choose day', :month => 'Choose month', :year => 'Choose year'})
<add> # datetime_select("article", "written_on", :prompt => {:hour => true}) # generic prompt for hours
<add> # datetime_select("article", "written_on", :prompt => true) # generic prompts for all
<ide> #
<ide> # The selects are prepared for multi-parameter assignment to an Active Record object.
<ide> def datetime_select(object_name, method, options = {}, html_options = {}) | 1 |
Javascript | Javascript | remove usage of require('util') | a89451409ed00d9bbc037e13b4f939b4a4ee93b3 | <ide><path>lib/trace_events.js
<ide> if (!hasTracing || !ownsProcessState)
<ide>
<ide> const { CategorySet, getEnabledCategories } = internalBinding('trace_events');
<ide> const { customInspectSymbol } = require('internal/util');
<del>const { format } = require('util');
<add>const { format } = require('internal/util/inspect');
<ide>
<ide> const enabledTracingObjects = new Set();
<ide> | 1 |
Java | Java | improve perf of operatorignoreelements | 6db98f8750f580995657f83b5620b579c97e6a06 | <ide><path>src/main/java/rx/Observable.java
<ide> public final <T2, D1, D2, R> Observable<R> groupJoin(Observable<T2> right, Func1
<ide> * @see <a href="http://reactivex.io/documentation/operators/ignoreelements.html">ReactiveX operators documentation: IgnoreElements</a>
<ide> */
<ide> public final Observable<T> ignoreElements() {
<del> return filter(UtilityFunctions.alwaysFalse());
<add> return lift(OperatorIgnoreElements.<T> instance());
<ide> }
<ide>
<ide> /**
<ide><path>src/main/java/rx/internal/operators/OperatorIgnoreElements.java
<add>/**
<add> * Copyright 2014 Netflix, Inc.
<add> *
<add> * Licensed under the Apache License, Version 2.0 (the "License");
<add> * you may not use this file except in compliance with the License.
<add> * You may obtain a copy of the License at
<add> *
<add> * http://www.apache.org/licenses/LICENSE-2.0
<add> *
<add> * Unless required by applicable law or agreed to in writing, software
<add> * distributed under the License is distributed on an "AS IS" BASIS,
<add> * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
<add> * See the License for the specific language governing permissions and
<add> * limitations under the License.
<add> */
<add>package rx.internal.operators;
<add>
<add>import rx.Observable.Operator;
<add>import rx.Subscriber;
<add>
<add>public class OperatorIgnoreElements<T> implements Operator<T, T> {
<add>
<add> private static class Holder {
<add> static final OperatorIgnoreElements<?> INSTANCE = new OperatorIgnoreElements<Object>();
<add> }
<add>
<add> @SuppressWarnings("unchecked")
<add> public static <T> OperatorIgnoreElements<T> instance() {
<add> return (OperatorIgnoreElements<T>) Holder.INSTANCE;
<add> }
<add>
<add> private OperatorIgnoreElements() {
<add>
<add> }
<add>
<add> @Override
<add> public Subscriber<? super T> call(final Subscriber<? super T> child) {
<add> Subscriber<T> parent = new Subscriber<T>() {
<add>
<add> @Override
<add> public void onCompleted() {
<add> child.onCompleted();
<add> }
<add>
<add> @Override
<add> public void onError(Throwable e) {
<add> child.onError(e);
<add> }
<add>
<add> @Override
<add> public void onNext(T t) {
<add> // ignore element
<add> }
<add>
<add> };
<add> child.add(parent);
<add> return parent;
<add> }
<add>
<add>}
<ide><path>src/test/java/rx/internal/operators/OperatorIgnoreElementsTest.java
<add>package rx.internal.operators;
<add>
<add>import static org.junit.Assert.assertEquals;
<add>import static org.junit.Assert.assertTrue;
<add>
<add>import java.util.Arrays;
<add>import java.util.concurrent.atomic.AtomicBoolean;
<add>import java.util.concurrent.atomic.AtomicInteger;
<add>
<add>import org.junit.Test;
<add>
<add>import rx.Observable;
<add>import rx.Subscriber;
<add>import rx.functions.Action0;
<add>import rx.functions.Action1;
<add>import rx.observers.TestSubscriber;
<add>
<add>public class OperatorIgnoreElementsTest {
<add>
<add> @Test
<add> public void testWithEmpty() {
<add> assertTrue(Observable.empty().ignoreElements().isEmpty().toBlocking().single());
<add> }
<add>
<add> @Test
<add> public void testWithNonEmpty() {
<add> assertTrue(Observable.just(1, 2, 3).ignoreElements().isEmpty().toBlocking().single());
<add> }
<add>
<add> @Test
<add> public void testUpstreamIsProcessedButIgnored() {
<add> final int num = 10;
<add> final AtomicInteger upstreamCount = new AtomicInteger();
<add> int count = Observable.range(1, num)
<add> .doOnNext(new Action1<Integer>() {
<add> @Override
<add> public void call(Integer t) {
<add> upstreamCount.incrementAndGet();
<add> }
<add> })
<add> .ignoreElements()
<add> .count().toBlocking().single();
<add> assertEquals(num, upstreamCount.get());
<add> assertEquals(0, count);
<add> }
<add>
<add> @Test
<add> public void testCompletedOk() {
<add> TestSubscriber<Object> ts = new TestSubscriber<Object>();
<add> Observable.range(1, 10).ignoreElements().subscribe(ts);
<add> ts.assertNoErrors();
<add> ts.assertReceivedOnNext(Arrays.asList());
<add> ts.assertTerminalEvent();
<add> ts.assertUnsubscribed();
<add> }
<add>
<add> @Test
<add> public void testErrorReceived() {
<add> TestSubscriber<Object> ts = new TestSubscriber<Object>();
<add> RuntimeException ex = new RuntimeException("boo");
<add> Observable.error(ex).ignoreElements().subscribe(ts);
<add> ts.assertReceivedOnNext(Arrays.asList());
<add> ts.assertTerminalEvent();
<add> ts.assertUnsubscribed();
<add> assertEquals(1, ts.getOnErrorEvents().size());
<add> assertEquals("boo", ts.getOnErrorEvents().get(0).getMessage());
<add> }
<add>
<add> @Test
<add> public void testUnsubscribesFromUpstream() {
<add> final AtomicBoolean unsub = new AtomicBoolean();
<add> Observable.range(1, 10).doOnUnsubscribe(new Action0() {
<add> @Override
<add> public void call() {
<add> unsub.set(true);
<add> }})
<add> .subscribe();
<add> assertTrue(unsub.get());
<add> }
<add>
<add> @Test(timeout = 10000)
<add> public void testDoesNotHangAndProcessesAllUsingBackpressure() {
<add> final AtomicInteger upstreamCount = new AtomicInteger();
<add> final AtomicInteger count = new AtomicInteger(0);
<add> int num = 10;
<add> Observable.range(1, num)
<add> //
<add> .doOnNext(new Action1<Integer>() {
<add> @Override
<add> public void call(Integer t) {
<add> upstreamCount.incrementAndGet();
<add> }
<add> })
<add> //
<add> .ignoreElements()
<add> //
<add> .doOnNext(new Action1<Integer>() {
<add>
<add> @Override
<add> public void call(Integer t) {
<add> upstreamCount.incrementAndGet();
<add> }
<add> })
<add> //
<add> .subscribe(new Subscriber<Integer>() {
<add>
<add> @Override
<add> public void onStart() {
<add> request(1);
<add> }
<add>
<add> @Override
<add> public void onCompleted() {
<add>
<add> }
<add>
<add> @Override
<add> public void onError(Throwable e) {
<add> }
<add>
<add> @Override
<add> public void onNext(Integer t) {
<add> count.incrementAndGet();
<add> }
<add> });
<add> assertEquals(num, upstreamCount.get());
<add> assertEquals(0, count.get());
<add> }
<add>
<add>}
<ide><path>src/test/java/rx/internal/operators/OperatorTakeLastOneTest.java
<ide>
<ide> import rx.Observable;
<ide> import rx.Subscriber;
<del>import rx.Subscription;
<ide> import rx.functions.Action0;
<ide> import rx.functions.Action1;
<ide> import rx.observers.TestSubscriber; | 4 |
Text | Text | fix myspace samy worm link [ci skip] | 1502c60ec270cb6b92af151637fc8b47f42b2816 | <ide><path>guides/source/security.md
<ide> In December 2006, 34,000 actual user names and passwords were stolen in a [MySpa
<ide>
<ide> INFO: _CSS Injection is actually JavaScript injection, because some browsers (IE, some versions of Safari and others) allow JavaScript in CSS. Think twice about allowing custom CSS in your web application._
<ide>
<del>CSS Injection is explained best by the well-known [MySpace Samy worm](https://samy.pl/popular/tech.html). This worm automatically sent a friend request to Samy (the attacker) simply by visiting his profile. Within several hours he had over 1 million friend requests, which created so much traffic that MySpace went offline. The following is a technical explanation of that worm.
<add>CSS Injection is explained best by the well-known [MySpace Samy worm](https://samy.pl/myspace/tech.html). This worm automatically sent a friend request to Samy (the attacker) simply by visiting his profile. Within several hours he had over 1 million friend requests, which created so much traffic that MySpace went offline. The following is a technical explanation of that worm.
<ide>
<ide> MySpace blocked many tags, but allowed CSS. So the worm's author put JavaScript into CSS like this:
<ide> | 1 |
Java | Java | fix element inspector | a876b234c6473fdabfef9b1ff728cd7e8ff62a52 | <ide><path>ReactAndroid/src/main/java/com/facebook/react/uimanager/UIManagerModule.java
<ide> public void findSubviewIn(
<ide> final Callback callback) {
<ide> mOperationsQueue.enqueueFindTargetForTouch(
<ide> reactTag,
<del> point.getInt(0),
<del> point.getInt(1),
<add> Math.round(PixelUtil.toPixelFromDIP(point.getDouble(0))),
<add> Math.round(PixelUtil.toPixelFromDIP(point.getDouble(1))),
<ide> callback);
<ide> }
<ide>
<ide><path>ReactAndroid/src/main/java/com/facebook/react/uimanager/UIViewOperationQueue.java
<ide> import com.facebook.react.animation.Animation;
<ide> import com.facebook.react.animation.AnimationRegistry;
<ide> import com.facebook.react.bridge.Callback;
<del>import com.facebook.react.bridge.ReactContext;
<ide> import com.facebook.react.bridge.ReactApplicationContext;
<add>import com.facebook.react.bridge.ReactContext;
<ide> import com.facebook.react.bridge.ReadableArray;
<ide> import com.facebook.systrace.Systrace;
<ide> import com.facebook.systrace.SystraceMessage;
<ide> public void execute() {
<ide>
<ide> final int touchTargetReactTag = mNativeViewHierarchyManager.findTargetTagForTouch(
<ide> mReactTag,
<del> PixelUtil.toPixelFromDIP(mTargetX) + containerX,
<del> PixelUtil.toPixelFromDIP(mTargetY) + containerY);
<add> mTargetX + containerX,
<add> mTargetY + containerY);
<ide>
<ide> try {
<ide> mNativeViewHierarchyManager.measure( | 2 |
PHP | PHP | update query building in translatebehavior | 0fc9a2c29c489177afeecaccf4eb7ddb7ab153a7 | <ide><path>lib/Cake/Model/Behavior/TranslateBehavior.php
<ide> public function beforeFind(Model $Model, $query) {
<ide> if (empty($query['fields'])) {
<ide> $addFields = $fields;
<ide> } elseif (is_array($query['fields'])) {
<add> $isAllFields = (
<add> in_array($Model->alias . '.' . '*', $query['fields']) ||
<add> in_array($Model->escapeField('*'), $query['fields'])
<add> );
<ide> foreach ($fields as $key => $value) {
<ide> $field = (is_numeric($key)) ? $value : $key;
<del>
<ide> if (
<del> in_array($Model->escapeField('*'), $query['fields']) ||
<add> $isAllFields ||
<ide> in_array($Model->alias . '.' . $field, $query['fields']) ||
<ide> in_array($field, $query['fields'])
<ide> ) { | 1 |
Javascript | Javascript | fix warnings of styleinspector | 97741af8b99ea8e443d41b80c4d9a8fd843a96ab | <ide><path>Libraries/Inspector/StyleInspector.js
<ide> class StyleInspector extends React.Component {
<ide> return (
<ide> <View style={styles.container}>
<ide> <View>
<del> {names.map(name => <Text style={styles.attr}>{name}:</Text>)}
<add> {names.map(name => <Text key={name} style={styles.attr}>{name}:</Text>)}
<ide> </View>
<ide>
<ide> <View>
<ide> {names.map(name => {
<ide> var value = typeof this.props.style[name] === 'object' ? JSON.stringify(this.props.style[name]) : this.props.style[name];
<del> return <Text style={styles.value}>{value}</Text>;
<add> return <Text key={name} style={styles.value}>{value}</Text>;
<ide> } ) }
<ide> </View>
<ide> </View> | 1 |
Mixed | Text | add docs update - part 1 | a9d36d4726fc8eea02184b089ee6ed1d02e4c75e | <ide><path>docs/api-guide/fields.md
<ide> Corresponds to `django.forms.fields.RegexField`
<ide>
<ide> A date representation.
<ide>
<del>Uses `DATE_INPUT_FORMATS` to validate date.
<del>
<ide> Optionally takes `format` as parameter to replace the matching pattern.
<ide>
<ide> Corresponds to `django.db.models.fields.DateField`
<ide>
<add>**Signature:** `DateField(input_formats=None, output_format=False)`
<add>
<add> - `input_formats` designates which input formats are supported. This will override the `DATE_INPUT_FORMATS`
<add>
<add> - `output_format` designates which output format will be used. This will override the `DATE_OUTPUT_FORMAT`
<add>
<ide> ## DateTimeField
<ide>
<ide> A date and time representation.
<ide>
<del>Uses `DATETIME_INPUT_FORMATS` to validate date_time.
<del>
<ide> Optionally takes `format` as parameter to replace the matching pattern.
<ide>
<ide> Corresponds to `django.db.models.fields.DateTimeField`
<ide> If you want to override this behavior, you'll need to declare the `DateTimeField
<ide> class Meta:
<ide> model = Comment
<ide>
<add>**Signature:** `DateTimeField(input_formats=None, output_format=False)`
<add>
<add> - `input_formats` designates which input formats are supported. This will override the `DATETIME_INPUT_FORMATS`
<add>
<add> - `output_format` designates which output format will be used. This will override the `DATETIME_OUTPUT_FORMAT`
<add>
<ide> ## TimeField
<ide>
<ide> A time representation.
<ide>
<del>Uses `TIME_INPUT_FORMATS` to validate time.
<del>
<ide> Optionally takes `format` as parameter to replace the matching pattern.
<ide>
<ide> Corresponds to `django.db.models.fields.TimeField`
<ide>
<add>**Signature:** `TimeField(input_formats=None, output_format=False)`
<add>
<add> - `input_formats` designates which input formats are supported. This will override the `TIME_INPUT_FORMATS`
<add>
<add> - `output_format` designates which output format will be used. This will override the `TIME_OUTPUT_FORMAT`
<add>
<ide> ## IntegerField
<ide>
<ide> An integer representation.
<ide><path>docs/api-guide/settings.md
<ide> The name of a parameter in the URL conf that may be used to provide a format suf
<ide>
<ide> Default: `'format'`
<ide>
<add>## DATE_INPUT_FORMATS
<add>
<add>Default:
<add>
<add> (
<add> '%Y-%m-%d', # '1984-07-31'
<add> )
<add>
<add>## DATE_OUTPUT_FORMAT
<add>
<add>## DATETIME_INPUT_FORMATS
<add>
<add>Default:
<add>
<add> (
<add> '%Y-%m-%d', # '1984-07-31'
<add> '%Y-%m-%d %H:%M', # '1984-07-31 04:31'
<add> '%Y-%m-%d %H:%M:%S', # '1984-07-31 04:31:59'
<add> '%Y-%m-%d %H:%M:%S.%f', # '1984-07-31 04:31:59.000200'
<add> )
<add>
<add>## DATETIME_OUTPUT_FORMAT
<add>
<add>## TIME_INPUT_FORMATS
<add>
<add>Default:
<add>
<add> (
<add> '%H:%M', # '04:31'
<add> '%H:%M:%S', # '04:31:59'
<add> '%H:%M:%S.%f', # '04:31:59.000200'
<add> )
<add>
<add>## TIME_OUTPUT_FORMAT
<add>
<ide> [cite]: http://www.python.org/dev/peps/pep-0020/
<ide><path>docs/topics/release-notes.md
<ide> You can determine your currently installed version using `pip freeze`:
<ide> * Request authentication is no longer lazily evaluated, instead authentication is always run, which results in more consistent, obvious behavior. Eg. Supplying bad auth credentials will now always return an error response, even if no permissions are set on the view.
<ide> * Bugfix for serializer data being uncacheable with pickle protocol 0.
<ide> * Bugfixes for model field validation edge-cases.
<del>* Support `DATE_INPUT_FORMATS` for `DateField` validation
<del>* Support `DATETIME_INPUT_FORMATS` for `DateTimeField` validation
<del>* Support `TIME_INPUT_FORMATS` for `TimeField` validation
<add>* Support for custom input and output formats for `DateField`, `DateTimeField` and `TimeField`
<ide>
<ide> ### 2.2.1
<ide>
<ide><path>rest_framework/fields.py
<ide> class DateField(WritableField):
<ide> }
<ide> empty = None
<ide>
<del> def __init__(self, *args, **kwargs):
<del> self.input_formats = kwargs.pop('input_formats', api_settings.DATE_INPUT_FORMATS)
<del> self.output_format = kwargs.pop('output_format', api_settings.DATE_OUTPUT_FORMAT)
<add> def __init__(self, input_formats=None, output_format=None, *args, **kwargs):
<add> self.input_formats = input_formats or api_settings.DATE_INPUT_FORMATS
<add> self.output_format = output_format or api_settings.DATE_OUTPUT_FORMAT
<ide> super(DateField, self).__init__(*args, **kwargs)
<ide>
<ide> def from_native(self, value):
<ide> class DateTimeField(WritableField):
<ide> }
<ide> empty = None
<ide>
<del> def __init__(self, *args, **kwargs):
<del> self.input_formats = kwargs.pop('input_formats', api_settings.DATETIME_INPUT_FORMATS)
<del> self.output_format = kwargs.pop('output_format', api_settings.DATETIME_OUTPUT_FORMAT)
<add> def __init__(self, input_formats=None, output_format=None, *args, **kwargs):
<add> self.input_formats = input_formats or api_settings.DATETIME_INPUT_FORMATS
<add> self.output_format = output_format or api_settings.DATETIME_OUTPUT_FORMAT
<ide> super(DateTimeField, self).__init__(*args, **kwargs)
<ide>
<ide> def from_native(self, value):
<ide> class TimeField(WritableField):
<ide> }
<ide> empty = None
<ide>
<del> def __init__(self, *args, **kwargs):
<del> self.input_formats = kwargs.pop('input_formats', api_settings.TIME_INPUT_FORMATS)
<del> self.output_format = kwargs.pop('output_format', api_settings.TIME_OUTPUT_FORMAT)
<add> def __init__(self, input_formats=None, output_format=None, *args, **kwargs):
<add> self.input_formats = input_formats or api_settings.TIME_INPUT_FORMATS
<add> self.output_format = output_format or api_settings.TIME_OUTPUT_FORMAT
<ide> super(TimeField, self).__init__(*args, **kwargs)
<ide>
<ide> def from_native(self, value): | 4 |
Text | Text | add performance notes for fs.readfile | 408e9d3a3a26834a83cc3e77269ccbd3047a8903 | <ide><path>doc/api/fs.md
<ide> system requests but rather the internal buffering `fs.readFile` performs.
<ide> the call to `fs.readFile()` with the same file descriptor, would give
<ide> `'World'`, rather than `'Hello World'`.
<ide>
<add>### Performance Considerations
<add>
<add>The `fs.readFile()` method asynchronously reads the contents of a file into
<add>memory one chunk at a time, allowing the event loop to turn between each chunk.
<add>This allows the read operation to have less impact on other activity that may
<add>be using the underlying libuv thread pool but means that it will take longer
<add>to read a complete file into memory.
<add>
<add>The additional read overhead can vary broadly on different systems and depends
<add>on the type of file being read. If the file type is not a regular file (a pipe
<add>for instance) and Node.js is unable to determine an actual file size, each read
<add>operation will load 64kb of data. For regular files, each read will process
<add>512kb of data.
<add>
<add>For applications that require as-fast-as-possible reading of file contents, it
<add>is better to use `fs.read()` directly and for application code to manage
<add>reading the full contents of the file itself.
<add>
<add>The Node.js GitHub issue [#25741][] provides more information and a detailed
<add>analysis on the performance of `fs.readFile()` for multiple file sizes in
<add>different Node.js versions.
<add>
<ide> ## `fs.readFileSync(path[, options])`
<ide> <!-- YAML
<ide> added: v0.1.8
<ide> through `fs.open()` or `fs.writeFile()` or `fsPromises.open()`) will fail with
<ide> A call to `fs.ftruncate()` or `filehandle.truncate()` can be used to reset
<ide> the file contents.
<ide>
<add>[#25741]: https://github.com/nodejs/node/issues/25741
<ide> [Caveats]: #fs_caveats
<ide> [Common System Errors]: errors.md#errors_common_system_errors
<ide> [FS constants]: #fs_fs_constants_1 | 1 |
Text | Text | fix docs for | 857e0ba32e38f02b96e7c6753d902aca7a7922fc | <ide><path>docs/reference/commandline/version.md
<ide> weight=1
<ide>
<ide> Show the Docker version information.
<ide>
<del>Show the Docker version, API version, Git commit, Go version and
<del>OS/architecture of both Docker client and daemon. Example use:
<add>Show the Docker version, API version, Go version, Git commit, Build date/time,
<add>and OS/architecture of both Docker client and daemon. Example use:
<ide>
<ide> $ docker version
<del> Client version: 1.5.0
<del> Client API version: 1.17
<del> Go version (client): go1.4.1
<del> Git commit (client): a8a31ef
<del> OS/Arch (client): darwin/amd64
<del> Server version: 1.5.0
<del> Server API version: 1.17
<del> Go version (server): go1.4.1
<del> Git commit (server): a8a31ef
<del> OS/Arch (server): linux/amd64
<ide>\ No newline at end of file
<add> Client:
<add> Version: 1.8.0
<add> API version: 1.20
<add> Go version: go1.4.2
<add> Git commit: f5bae0a
<add> Built: Tue Jun 23 17:56:00 UTC 2015
<add> OS/Arch: linux/amd64
<add>
<add> Server:
<add> Version: 1.8.0
<add> API version: 1.20
<add> Go version: go1.4.2
<add> Git commit: f5bae0a
<add> Built: Tue Jun 23 17:56:00 UTC 2015
<add> OS/Arch: linux/amd64
<ide>\ No newline at end of file
<ide><path>man/docker-version.1.md
<ide> % DOCKER(1) Docker User Manuals
<ide> % Docker Community
<del>% JUNE 2014
<add>% JUNE 2015
<ide> # NAME
<ide> docker-version - Show the Docker version information.
<ide>
<ide> # SYNOPSIS
<ide> **docker version**
<ide>
<add># DESCRIPTION
<add>This command displays version information for both the Docker client and
<add>daemon.
<ide>
<ide> # OPTIONS
<ide> There are no available options.
<ide>
<add># EXAMPLES
<add>
<add>## Display Docker version information
<add>
<add>Here is a sample output:
<add>
<add> $ docker version
<add> Client:
<add> Version: 1.8.0
<add> API version: 1.20
<add> Go version: go1.4.2
<add> Git commit: f5bae0a
<add> Built: Tue Jun 23 17:56:00 UTC 2015
<add> OS/Arch: linux/amd64
<add>
<add> Server:
<add> Version: 1.8.0
<add> API version: 1.20
<add> Go version: go1.4.2
<add> Git commit: f5bae0a
<add> Built: Tue Jun 23 17:56:00 UTC 2015
<add> OS/Arch: linux/amd64
<add>
<ide> # HISTORY
<ide> June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
<add>June 2015, updated by John Howard <jhoward@microsoft.com> | 2 |
Javascript | Javascript | follow jquery for index of arrays of objects | 18a2e4fbfc44216c31bbcdf7705ca87c53e6f1fa | <ide><path>src/ng/http.js
<ide> function $HttpParamSerializerJQLikeProvider() {
<ide> function serialize(toSerialize, prefix, topLevel) {
<ide> if (toSerialize === null || isUndefined(toSerialize)) return;
<ide> if (isArray(toSerialize)) {
<del> forEach(toSerialize, function(value) {
<del> serialize(value, prefix + '[]');
<add> forEach(toSerialize, function(value, index) {
<add> serialize(value, prefix + '[' + (isObject(value) ? index : '') + ']');
<ide> });
<ide> } else if (isObject(toSerialize) && !isDate(toSerialize)) {
<ide> forEachSorted(toSerialize, function(value, key) {
<ide><path>test/ng/httpSpec.js
<ide> describe('$http param serializers', function() {
<ide>
<ide> it('should serialize nested objects by repeating param name with [key] suffix', function() {
<ide> expect(jqrSer({a: ['b', {c: 'd'}], e: {f: 'g', 'h': ['i', 'j']}})).toEqual(
<del> 'a%5B%5D=b&a%5B%5D%5Bc%5D=d&e%5Bf%5D=g&e%5Bh%5D%5B%5D=i&e%5Bh%5D%5B%5D=j');
<del> //a[]=b&a[][c]=d&e[f]=g&e[h][]=i&e[h][]=j
<add> 'a%5B%5D=b&a%5B1%5D%5Bc%5D=d&e%5Bf%5D=g&e%5Bh%5D%5B%5D=i&e%5Bh%5D%5B%5D=j');
<add> //a[]=b&a[1][c]=d&e[f]=g&e[h][]=i&e[h][]=j
<add> });
<add>
<add> it('should serialize objects inside array elements using their index', function() {
<add> expect(jqrSer({a: ['b', 'c'], d: [{e: 'f', g: 'h'}, 'i', {j: 'k'}]})).toEqual(
<add> 'a%5B%5D=b&a%5B%5D=c&d%5B0%5D%5Be%5D=f&d%5B0%5D%5Bg%5D=h&d%5B%5D=i&d%5B2%5D%5Bj%5D=k');
<add> //a[]=b&a[]=c&d[0][e]=f&d[0][g]=h&d[]=i&d[2][j]=k
<ide> });
<ide> });
<ide> | 2 |
Ruby | Ruby | push the collectors up to the abstract adapter | 8caaa08ae1cd76b26a89b0d29d50396ffd76882e | <ide><path>activerecord/lib/active_record/connection_adapters/abstract_adapter.rb
<ide> require 'active_record/connection_adapters/abstract/schema_dumper'
<ide> require 'active_record/connection_adapters/abstract/schema_creation'
<ide> require 'monitor'
<add>require 'arel/collectors/bind'
<add>require 'arel/collectors/sql_string'
<ide>
<ide> module ActiveRecord
<ide> module ConnectionAdapters # :nodoc:
<ide> def initialize(connection, logger = nil, pool = nil) #:nodoc:
<ide> @prepared_statements = false
<ide> end
<ide>
<add> class BindCollector < Arel::Collectors::Bind
<add> def compile(bvs, conn)
<add> super(bvs.map { |bv| conn.quote(*bv.reverse) })
<add> end
<add> end
<add>
<add> class SQLString < Arel::Collectors::SQLString
<add> def compile(bvs, conn)
<add> super(bvs)
<add> end
<add> end
<add>
<add> def collector
<add> if @prepared_statements
<add> SQLString.new
<add> else
<add> BindCollector.new
<add> end
<add> end
<add>
<ide> def valid_type?(type)
<ide> true
<ide> end
<ide><path>activerecord/lib/active_record/connection_adapters/abstract_mysql_adapter.rb
<ide> def initialize(connection, logger, connection_options, config)
<ide> end
<ide> end
<ide>
<del> class BindCollector < Arel::Collectors::Bind
<del> def compile(bvs, conn)
<del> super(bvs.map { |bv| conn.quote(*bv.reverse) })
<del> end
<del> end
<del>
<del> class SQLString < Arel::Collectors::SQLString
<del> def compile(bvs, conn)
<del> super(bvs)
<del> end
<del> end
<del>
<del> def collector
<del> if @prepared_statements
<del> SQLString.new
<del> else
<del> BindCollector.new
<del> end
<del> end
<del>
<ide> def adapter_name #:nodoc:
<ide> self.class::ADAPTER_NAME
<ide> end
<ide><path>activerecord/lib/active_record/connection_adapters/sqlite3_adapter.rb
<ide> def initialize(connection, logger, config)
<ide> end
<ide> end
<ide>
<del> class BindCollector < Arel::Collectors::Bind
<del> def compile(bvs, conn)
<del> super(bvs.map { |bv| conn.quote(*bv.reverse) })
<del> end
<del> end
<del>
<del> class SQLString < Arel::Collectors::SQLString
<del> def compile(bvs, conn)
<del> super(bvs)
<del> end
<del> end
<del>
<del> def collector
<del> if @prepared_statements
<del> SQLString.new
<del> else
<del> BindCollector.new
<del> end
<del> end
<del>
<ide> def adapter_name #:nodoc:
<ide> 'SQLite'
<ide> end | 3 |
Mixed | Text | add changelog enty for | f875d319ad32d63ce2d3e8a343b059858f28dd97 | <ide><path>activerecord/CHANGELOG.md
<add>* Support array as root element in JSON fields.
<add>
<add> *Alexey Noskov & Francesco Rodriguez*
<add>
<ide> * Removed support for deprecated `counter_sql` in associations.
<ide>
<ide> *Neeraj Singh*
<ide><path>activerecord/test/cases/adapters/postgresql/json_test.rb
<ide> def test_rewrite_array_json_value
<ide> x.payload = ['v1', {'k2' => 'v2'}, 'v3']
<ide> assert x.save!
<ide> end
<del>
<ide> end | 2 |
Text | Text | update security docs for seccomp/apparmor | 61553fc2f538a7fe8f83e6b41a93722b5c61d374 | <ide><path>docs/installation/binaries.md
<ide> need to add `sudo` to all the client commands.
<ide>
<ide> > **Warning**:
<ide> > The *docker* group (or the group specified with `-G`) is root-equivalent;
<del>> see [*Docker Daemon Attack Surface*](../articles/security.md#docker-daemon-attack-surface) details.
<add>> see [*Docker Daemon Attack Surface*](../security/security.md#docker-daemon-attack-surface) details.
<ide>
<ide> ## Upgrades
<ide>
<ide><path>docs/installation/centos.md
<ide> makes the ownership of the Unix socket read/writable by the `docker` group.
<ide>
<ide> >**Warning**: The `docker` group is equivalent to the `root` user; For details
<ide> >on how this impacts security in your system, see [*Docker Daemon Attack
<del>>Surface*](../articles/security.md#docker-daemon-attack-surface) for details.
<add>>Surface*](../security/security.md#docker-daemon-attack-surface) for details.
<ide>
<ide> To create the `docker` group and add your user:
<ide>
<ide><path>docs/installation/debian.md
<ide> use the `-G` flag to specify an alternative group.
<ide>
<ide> > **Warning**:
<ide> > The `docker` group (or the group specified with the `-G` flag) is
<del>> `root`-equivalent; see [*Docker Daemon Attack Surface*](../articles/security.md#docker-daemon-attack-surface) details.
<add>> `root`-equivalent; see [*Docker Daemon Attack Surface*](../security/security.md#docker-daemon-attack-surface) details.
<ide>
<ide> **Example:**
<ide>
<ide><path>docs/installation/fedora.md
<ide> makes the ownership of the Unix socket read/writable by the `docker` group.
<ide>
<ide> >**Warning**: The `docker` group is equivalent to the `root` user; For details
<ide> >on how this impacts security in your system, see [*Docker Daemon Attack
<del>>Surface*](../articles/security.md#docker-daemon-attack-surface) for details.
<add>>Surface*](../security/security.md#docker-daemon-attack-surface) for details.
<ide>
<ide> To create the `docker` group and add your user:
<ide>
<ide><path>docs/installation/oracle.md
<ide> makes the ownership of the Unix socket read/writable by the `docker` group.
<ide>
<ide> >**Warning**: The `docker` group is equivalent to the `root` user; For details
<ide> >on how this impacts security in your system, see [*Docker Daemon Attack
<del>>Surface*](../articles/security.md#docker-daemon-attack-surface) for details.
<add>>Surface*](../security/security.md#docker-daemon-attack-surface) for details.
<ide>
<ide> To create the `docker` group and add your user:
<ide>
<ide><path>docs/installation/rhel.md
<ide> makes the ownership of the Unix socket read/writable by the `docker` group.
<ide>
<ide> >**Warning**: The `docker` group is equivalent to the `root` user; For details
<ide> >on how this impacts security in your system, see [*Docker Daemon Attack
<del>>Surface*](../articles/security.md#docker-daemon-attack-surface) for details.
<add>>Surface*](../security/security.md#docker-daemon-attack-surface) for details.
<ide>
<ide> To create the `docker` group and add your user:
<ide>
<ide><path>docs/installation/ubuntulinux.md
<ide> makes the ownership of the Unix socket read/writable by the `docker` group.
<ide>
<ide> >**Warning**: The `docker` group is equivalent to the `root` user; For details
<ide> >on how this impacts security in your system, see [*Docker Daemon Attack
<del>>Surface*](../articles/security.md#docker-daemon-attack-surface) for details.
<add>>Surface*](../security/security.md#docker-daemon-attack-surface) for details.
<ide>
<ide> To create the `docker` group and add your user:
<ide>
<ide><path>docs/reference/commandline/login.md
<ide> adding the server name.
<ide> `docker login` requires user to use `sudo` or be `root`, except when:
<ide>
<ide> 1. connecting to a remote daemon, such as a `docker-machine` provisioned `docker engine`.
<del>2. user is added to the `docker` group. This will impact the security of your system; the `docker` group is `root` equivalent. See [Docker Daemon Attack Surface](https://docs.docker.com/articles/security/#docker-daemon-attack-surface) for details.
<add>2. user is added to the `docker` group. This will impact the security of your system; the `docker` group is `root` equivalent. See [Docker Daemon Attack Surface](https://docs.docker.com/security/security/#docker-daemon-attack-surface) for details.
<ide>
<ide> You can log into any public or private repository for which you have
<ide> credentials. When you log in, the command stores encoded credentials in
<ide><path>docs/security/apparmor.md
<ide> <!-- [metadata]>
<ide> +++
<del>draft = true
<add>title = "AppArmor security profiles for Docker"
<add>description = "Enabling AppArmor in Docker"
<add>keywords = ["AppArmor, security, docker, documentation"]
<add>[menu.main]
<add>parent= "smn_secure_docker"
<ide> +++
<ide> <![end-metadata]-->
<ide>
<del>AppArmor security profiles for Docker
<del>--------------------------------------
<add># AppArmor security profiles for Docker
<ide>
<del>AppArmor (Application Armor) is a security module that allows a system
<del>administrator to associate a security profile with each program. Docker
<add>AppArmor (Application Armor) is a Linux security module that protects an
<add>operating system and its applications from security threats. To use it, a system
<add>administrator associates an AppArmor security profile with each program. Docker
<ide> expects to find an AppArmor policy loaded and enforced.
<ide>
<del>Container profiles are loaded automatically by Docker. A profile
<del>for the Docker Engine itself also exists and is installed
<del>with the official *.deb* packages. Advanced users and package
<del>managers may find the profile for */usr/bin/docker* underneath
<del>[contrib/apparmor](https://github.com/docker/docker/tree/master/contrib/apparmor)
<del>in the Docker Engine source repository.
<add>Docker automatically loads container profiles. A profile for the Docker Engine
<add>itself also exists and is installed with the official *.deb* packages in the
<add>`/etc/apparmor.d/docker` file.
<add>
<ide>
<add>## Understand the policies
<add>
<add>The `docker-default` profile is the default for running containers. It is
<add>moderately protective while providing wide application compatibility. The
<add>profile is the following:
<add>
<add>```
<add>#include <tunables/global>
<ide>
<del>Understand the policies
<del>------------------------
<ide>
<del>The `docker-default` profile the default for running
<del>containers. It is moderately protective while
<del>providing wide application compatibility.
<add>profile docker-default flags=(attach_disconnected,mediate_deleted) {
<ide>
<del>The system's standard `unconfined` profile inherits all
<del>system-wide policies, applying path-based policies
<del>intended for the host system inside of containers.
<del>This was the default for privileged containers
<del>prior to Docker 1.8.
<add> #include <abstractions/base>
<ide>
<ide>
<del>Overriding the profile for a container
<del>---------------------------------------
<add> network,
<add> capability,
<add> file,
<add> umount,
<ide>
<del>Users may override the AppArmor profile using the
<del>`security-opt` option (per-container).
<add> deny @{PROC}/{*,**^[0-9*],sys/kernel/shm*} wkx,
<add> deny @{PROC}/sysrq-trigger rwklx,
<add> deny @{PROC}/mem rwklx,
<add> deny @{PROC}/kmem rwklx,
<add> deny @{PROC}/kcore rwklx,
<ide>
<del>For example, the following explicitly specifies the default policy:
<add> deny mount,
<ide>
<add> deny /sys/[^f]*/** wklx,
<add> deny /sys/f[^s]*/** wklx,
<add> deny /sys/fs/[^c]*/** wklx,
<add> deny /sys/fs/c[^g]*/** wklx,
<add> deny /sys/fs/cg[^r]*/** wklx,
<add> deny /sys/firmware/efi/efivars/** rwklx,
<add> deny /sys/kernel/security/** rwklx,
<add>}
<ide> ```
<add>
<add>When you run a container, it uses the `docker-default` policy unless you
<add>override it with the `security-opt` option. For example, the following
<add>explicitly specifies the default policy:
<add>
<add>```bash
<ide> $ docker run --rm -it --security-opt apparmor:docker-default hello-world
<ide> ```
<ide>
<add>## Contributing to AppArmor code in Docker
<add>
<add>Advanced users and package managers can find a profile for `/usr/bin/docker`
<add>underneath
<add>[contrib/apparmor](https://github.com/docker/docker/tree/master/contrib/apparmor)
<add>in the Docker Engine source repository.
<ide><path>docs/security/index.md
<add><!-- [metadata]>
<add>+++
<add>title = "Work with Docker security"
<add>description = "Sec"
<add>keywords = ["seccomp, security, docker, documentation"]
<add>[menu.main]
<add>identifier="smn_secure_docker"
<add>parent= "mn_use_docker"
<add>+++
<add><![end-metadata]-->
<add>
<add># Work with Docker security
<add>
<add>This section discusses the security features you can configure and use within your Docker Engine installation.
<add>
<add>* You can configure Docker's trust features so that your users can push and pull trusted images. To learn how to do this, see [Use trusted images](trust/index.md) in this section.
<add>
<add>* You can configure secure computing mode (Seccomp) policies to secure system calls in a container. For more information, see [Seccomp security profiles for Docker](seccomp.md).
<add>
<add>* An AppArmor profile for Docker is installed with the official *.deb* packages. For information about this profile and overriding it, see [AppArmor security profiles for Docker](apparmor.md).
<ide><path>docs/security/seccomp.md
<ide> title = "Seccomp security profiles for Docker"
<ide> description = "Enabling seccomp in Docker"
<ide> keywords = ["seccomp, security, docker, documentation"]
<add>[menu.main]
<add>parent= "smn_secure_docker"
<ide> +++
<ide> <![end-metadata]-->
<ide>
<del>Seccomp security profiles for Docker
<del>------------------------------------
<add># Seccomp security profiles for Docker
<ide>
<del>The seccomp() system call operates on the Secure Computing (seccomp)
<del>state of the calling process.
<add>Secure computing mode (Seccomp) is a Linux kernel feature. You can use it to
<add>restrict the actions available within the container. The `seccomp()` system
<add>call operates on the seccomp state of the calling process. You can use this
<add>feature to restrict your application's access.
<ide>
<del>This operation is available only if the kernel is configured
<del>with `CONFIG_SECCOMP` enabled.
<add>This feature is available only if the kernel is configured with `CONFIG_SECCOMP`
<add>enabled.
<ide>
<del>This allows for allowing or denying of certain syscalls in a container.
<add>## Passing a profile for a container
<ide>
<del>Passing a profile for a container
<del>---------------------------------
<del>
<del>Users may pass a seccomp profile using the `security-opt` option
<del>(per-container).
<del>
<del>The profile has layout in the following form:
<add>The default seccomp profile provides a sane default for running containers with
<add>seccomp. It is moderately protective while providing wide application
<add>compatibility. The default Docker profile has the following layout:
<ide>
<ide> ```
<ide> {
<ide> The profile has layout in the following form:
<ide> }
<ide> ```
<ide>
<del>Then you can run with:
<add>When you run a container, it uses the default profile unless you override
<add>it with the `security-opt` option. For example, the following explicitly
<add>passes a custom profile to the container:
<ide>
<ide> ```
<ide> $ docker run --rm -it --security-opt seccomp:/path/to/seccomp/profile.json hello-world
<ide> ```
<ide>
<del>Default Profile
<del>---------------
<del>
<del>The default seccomp profile provides a sane default for running
<del>containers with seccomp. It is moderately protective while
<del>providing wide application compatibility.
<del>
<del>
<del>### Overriding the default profile for a container
<del>
<del>You can pass `unconfined` to run a container without the default seccomp
<del>profile.
<del>
<del>```
<del>$ docker run --rm -it --security-opt seccomp:unconfined debian:jessie \
<del> unshare --map-root-user --user sh -c whoami
<del>```
<del>
<ide> ### Syscalls blocked by the default profile
<ide>
<ide> Docker's default seccomp profile is a whitelist which specifies the calls that
<ide> the reason each syscall is blocked rather than white-listed.
<ide> | Syscall | Description |
<ide> |---------------------|---------------------------------------------------------------------------------------------------------------------------------------|
<ide> | `acct` | Accounting syscall which could let containers disable their own resource limits or process accounting. Also gated by `CAP_SYS_PACCT`. |
<del>| `add_key` | Prevent containers from using the kernel keyring, which is not namespaced. |
<del>| `adjtimex` | Similar to `clock_settime` and `settimeofday`, time/date is not namespaced. |
<del>| `bpf` | Deny loading potentially persistent bpf programs into kernel, already gated by `CAP_SYS_ADMIN`. |
<del>| `clock_adjtime` | Time/date is not namespaced. |
<del>| `clock_settime` | Time/date is not namespaced. |
<del>| `clone` | Deny cloning new namespaces. Also gated by `CAP_SYS_ADMIN` for CLONE_* flags, except `CLONE_USERNS`. |
<del>| `create_module` | Deny manipulation and functions on kernel modules. |
<del>| `delete_module` | Deny manipulation and functions on kernel modules. Also gated by `CAP_SYS_MODULE`. |
<del>| `finit_module` | Deny manipulation and functions on kernel modules. Also gated by `CAP_SYS_MODULE`. |
<del>| `get_kernel_syms` | Deny retrieval of exported kernel and module symbols. |
<del>| `get_mempolicy` | Syscall that modifies kernel memory and NUMA settings. Already gated by `CAP_SYS_NICE`. |
<del>| `init_module` | Deny manipulation and functions on kernel modules. Also gated by `CAP_SYS_MODULE`. |
<del>| `ioperm` | Prevent containers from modifying kernel I/O privilege levels. Already gated by `CAP_SYS_RAWIO`. |
<del>| `iopl` | Prevent containers from modifying kernel I/O privilege levels. Already gated by `CAP_SYS_RAWIO`. |
<del>| `kcmp` | Restrict process inspection capabilities, already blocked by dropping `CAP_PTRACE`. |
<del>| `kexec_file_load` | Sister syscall of `kexec_load` that does the same thing, slightly different arguments. |
<del>| `kexec_load` | Deny loading a new kernel for later execution. |
<del>| `keyctl` | Prevent containers from using the kernel keyring, which is not namespaced. |
<del>| `lookup_dcookie` | Tracing/profiling syscall, which could leak a lot of information on the host. |
<del>| `mbind` | Syscall that modifies kernel memory and NUMA settings. Already gated by `CAP_SYS_NICE`. |
<del>| `modify_ldt` | Old syscall only used in 16-bit code and a potential information leak. |
<del>| `mount` | Deny mounting, already gated by `CAP_SYS_ADMIN`. |
<del>| `move_pages` | Syscall that modifies kernel memory and NUMA settings. |
<del>| `name_to_handle_at` | Sister syscall to `open_by_handle_at`. Already gated by `CAP_SYS_NICE`. |
<del>| `nfsservctl` | Deny interaction with the kernel nfs daemon. |
<del>| `open_by_handle_at` | Cause of an old container breakout. Also gated by `CAP_DAC_READ_SEARCH`. |
<del>| `perf_event_open` | Tracing/profiling syscall, which could leak a lot of information on the host. |
<del>| `personality` | Prevent container from enabling BSD emulation. Not inherently dangerous, but poorly tested, potential for a lot of kernel vulns. |
<del>| `pivot_root` | Deny `pivot_root`, should be privileged operation. |
<del>| `process_vm_readv` | Restrict process inspection capabilities, already blocked by dropping `CAP_PTRACE`. |
<del>| `process_vm_writev` | Restrict process inspection capabilities, already blocked by dropping `CAP_PTRACE`. |
<del>| `ptrace` | Tracing/profiling syscall, which could leak a lot of information on the host. Already blocked by dropping `CAP_PTRACE`. |
<del>| `query_module` | Deny manipulation and functions on kernel modules. |
<del>| `quotactl` | Quota syscall which could let containers disable their own resource limits or process accounting. Also gated by `CAP_SYS_ADMIN`. |
<del>| `reboot` | Don't let containers reboot the host. Also gated by `CAP_SYS_BOOT`. |
<add>| `add_key` | Prevent containers from using the kernel keyring, which is not namespaced. |
<add>| `adjtimex` | Similar to `clock_settime` and `settimeofday`, time/date is not namespaced. |
<add>| `bpf` | Deny loading potentially persistent bpf programs into kernel, already gated by `CAP_SYS_ADMIN`. |
<add>| `clock_adjtime` | Time/date is not namespaced. |
<add>| `clock_settime` | Time/date is not namespaced. |
<add>| `clone` | Deny cloning new namespaces. Also gated by `CAP_SYS_ADMIN` for CLONE_* flags, except `CLONE_USERNS`. |
<add>| `create_module` | Deny manipulation and functions on kernel modules. |
<add>| `delete_module` | Deny manipulation and functions on kernel modules. Also gated by `CAP_SYS_MODULE`. |
<add>| `finit_module` | Deny manipulation and functions on kernel modules. Also gated by `CAP_SYS_MODULE`. |
<add>| `get_kernel_syms` | Deny retrieval of exported kernel and module symbols. |
<add>| `get_mempolicy` | Syscall that modifies kernel memory and NUMA settings. Already gated by `CAP_SYS_NICE`. |
<add>| `init_module` | Deny manipulation and functions on kernel modules. Also gated by `CAP_SYS_MODULE`. |
<add>| `ioperm` | Prevent containers from modifying kernel I/O privilege levels. Already gated by `CAP_SYS_RAWIO`. |
<add>| `iopl` | Prevent containers from modifying kernel I/O privilege levels. Already gated by `CAP_SYS_RAWIO`. |
<add>| `kcmp` | Restrict process inspection capabilities, already blocked by dropping `CAP_PTRACE`. |
<add>| `kexec_file_load` | Sister syscall of `kexec_load` that does the same thing, slightly different arguments. |
<add>| `kexec_load` | Deny loading a new kernel for later execution. |
<add>| `keyctl` | Prevent containers from using the kernel keyring, which is not namespaced. |
<add>| `lookup_dcookie` | Tracing/profiling syscall, which could leak a lot of information on the host. |
<add>| `mbind` | Syscall that modifies kernel memory and NUMA settings. Already gated by `CAP_SYS_NICE`. |
<add>| `modify_ldt` | Old syscall only used in 16-bit code and a potential information leak. |
<add>| `mount` | Deny mounting, already gated by `CAP_SYS_ADMIN`. |
<add>| `move_pages` | Syscall that modifies kernel memory and NUMA settings. |
<add>| `name_to_handle_at` | Sister syscall to `open_by_handle_at`. Already gated by `CAP_SYS_NICE`. |
<add>| `nfsservctl` | Deny interaction with the kernel nfs daemon. |
<add>| `open_by_handle_at` | Cause of an old container breakout. Also gated by `CAP_DAC_READ_SEARCH`. |
<add>| `perf_event_open` | Tracing/profiling syscall, which could leak a lot of information on the host. |
<add>| `personality` | Prevent container from enabling BSD emulation. Not inherently dangerous, but poorly tested, potential for a lot of kernel vulns. |
<add>| `pivot_root` | Deny `pivot_root`, should be privileged operation. |
<add>| `process_vm_readv` | Restrict process inspection capabilities, already blocked by dropping `CAP_PTRACE`. |
<add>| `process_vm_writev` | Restrict process inspection capabilities, already blocked by dropping `CAP_PTRACE`. |
<add>| `ptrace` | Tracing/profiling syscall, which could leak a lot of information on the host. Already blocked by dropping `CAP_PTRACE`. |
<add>| `query_module` | Deny manipulation and functions on kernel modules. |
<add>| `quotactl` | Quota syscall which could let containers disable their own resource limits or process accounting. Also gated by `CAP_SYS_ADMIN`. |
<add>| `reboot` | Don't let containers reboot the host. Also gated by `CAP_SYS_BOOT`. |
<ide> | `restart_syscall` | Don't allow containers to restart a syscall. Possible seccomp bypass see: https://code.google.com/p/chromium/issues/detail?id=408827. |
<del>| `request_key` | Prevent containers from using the kernel keyring, which is not namespaced. |
<del>| `set_mempolicy` | Syscall that modifies kernel memory and NUMA settings. Already gated by `CAP_SYS_NICE`. |
<del>| `setns` | Deny associating a thread with a namespace. Also gated by `CAP_SYS_ADMIN`. |
<del>| `settimeofday` | Time/date is not namespaced. Also gated by `CAP_SYS_TIME`. |
<del>| `stime` | Time/date is not namespaced. Also gated by `CAP_SYS_TIME`. |
<del>| `swapon` | Deny start/stop swapping to file/device. Also gated by `CAP_SYS_ADMIN`. |
<del>| `swapoff` | Deny start/stop swapping to file/device. Also gated by `CAP_SYS_ADMIN`. |
<del>| `sysfs` | Obsolete syscall. |
<del>| `_sysctl` | Obsolete, replaced by /proc/sys. |
<del>| `umount` | Should be a privileged operation. Also gated by `CAP_SYS_ADMIN`. |
<del>| `umount2` | Should be a privileged operation. |
<del>| `unshare` | Deny cloning new namespaces for processes. Also gated by `CAP_SYS_ADMIN`, with the exception of `unshare --user`. |
<del>| `uselib` | Older syscall related to shared libraries, unused for a long time. |
<del>| `ustat` | Obsolete syscall. |
<del>| `vm86` | In kernel x86 real mode virtual machine. Also gated by `CAP_SYS_ADMIN`. |
<del>| `vm86old` | In kernel x86 real mode virtual machine. Also gated by `CAP_SYS_ADMIN`. |
<add>| `request_key` | Prevent containers from using the kernel keyring, which is not namespaced. |
<add>| `set_mempolicy` | Syscall that modifies kernel memory and NUMA settings. Already gated by `CAP_SYS_NICE`. |
<add>| `setns` | Deny associating a thread with a namespace. Also gated by `CAP_SYS_ADMIN`. |
<add>| `settimeofday` | Time/date is not namespaced. Also gated by `CAP_SYS_TIME`. |
<add>| `stime` | Time/date is not namespaced. Also gated by `CAP_SYS_TIME`. |
<add>| `swapon` | Deny start/stop swapping to file/device. Also gated by `CAP_SYS_ADMIN`. |
<add>| `swapoff` | Deny start/stop swapping to file/device. Also gated by `CAP_SYS_ADMIN`. |
<add>| `sysfs` | Obsolete syscall. |
<add>| `_sysctl` | Obsolete, replaced by /proc/sys. |
<add>| `umount` | Should be a privileged operation. Also gated by `CAP_SYS_ADMIN`. |
<add>| `umount2` | Should be a privileged operation. |
<add>| `unshare` | Deny cloning new namespaces for processes. Also gated by `CAP_SYS_ADMIN`, with the exception of `unshare --user`. |
<add>| `uselib` | Older syscall related to shared libraries, unused for a long time. |
<add>| `ustat` | Obsolete syscall. |
<add>| `vm86` | In kernel x86 real mode virtual machine. Also gated by `CAP_SYS_ADMIN`. |
<add>| `vm86old` | In kernel x86 real mode virtual machine. Also gated by `CAP_SYS_ADMIN`. |
<add>
<add>## Run without the default seccomp profile
<add>
<add>You can pass `unconfined` to run a container without the default seccomp
<add>profile.
<add>
<add>```
<add>$ docker run --rm -it --security-opt seccomp:unconfined debian:jessie \
<add> unshare --map-root-user --user sh -c whoami
<add>```
<add><path>docs/security/security.md
<del><path>docs/articles/security.md
<ide> <!--[metadata]>
<ide> +++
<add>aliases = ["/engine/articles/security/"]
<ide> title = "Docker security"
<ide> description = "Review of the Docker Daemon attack surface"
<ide> keywords = ["Docker, Docker documentation, security"]
<ide> [menu.main]
<del>parent = "smn_administrate"
<del>weight = 2
<add>parent = "smn_secure_docker"
<add>weight =-99
<ide> +++
<ide> <![end-metadata]-->
<ide>
<ide> containerization systems, these are simply kernels features that may
<ide> be implemented in Docker as well. We welcome users to submit issues,
<ide> pull requests, and communicate via the mailing list.
<ide>
<del>References:
<add>## Related Information
<ide>
<del>* [Docker Containers: How Secure Are They? (2013)](
<del>http://blog.docker.com/2013/08/containers-docker-how-secure-are-they/).
<del>* [On the Security of Containers (2014)](https://medium.com/@ewindisch/on-the-security-of-containers-2c60ffe25a9e).
<add>* [Use trusted images](../security/trust/index.md)
<add>* [Seccomp security profiles for Docker](../security/seccomp.md)
<add>* [AppArmor security profiles for Docker](../security/apparmor.md)
<add>* [On the Security of Containers (2014)](https://medium.com/@ewindisch/on-the-security-of-containers-2c60ffe25a9e) | 12 |
Javascript | Javascript | set initial opacity based on style | f66fba83cf34424128378ae740597c679e0ad8e8 | <ide><path>Libraries/Components/Touchable/TouchableOpacity.js
<ide> var TouchableOpacity = React.createClass({
<ide> getInitialState: function() {
<ide> return {
<ide> ...this.touchableGetInitialState(),
<del> anim: new Animated.Value(1),
<add> anim: new Animated.Value(this._getChildStyleOpacityWithDefault()),
<ide> };
<ide> },
<ide>
<ide> var TouchableOpacity = React.createClass({
<ide> },
<ide>
<ide> _opacityInactive: function(duration: number) {
<del> var childStyle = flattenStyle(this.props.style) || {};
<ide> this.setOpacityTo(
<del> childStyle.opacity === undefined ? 1 : childStyle.opacity,
<add> this._getChildStyleOpacityWithDefault(),
<ide> duration
<ide> );
<ide> },
<ide>
<ide> _opacityFocused: function() {
<ide> this.setOpacityTo(this.props.focusedOpacity);
<ide> },
<add>
<add> _getChildStyleOpacityWithDefault: function() {
<add> var childStyle = flattenStyle(this.props.style) || {};
<add> return childStyle.opacity == undefined ? 1 : childStyle.opacity;
<add> },
<ide>
<ide> render: function() {
<ide> return ( | 1 |
Javascript | Javascript | change == to === in linkedlist | 62d8134c50a1f65d6c0ae139e60e0cac701c8a84 | <ide><path>lib/internal/linkedlist.js
<ide> exports.create = create;
<ide>
<ide> // show the most idle item
<ide> function peek(list) {
<del> if (list._idlePrev == list) return null;
<add> if (list._idlePrev === list) return null;
<ide> return list._idlePrev;
<ide> }
<ide> exports.peek = peek;
<ide> function append(list, item) {
<ide> }
<ide>
<ide> // items are linked with _idleNext -> (older) and _idlePrev -> (newer)
<del> // TODO: swap the linkage to match the intuitive older items at "prev"
<add> // Note: This linkage (next being older) may seem counter-intuitive at first.
<ide> item._idleNext = list._idleNext;
<ide> item._idlePrev = list;
<ide> | 1 |
Go | Go | fix package on debugtrap_unsupported.go | 1e78eec8261d93bcf3eeb7aad123032d81fcbb83 | <ide><path>daemon/debugtrap_unsupported.go
<ide> // +build !linux,!darwin,!freebsd
<ide>
<del>package signal
<add>package daemon
<ide>
<ide> func setupSigusr1Trap() {
<ide> return | 1 |
Ruby | Ruby | fix docs about etag | 15365de5a47f57fb1050664532f2f1600237eff1 | <ide><path>actionpack/lib/action_controller/metal/conditional_get.rb
<ide> module ClassMethods
<ide> # def show
<ide> # # Etag will differ even for the same invoice when it's viewed by a different current_user
<ide> # @invoice = Invoice.find(params[:id])
<del> # fresh_when(@invoice)
<add> # fresh_when etag: @invoice
<ide> # end
<ide> # end
<ide> def etag(&etagger) | 1 |
Text | Text | remove unadvisable cluster example | 6fc6ba74f557ef83492be3671fdaf103a58b6fc9 | <ide><path>doc/api/cluster.md
<ide> for (const worker of Object.values(cluster.workers)) {
<ide> }
<ide> ```
<ide>
<del>Using the worker's unique id is the easiest way to locate the worker.
<del>
<del>```js
<del>socket.on('data', (id) => {
<del> const worker = cluster.workers[id];
<del>});
<del>```
<del>
<ide> [Advanced serialization for `child_process`]: child_process.md#advanced-serialization
<ide> [Child Process module]: child_process.md#child_processforkmodulepath-args-options
<ide> [`.fork()`]: #clusterforkenv | 1 |
PHP | PHP | keep the lines sorted | fa6c48d27c54d7495e64a9ace39392d29014e46a | <ide><path>resources/lang/en/validation.php
<ide> ],
<ide> 'not_in' => 'The selected :attribute is invalid.',
<ide> 'numeric' => 'The :attribute must be a number.',
<del> 'regex' => 'The :attribute format is invalid.',
<ide> 'present' => 'The :attribute field must be present.',
<add> 'regex' => 'The :attribute format is invalid.',
<ide> 'required' => 'The :attribute field is required.',
<ide> 'required_if' => 'The :attribute field is required when :other is :value.',
<ide> 'required_unless' => 'The :attribute field is required unless :other is in :values.', | 1 |
Mixed | Javascript | support untagged releases | 58b3ee7a889b62fefb9cc64962819795bebce7b8 | <ide><path>scripts/release/README.md
<ide> The high level process of creating releases is [documented below](#process). Ind
<ide>
<ide> If this is your first time running the release scripts, go to the `scripts/release` directory and run `yarn` to install the dependencies.
<ide>
<del>## Publishing Without Tags
<add>## Publishing Untagged
<ide>
<del>The sections bekow include meaningful `--tags` in the instructions. However, keep in mind that **the `--tags` arguments is optional**, and you can omit it if you don't want to tag the release on npm at all. This can be useful when preparing breaking changes.
<add>The sections bekow include meaningful `--tag` in the instructions.
<add>
<add>However, keep in mind that **the `--tag` arguments is optional**, and you can omit it if you don't want to tag the release on npm at all. This can be useful when preparing breaking changes.
<add>
<add>Because npm requires a tag on publish, the script does it by creating a temporary tag and deleting it afterwards.
<ide>
<ide> ## Publishing Next
<ide>
<ide> scripts/release/prepare-release-from-ci.js --build=124756
<ide>
<ide> Once the build has been checked out and tested locally, you're ready to publish it:
<ide> ```sh
<del>scripts/release/publish.js --tags next
<add>scripts/release/publish.js --tag next
<ide> ```
<ide>
<ide> If the OTP code expires while publishing, re-run this command and answer "y" to the questions about whether it was expected for already published packages.
<ide> scripts/release/prepare-release-from-ci.js --build=124763
<ide> Once the build has been checked out and tested locally, you're ready to publish it. When publishing an experimental release, use the `experimental` tag:
<ide>
<ide> ```sh
<del>scripts/release/publish.js --tags experimental
<add>scripts/release/publish.js --tag experimental
<ide> ```
<ide>
<ide> If the OTP code expires while publishing, re-run this command and answer "y" to the questions about whether it was expected for already published packages.
<ide> This script will prompt you to select stable version numbers for each of the pac
<ide> Once this step is complete, you're ready to publish the release:
<ide>
<ide> ```sh
<del>scripts/release/publish.js --tags latest
<add>scripts/release/publish.js --tag latest
<ide> ```
<ide>
<ide> If the OTP code expires while publishing, re-run this command and answer "y" to the questions about whether it was expected for already published packages.
<ide>
<add>Note that publishing the `latest` tag will always update the `next` tag automatically as well so they're in sync.
<add>
<ide> After successfully publishing the release, follow the on-screen instructions to ensure that all of the appropriate post-release steps are executed.
<ide>
<ide> <sup>1: You can omit the `version` param if you just want to promote the latest "next" candidate to stable.</sup>
<ide> Upon completion, this script provides instructions for tagging the Git commit th
<ide> **Specify a `--dry` flag when running this script if you want to skip the NPM-publish step.** In this event, the script will print the NPM commands but it will not actually run them.
<ide>
<ide> #### Example usage
<del>To publish a release to NPM as both `next` and `latest`:
<add>To publish a release to NPM as `latest`:
<ide> ```sh
<del>scripts/release/publish.js --tags latest
<add>scripts/release/publish.js --tag latest
<ide> ```
<add>
<add>Note that publishing the `latest` tag will always update the `next` tag automatically as well so they're in sync.
<ide><path>scripts/release/publish-commands/confirm-skipped-packages.js
<ide> const clear = require('clear');
<ide> const {confirm} = require('../utils');
<ide> const theme = require('../theme');
<ide>
<del>const run = async ({cwd, packages, skipPackages, tags}) => {
<add>const run = async ({cwd, packages, skipPackages}) => {
<ide> if (skipPackages.length === 0) {
<ide> return;
<ide> }
<add><path>scripts/release/publish-commands/confirm-version-and-tag.js
<del><path>scripts/release/publish-commands/confirm-version-and-tags.js
<ide> const {join} = require('path');
<ide> const {confirm} = require('../utils');
<ide> const theme = require('../theme');
<ide>
<del>const run = async ({cwd, packages, tags}) => {
<add>const run = async ({cwd, packages, tag}) => {
<ide> clear();
<ide>
<del> if (tags.length === 0) {
<del> console.log(
<del> theme`{spinnerSuccess ✓} You are about the publish the following packages without any tags:`
<del> );
<del> } else if (tags.length === 1) {
<del> console.log(
<del> theme`{spinnerSuccess ✓} You are about the publish the following packages under the tag {tag ${tags}}:`
<del> );
<del> } else {
<del> console.log(
<del> theme`{spinnerSuccess ✓} You are about the publish the following packages under the tags {tag ${tags.join(
<del> ', '
<del> )}}:`
<del> );
<del> }
<add> // All latest releases are auto-tagged as next too by the script.
<add> let tags = tag === 'latest' ? ['latest', 'next'] : [tag];
<add> console.log(
<add> theme`{spinnerSuccess ✓} You are about the publish the following packages under the tag {tag ${tags.join(
<add> ', '
<add> )}}:`
<add> );
<ide>
<ide> for (let i = 0; i < packages.length; i++) {
<ide> const packageName = packages[i];
<ide><path>scripts/release/publish-commands/parse-params.js
<ide> const paramDefinitions = [
<ide> defaultValue: false,
<ide> },
<ide> {
<del> name: 'tags',
<add> name: 'tag',
<ide> type: String,
<del> multiple: true,
<del> description: 'NPM tags to point to the new release.',
<add> description: 'NPM tag to point to the new release.',
<add> defaultValue: 'untagged',
<ide> },
<ide> {
<ide> name: 'skipPackages',
<ide> const paramDefinitions = [
<ide>
<ide> module.exports = () => {
<ide> const params = commandLineArgs(paramDefinitions);
<del> if (!params.tags || !params.tags.length) {
<del> params.tags = [];
<add> switch (params.tag) {
<add> case 'latest':
<add> case 'next':
<add> case 'experimental':
<add> case 'untagged':
<add> break;
<add> default:
<add> console.error('Unknown tag: "' + params.tag + '"');
<add> process.exit(1);
<add> break;
<ide> }
<ide> splitCommaParams(params.skipPackages);
<del> splitCommaParams(params.tags);
<ide> return params;
<ide> };
<ide><path>scripts/release/publish-commands/print-follow-up-instructions.js
<ide> const {join} = require('path');
<ide> const theme = require('../theme');
<ide> const {execRead} = require('../utils');
<ide>
<del>const run = async ({cwd, packages, tags}) => {
<add>const run = async ({cwd, packages, tag}) => {
<ide> // All packages are built from a single source revision,
<ide> // so it is safe to read build info from any one of them.
<ide> const arbitraryPackageName = packages[0];
<ide> const run = async ({cwd, packages, tags}) => {
<ide>
<ide> clear();
<ide>
<del> if (tags.length === 1 && tags[0] === 'next') {
<add> if (tag === 'next') {
<ide> console.log(
<ide> theme`{header A "next" release} {version ${version}} {header has been published!}`
<ide> );
<ide> const run = async ({cwd, packages, tags}) => {
<ide> theme.caution`The release has been published but you're not done yet!`
<ide> );
<ide>
<del> if (tags.includes('latest')) {
<add> if (tag === 'latest') {
<ide> console.log();
<ide> console.log(
<ide> theme.header`Please review and commit all local, staged changes.`
<ide><path>scripts/release/publish-commands/publish-to-npm.js
<ide> const {join} = require('path');
<ide> const {confirm, execRead} = require('../utils');
<ide> const theme = require('../theme');
<ide>
<del>const run = async ({cwd, dry, packages, tags}, otp) => {
<add>const run = async ({cwd, dry, packages, tag}, otp) => {
<ide> clear();
<ide>
<ide> for (let i = 0; i < packages.length; i++) {
<ide> const run = async ({cwd, dry, packages, tags}, otp) => {
<ide>
<ide> // Publish the package and tag it.
<ide> if (!dry) {
<del> await exec(`npm publish --tag=${tags[0]} --otp=${otp}`, {
<add> await exec(`npm publish --tag=${tag} --otp=${otp}`, {
<ide> cwd: packagePath,
<ide> });
<ide> }
<ide> console.log(theme.command(` cd ${packagePath}`));
<del> console.log(theme.command(` npm publish --tag=${tags[0]} --otp=${otp}`));
<add> console.log(theme.command(` npm publish --tag=${tag} --otp=${otp}`));
<ide>
<del> for (let j = 1; j < tags.length; j++) {
<add> if (tag === 'latest') {
<add> // Whenever we publish latest, also tag "next" automatically so they're in sync.
<ide> if (!dry) {
<ide> await exec(
<del> `npm dist-tag add ${packageName}@${version} ${tags[j]} --otp=${otp}`,
<del> {cwd: packagePath}
<add> `npm dist-tag add ${packageName}@${version} next --otp=${otp}`
<ide> );
<ide> }
<ide> console.log(
<ide> theme.command(
<del> ` npm dist-tag add ${packageName}@${version} ${tags[j]} --otp=${otp}`
<add> ` npm dist-tag add ${packageName}@${version} next --otp=${otp}`
<ide> )
<ide> );
<add> } else if (tag === 'untagged') {
<add> // npm doesn't let us publish without a tag at all,
<add> // so for one-off publishes we clean it up ourselves.
<add> if (!dry) {
<add> await exec(`npm dist-tag rm ${packageName}@untagged --otp=${otp}`);
<add> }
<add> console.log(
<add> theme.command(`npm dist-tag rm ${packageName}@untagged --otp=${otp}`)
<add> );
<ide> }
<ide> }
<ide> }
<ide><path>scripts/release/publish-commands/update-stable-version-numbers.js
<ide> const {readFileSync, writeFileSync} = require('fs');
<ide> const {readJson, writeJson} = require('fs-extra');
<ide> const {join} = require('path');
<ide>
<del>const run = async ({cwd, packages, skipPackages, tags}) => {
<del> if (!tags.includes('latest')) {
<add>const run = async ({cwd, packages, skipPackages, tag}) => {
<add> if (tag !== 'latest') {
<ide> // Don't update version numbers for alphas.
<ide> return;
<ide> }
<ide><path>scripts/release/publish-commands/validate-tag.js
<add>#!/usr/bin/env node
<add>
<add>'use strict';
<add>
<add>const {readJson} = require('fs-extra');
<add>const {join} = require('path');
<add>const theme = require('../theme');
<add>
<add>const run = async ({cwd, packages, tag}) => {
<add> // Prevent a "next" release from ever being published as @latest
<add> // All canaries share a version number, so it's okay to check any of them.
<add> const arbitraryPackageName = packages[0];
<add> const packageJSONPath = join(
<add> cwd,
<add> 'build',
<add> 'node_modules',
<add> arbitraryPackageName,
<add> 'package.json'
<add> );
<add> const {version} = await readJson(packageJSONPath);
<add> const isExperimentalVersion = version.indexOf('experimental') !== -1;
<add> if (version.indexOf('0.0.0') === 0) {
<add> if (tag === 'latest') {
<add> if (isExperimentalVersion) {
<add> console.log(
<add> theme`{error Experimental release} {version ${version}} {error cannot be tagged as} {tag latest}`
<add> );
<add> } else {
<add> console.log(
<add> theme`{error Next release} {version ${version}} {error cannot be tagged as} {tag latest}`
<add> );
<add> }
<add> process.exit(1);
<add> } else if (tag === 'next' && isExperimentalVersion) {
<add> console.log(
<add> theme`{error Experimental release} {version ${version}} {error cannot be tagged as} {tag next}`
<add> );
<add> process.exit(1);
<add> } else if (tag === 'experimental' && !isExperimentalVersion) {
<add> console.log(
<add> theme`{error Next release} {version ${version}} {error cannot be tagged as} {tag experimental}`
<add> );
<add> process.exit(1);
<add> }
<add> } else {
<add> if (tag !== 'latest') {
<add> console.log(
<add> theme`{error Stable release} {version ${version}} {error cannot be tagged as} {tag ${tag}}`
<add> );
<add> process.exit(1);
<add> }
<add> }
<add>};
<add>
<add>module.exports = run;
<ide><path>scripts/release/publish-commands/validate-tags.js
<del>#!/usr/bin/env node
<del>
<del>'use strict';
<del>
<del>const {readJson} = require('fs-extra');
<del>const {join} = require('path');
<del>const theme = require('../theme');
<del>
<del>const run = async ({cwd, packages, tags}) => {
<del> // Prevent a "next" release from ever being published as @latest
<del> // All canaries share a version number, so it's okay to check any of them.
<del> const arbitraryPackageName = packages[0];
<del> const packageJSONPath = join(
<del> cwd,
<del> 'build',
<del> 'node_modules',
<del> arbitraryPackageName,
<del> 'package.json'
<del> );
<del> const {version} = await readJson(packageJSONPath);
<del> if (version.indexOf('0.0.0') === 0) {
<del> if (tags.includes('latest')) {
<del> console.log(
<del> theme`{error Next release} {version ${version}} {error cannot be tagged as} {tag latest}`
<del> );
<del> process.exit(1);
<del> }
<del> } else {
<del> if (tags.includes('next')) {
<del> console.log(
<del> theme`{error Stable release} {version ${version}} {error cannot be tagged as} {tag next}`
<del> );
<del> process.exit(1);
<del> }
<del> }
<del>};
<del>
<del>module.exports = run;
<ide><path>scripts/release/publish.js
<ide> const theme = require('./theme');
<ide>
<ide> const checkNPMPermissions = require('./publish-commands/check-npm-permissions');
<ide> const confirmSkippedPackages = require('./publish-commands/confirm-skipped-packages');
<del>const confirmVersionAndTags = require('./publish-commands/confirm-version-and-tags');
<add>const confirmVersionAndTag = require('./publish-commands/confirm-version-and-tag');
<ide> const parseParams = require('./publish-commands/parse-params');
<ide> const printFollowUpInstructions = require('./publish-commands/print-follow-up-instructions');
<ide> const promptForOTP = require('./publish-commands/prompt-for-otp');
<ide> const publishToNPM = require('./publish-commands/publish-to-npm');
<ide> const updateStableVersionNumbers = require('./publish-commands/update-stable-version-numbers');
<del>const validateTags = require('./publish-commands/validate-tags');
<add>const validateTag = require('./publish-commands/validate-tag');
<ide> const validateSkipPackages = require('./publish-commands/validate-skip-packages');
<ide>
<ide> const run = async () => {
<ide> const run = async () => {
<ide> }
<ide> });
<ide>
<del> await validateTags(params);
<add> await validateTag(params);
<ide> await confirmSkippedPackages(params);
<del> await confirmVersionAndTags(params);
<add> await confirmVersionAndTag(params);
<ide> await validateSkipPackages(params);
<ide> await checkNPMPermissions(params);
<ide> const otp = await promptForOTP(params); | 10 |
Javascript | Javascript | emit didfocus after routestack reset | 7fdabd8f14e0aefc4efb7b05c0cb5d019f2a699f | <ide><path>Libraries/CustomComponents/Navigator/Navigator.js
<ide> var Navigator = React.createClass({
<ide> }, () => {
<ide> this._handleSpringUpdate();
<ide> this._navBar && this._navBar.immediatelyRefresh();
<add> this._emitDidFocus(this.state.routeStack[this.state.presentedIndex]);
<ide> });
<ide> },
<ide> | 1 |
Text | Text | add the link to editor setup tutorials | 8ec3f38df60b2dd8a05da88bfdf14aec5707fdba | <ide><path>CONTRIBUTING.md
<ide> Now, the environment setup is complete. You are ready to run the tests.
<ide>
<ide> You may modify the Dockerfile to your specific needs, like installing your own
<ide> dev tools. You may also mount more volumes with the `-v` option, like your SSH
<del>credentials. Besides the editors running in the shell, many popular IDEs today
<del>also support developing in a container. You may use these IDEs with the
<del>Dockerfile as well.
<add>credentials.
<add>
<add>Many popular editors today support developing in a container. Here is list of
<add>[supported editors](https://discuss.tensorflow.org/t/setup-your-favorite-editor-to-develop-keras)
<add>with setup instructions.
<ide>
<ide> ### Option 2: Setup a local environment
<ide> | 1 |
Python | Python | remove k from order parameter options | 5300d20fec0d3b06c1e7e5623db06c2e6f8eaee9 | <ide><path>numpy/core/_add_newdocs.py
<ide>
<ide> Parameters
<ide> ----------
<del> order : {'C', 'F', 'A', 'K'}, optional
<add> order : {'C', 'F', 'A'}, optional
<ide> Controls the memory layout of the bytes object. 'C' means C-order,
<del> 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
<del> 'C' otherwise. 'K' means match the layout of `a` as closely
<del> as possible. Default is 'C'.
<add> 'F' means F-order, 'A' (short for *Any*) means 'F' if `a` is
<add> Fortran contiguous, 'C' otherwise. Default is 'C'.
<ide>
<ide> Returns
<ide> ------- | 1 |
Mixed | Javascript | improve mode and flags validation | a13500f5037c1eede3a2d0a0db5fc532c05e14a8 | <ide><path>doc/api/fs.md
<ide> On Linux, positional writes don't work when the file is opened in append mode.
<ide> The kernel ignores the position argument and always appends the data to
<ide> the end of the file.
<ide>
<del>Modifying a file rather than replacing it may require a flags mode of `'r+'`
<del>rather than the default mode `'w'`.
<add>Modifying a file rather than replacing it may require the `flag` option to be
<add>set to `'r+'` rather than the default `'w'`.
<ide>
<ide> The behavior of some flags are platform-specific. As such, opening a directory
<ide> on macOS and Linux with the `'a+'` flag, as in the example below, will return an
<ide><path>lib/fs.js
<ide> const {
<ide> getDirents,
<ide> getOptions,
<ide> getValidatedPath,
<add> getValidMode,
<ide> handleErrorFromBinding,
<ide> nullCheck,
<ide> preprocessSymlinkDestination,
<ide> const {
<ide> } = require('internal/constants');
<ide> const {
<ide> isUint32,
<del> parseMode,
<add> parseFileMode,
<ide> validateBuffer,
<ide> validateInteger,
<ide> validateInt32,
<ide> function access(path, mode, callback) {
<ide> }
<ide>
<ide> path = getValidatedPath(path);
<del>
<del> mode = mode | 0;
<add> mode = getValidMode(mode, 'access');
<ide> const req = new FSReqCallback();
<ide> req.oncomplete = makeCallback(callback);
<ide> binding.access(pathModule.toNamespacedPath(path), mode, req);
<ide> }
<ide>
<ide> function accessSync(path, mode) {
<ide> path = getValidatedPath(path);
<del>
<del> if (mode === undefined)
<del> mode = F_OK;
<del> else
<del> mode = mode | 0;
<add> mode = getValidMode(mode, 'access');
<ide>
<ide> const ctx = { path };
<ide> binding.access(pathModule.toNamespacedPath(path), mode, undefined, ctx);
<ide> function readFile(path, options, callback) {
<ide> }
<ide>
<ide> path = getValidatedPath(path);
<add> const flagsNumber = stringToFlags(options.flags);
<ide> binding.open(pathModule.toNamespacedPath(path),
<del> stringToFlags(options.flag || 'r'),
<add> flagsNumber,
<ide> 0o666,
<ide> req);
<ide> }
<ide> function open(path, flags, mode, callback) {
<ide> } else if (typeof mode === 'function') {
<ide> callback = mode;
<ide> mode = 0o666;
<add> } else {
<add> mode = parseFileMode(mode, 'mode', 0o666);
<ide> }
<ide> const flagsNumber = stringToFlags(flags);
<del> if (arguments.length >= 4) {
<del> mode = parseMode(mode, 'mode', 0o666);
<del> }
<ide> callback = makeCallback(callback);
<ide>
<ide> const req = new FSReqCallback();
<ide> function open(path, flags, mode, callback) {
<ide>
<ide> function openSync(path, flags, mode) {
<ide> path = getValidatedPath(path);
<del> const flagsNumber = stringToFlags(flags || 'r');
<del> mode = parseMode(mode, 'mode', 0o666);
<add> const flagsNumber = stringToFlags(flags);
<add> mode = parseFileMode(mode, 'mode', 0o666);
<ide>
<ide> const ctx = { path };
<ide> const result = binding.open(pathModule.toNamespacedPath(path),
<ide> function fsyncSync(fd) {
<ide> }
<ide>
<ide> function mkdir(path, options, callback) {
<add> let mode = 0o777;
<add> let recursive = false;
<ide> if (typeof options === 'function') {
<ide> callback = options;
<del> options = {};
<ide> } else if (typeof options === 'number' || typeof options === 'string') {
<del> options = { mode: options };
<add> mode = options;
<add> } else if (options) {
<add> if (options.recursive !== undefined)
<add> recursive = options.recursive;
<add> if (options.mode !== undefined)
<add> mode = options.mode;
<ide> }
<del> const {
<del> recursive = false,
<del> mode = 0o777
<del> } = options || {};
<ide> callback = makeCallback(callback);
<ide> path = getValidatedPath(path);
<ide>
<ide> function mkdir(path, options, callback) {
<ide> const req = new FSReqCallback();
<ide> req.oncomplete = callback;
<ide> binding.mkdir(pathModule.toNamespacedPath(path),
<del> parseMode(mode, 'mode', 0o777), recursive, req);
<add> parseFileMode(mode, 'mode'), recursive, req);
<ide> }
<ide>
<ide> function mkdirSync(path, options) {
<add> let mode = 0o777;
<add> let recursive = false;
<ide> if (typeof options === 'number' || typeof options === 'string') {
<del> options = { mode: options };
<add> mode = options;
<add> } else if (options) {
<add> if (options.recursive !== undefined)
<add> recursive = options.recursive;
<add> if (options.mode !== undefined)
<add> mode = options.mode;
<ide> }
<del> const {
<del> recursive = false,
<del> mode = 0o777
<del> } = options || {};
<del>
<ide> path = getValidatedPath(path);
<ide> if (typeof recursive !== 'boolean')
<ide> throw new ERR_INVALID_ARG_TYPE('recursive', 'boolean', recursive);
<ide>
<ide> const ctx = { path };
<ide> binding.mkdir(pathModule.toNamespacedPath(path),
<del> parseMode(mode, 'mode', 0o777), recursive, undefined,
<add> parseFileMode(mode, 'mode'), recursive, undefined,
<ide> ctx);
<ide> handleErrorFromBinding(ctx);
<ide> }
<ide> function unlinkSync(path) {
<ide>
<ide> function fchmod(fd, mode, callback) {
<ide> validateInt32(fd, 'fd', 0);
<del> mode = parseMode(mode, 'mode');
<add> mode = parseFileMode(mode, 'mode');
<ide> callback = makeCallback(callback);
<ide>
<ide> const req = new FSReqCallback();
<ide> function fchmod(fd, mode, callback) {
<ide>
<ide> function fchmodSync(fd, mode) {
<ide> validateInt32(fd, 'fd', 0);
<del> mode = parseMode(mode, 'mode');
<add> mode = parseFileMode(mode, 'mode');
<ide> const ctx = {};
<ide> binding.fchmod(fd, mode, undefined, ctx);
<ide> handleErrorFromBinding(ctx);
<ide> function lchmodSync(path, mode) {
<ide>
<ide> function chmod(path, mode, callback) {
<ide> path = getValidatedPath(path);
<del> mode = parseMode(mode, 'mode');
<add> mode = parseFileMode(mode, 'mode');
<ide> callback = makeCallback(callback);
<ide>
<ide> const req = new FSReqCallback();
<ide> function chmod(path, mode, callback) {
<ide>
<ide> function chmodSync(path, mode) {
<ide> path = getValidatedPath(path);
<del> mode = parseMode(mode, 'mode');
<add> mode = parseFileMode(mode, 'mode');
<ide>
<ide> const ctx = { path };
<ide> binding.chmod(pathModule.toNamespacedPath(path), mode, undefined, ctx);
<ide> function mkdtempSync(prefix, options) {
<ide> }
<ide>
<ide>
<del>function copyFile(src, dest, flags, callback) {
<del> if (typeof flags === 'function') {
<del> callback = flags;
<del> flags = 0;
<add>function copyFile(src, dest, mode, callback) {
<add> if (typeof mode === 'function') {
<add> callback = mode;
<add> mode = 0;
<ide> } else if (typeof callback !== 'function') {
<ide> throw new ERR_INVALID_CALLBACK(callback);
<ide> }
<ide> function copyFile(src, dest, flags, callback) {
<ide>
<ide> src = pathModule._makeLong(src);
<ide> dest = pathModule._makeLong(dest);
<del> flags = flags | 0;
<add> mode = getValidMode(mode, 'copyFile');
<ide> const req = new FSReqCallback();
<ide> req.oncomplete = makeCallback(callback);
<del> binding.copyFile(src, dest, flags, req);
<add> binding.copyFile(src, dest, mode, req);
<ide> }
<ide>
<ide>
<del>function copyFileSync(src, dest, flags) {
<add>function copyFileSync(src, dest, mode) {
<ide> src = getValidatedPath(src, 'src');
<ide> dest = getValidatedPath(dest, 'dest');
<ide>
<ide> const ctx = { path: src, dest }; // non-prefixed
<ide>
<ide> src = pathModule._makeLong(src);
<ide> dest = pathModule._makeLong(dest);
<del> flags = flags | 0;
<del> binding.copyFile(src, dest, flags, undefined, ctx);
<add> mode = getValidMode(mode, 'copyFile');
<add> binding.copyFile(src, dest, mode, undefined, ctx);
<ide> handleErrorFromBinding(ctx);
<ide> }
<ide>
<ide><path>lib/internal/bootstrap/switches/does_own_process_state.js
<ide> if (credentials.implementsPosixCredentials) {
<ide> // ---- compare the setups side-by-side -----
<ide>
<ide> const {
<del> parseMode,
<add> parseFileMode,
<ide> validateString
<ide> } = require('internal/validators');
<ide>
<ide> function wrappedChdir(directory) {
<ide>
<ide> function wrappedUmask(mask) {
<ide> if (mask !== undefined) {
<del> mask = parseMode(mask, 'mask');
<add> mask = parseFileMode(mask, 'mask');
<ide> }
<ide> return rawMethods.umask(mask);
<ide> }
<ide><path>lib/internal/fs/promises.js
<ide> const {
<ide> getOptions,
<ide> getStatsFromBinding,
<ide> getValidatedPath,
<add> getValidMode,
<ide> nullCheck,
<ide> preprocessSymlinkDestination,
<ide> stringToFlags,
<ide> const {
<ide> } = require('internal/fs/utils');
<ide> const { opendir } = require('internal/fs/dir');
<ide> const {
<del> parseMode,
<add> parseFileMode,
<ide> validateBuffer,
<ide> validateInteger,
<ide> validateUint32
<ide> async function readFileHandle(filehandle, options) {
<ide> async function access(path, mode = F_OK) {
<ide> path = getValidatedPath(path);
<ide>
<del> mode = mode | 0;
<add> mode = getValidMode(mode, 'access');
<ide> return binding.access(pathModule.toNamespacedPath(path), mode,
<ide> kUsePromises);
<ide> }
<ide>
<del>async function copyFile(src, dest, flags) {
<add>async function copyFile(src, dest, mode) {
<ide> src = getValidatedPath(src, 'src');
<ide> dest = getValidatedPath(dest, 'dest');
<del> flags = flags | 0;
<add> mode = getValidMode(mode, 'copyFile');
<ide> return binding.copyFile(pathModule.toNamespacedPath(src),
<ide> pathModule.toNamespacedPath(dest),
<del> flags, kUsePromises);
<add> mode,
<add> kUsePromises);
<ide> }
<ide>
<ide> // Note that unlike fs.open() which uses numeric file descriptors,
<ide> // fsPromises.open() uses the fs.FileHandle class.
<ide> async function open(path, flags, mode) {
<ide> path = getValidatedPath(path);
<del> if (arguments.length < 2) flags = 'r';
<ide> const flagsNumber = stringToFlags(flags);
<del> mode = parseMode(mode, 'mode', 0o666);
<add> mode = parseFileMode(mode, 'mode', 0o666);
<ide> return new FileHandle(
<ide> await binding.openFileHandle(pathModule.toNamespacedPath(path),
<ide> flagsNumber, mode, kUsePromises));
<ide> async function mkdir(path, options) {
<ide> throw new ERR_INVALID_ARG_TYPE('recursive', 'boolean', recursive);
<ide>
<ide> return binding.mkdir(pathModule.toNamespacedPath(path),
<del> parseMode(mode, 'mode', 0o777), recursive,
<add> parseFileMode(mode, 'mode', 0o777), recursive,
<ide> kUsePromises);
<ide> }
<ide>
<ide> async function unlink(path) {
<ide>
<ide> async function fchmod(handle, mode) {
<ide> validateFileHandle(handle);
<del> mode = parseMode(mode, 'mode');
<add> mode = parseFileMode(mode, 'mode');
<ide> return binding.fchmod(handle.fd, mode, kUsePromises);
<ide> }
<ide>
<ide> async function chmod(path, mode) {
<ide> path = getValidatedPath(path);
<del> mode = parseMode(mode, 'mode');
<add> mode = parseFileMode(mode, 'mode');
<ide> return binding.chmod(pathModule.toNamespacedPath(path), mode, kUsePromises);
<ide> }
<ide>
<ide><path>lib/internal/fs/utils.js
<ide> const {
<ide> Error,
<ide> Number,
<ide> NumberIsFinite,
<add> MathMin,
<ide> ObjectSetPrototypeOf,
<ide> ReflectOwnKeys,
<ide> Symbol,
<ide> const {
<ide> const pathModule = require('path');
<ide> const kType = Symbol('type');
<ide> const kStats = Symbol('stats');
<add>const assert = require('internal/assert');
<ide>
<ide> const {
<add> F_OK = 0,
<add> W_OK = 0,
<add> R_OK = 0,
<add> X_OK = 0,
<add> COPYFILE_EXCL,
<add> COPYFILE_FICLONE,
<add> COPYFILE_FICLONE_FORCE,
<ide> O_APPEND,
<ide> O_CREAT,
<ide> O_EXCL,
<ide> const {
<ide> UV_DIRENT_BLOCK
<ide> } = internalBinding('constants').fs;
<ide>
<add>// The access modes can be any of F_OK, R_OK, W_OK or X_OK. Some might not be
<add>// available on specific systems. They can be used in combination as well
<add>// (F_OK | R_OK | W_OK | X_OK).
<add>const kMinimumAccessMode = MathMin(F_OK, W_OK, R_OK, X_OK);
<add>const kMaximumAccessMode = F_OK | W_OK | R_OK | X_OK;
<add>
<add>const kDefaultCopyMode = 0;
<add>// The copy modes can be any of COPYFILE_EXCL, COPYFILE_FICLONE or
<add>// COPYFILE_FICLONE_FORCE. They can be used in combination as well
<add>// (COPYFILE_EXCL | COPYFILE_FICLONE | COPYFILE_FICLONE_FORCE).
<add>const kMinimumCopyMode = MathMin(
<add> kDefaultCopyMode,
<add> COPYFILE_EXCL,
<add> COPYFILE_FICLONE,
<add> COPYFILE_FICLONE_FORCE
<add>);
<add>const kMaximumCopyMode = COPYFILE_EXCL |
<add> COPYFILE_FICLONE |
<add> COPYFILE_FICLONE_FORCE;
<add>
<ide> const isWindows = process.platform === 'win32';
<ide>
<ide> let fs;
<ide> function stringToFlags(flags) {
<ide> return flags;
<ide> }
<ide>
<add> if (flags == null) {
<add> return O_RDONLY;
<add> }
<add>
<ide> switch (flags) {
<ide> case 'r' : return O_RDONLY;
<ide> case 'rs' : // Fall through.
<ide> const validateRmdirOptions = hideStackFrames((options) => {
<ide> return options;
<ide> });
<ide>
<add>const getValidMode = hideStackFrames((mode, type) => {
<add> let min = kMinimumAccessMode;
<add> let max = kMaximumAccessMode;
<add> let def = F_OK;
<add> if (type === 'copyFile') {
<add> min = kMinimumCopyMode;
<add> max = kMaximumCopyMode;
<add> def = mode || kDefaultCopyMode;
<add> } else {
<add> assert(type === 'access');
<add> }
<add> if (mode == null) {
<add> return def;
<add> }
<add> if (Number.isInteger(mode) && mode >= min && mode <= max) {
<add> return mode;
<add> }
<add> if (typeof mode !== 'number') {
<add> throw new ERR_INVALID_ARG_TYPE('mode', 'integer', mode);
<add> }
<add> throw new ERR_OUT_OF_RANGE(
<add> 'mode', `an integer >= ${min} && <= ${max}`, mode);
<add>});
<ide>
<ide> module.exports = {
<ide> assertEncoding,
<ide> module.exports = {
<ide> getDirents,
<ide> getOptions,
<ide> getValidatedPath,
<add> getValidMode,
<ide> handleErrorFromBinding,
<ide> nullCheck,
<ide> preprocessSymlinkDestination,
<ide><path>lib/internal/validators.js
<ide> const modeDesc = 'must be a 32-bit unsigned integer or an octal string';
<ide> * @param {number} def If specified, will be returned for invalid values
<ide> * @returns {number}
<ide> */
<del>function parseMode(value, name, def) {
<add>function parseFileMode(value, name, def) {
<add> if (value == null && def !== undefined) {
<add> return def;
<add> }
<add>
<ide> if (isUint32(value)) {
<ide> return value;
<ide> }
<ide> function parseMode(value, name, def) {
<ide> return parseInt(value, 8);
<ide> }
<ide>
<del> if (def !== undefined && value == null) {
<del> return def;
<del> }
<del>
<ide> throw new ERR_INVALID_ARG_VALUE(name, value, modeDesc);
<ide> }
<ide>
<ide> function validateEncoding(data, encoding) {
<ide> module.exports = {
<ide> isInt32,
<ide> isUint32,
<del> parseMode,
<add> parseFileMode,
<ide> validateBuffer,
<ide> validateEncoding,
<ide> validateInteger,
<ide><path>test/parallel/test-fs-access.js
<ide> fs.accessSync(__filename);
<ide> const mode = fs.F_OK | fs.R_OK | fs.W_OK;
<ide> fs.accessSync(readWriteFile, mode);
<ide>
<add>// Invalid modes should throw.
<add>[
<add> false,
<add> 1n,
<add> { [Symbol.toPrimitive]() { return fs.R_OK; } },
<add> [1],
<add> 'r'
<add>].forEach((mode, i) => {
<add> console.log(mode, i);
<add> assert.throws(
<add> () => fs.access(readWriteFile, mode, common.mustNotCall()),
<add> {
<add> code: 'ERR_INVALID_ARG_TYPE',
<add> message: /"mode" argument.+integer/
<add> }
<add> );
<add> assert.throws(
<add> () => fs.accessSync(readWriteFile, mode),
<add> {
<add> code: 'ERR_INVALID_ARG_TYPE',
<add> message: /"mode" argument.+integer/
<add> }
<add> );
<add>});
<add>
<add>// Out of range modes should throw
<add>[
<add> -1,
<add> 8,
<add> Infinity,
<add> NaN
<add>].forEach((mode, i) => {
<add> console.log(mode, i);
<add> assert.throws(
<add> () => fs.access(readWriteFile, mode, common.mustNotCall()),
<add> {
<add> code: 'ERR_OUT_OF_RANGE',
<add> message: /"mode".+It must be an integer >= 0 && <= 7/
<add> }
<add> );
<add> assert.throws(
<add> () => fs.accessSync(readWriteFile, mode),
<add> {
<add> code: 'ERR_OUT_OF_RANGE',
<add> message: /"mode".+It must be an integer >= 0 && <= 7/
<add> }
<add> );
<add>});
<add>
<ide> assert.throws(
<ide> () => { fs.accessSync(doesNotExist); },
<ide> (err) => {
<ide><path>test/parallel/test-fs-copyfile.js
<ide> assert.throws(() => {
<ide> () => fs.copyFile(i, dest, common.mustNotCall()),
<ide> {
<ide> code: 'ERR_INVALID_ARG_TYPE',
<del> name: 'TypeError'
<add> name: 'TypeError',
<add> message: /src/
<ide> }
<ide> );
<ide> assert.throws(
<ide> () => fs.copyFile(src, i, common.mustNotCall()),
<ide> {
<ide> code: 'ERR_INVALID_ARG_TYPE',
<del> name: 'TypeError'
<add> name: 'TypeError',
<add> message: /dest/
<ide> }
<ide> );
<ide> assert.throws(
<ide> () => fs.copyFileSync(i, dest),
<ide> {
<ide> code: 'ERR_INVALID_ARG_TYPE',
<del> name: 'TypeError'
<add> name: 'TypeError',
<add> message: /src/
<ide> }
<ide> );
<ide> assert.throws(
<ide> () => fs.copyFileSync(src, i),
<ide> {
<ide> code: 'ERR_INVALID_ARG_TYPE',
<del> name: 'TypeError'
<add> name: 'TypeError',
<add> message: /dest/
<ide> }
<ide> );
<ide> });
<add>
<add>assert.throws(() => {
<add> fs.copyFileSync(src, dest, 'r');
<add>}, {
<add> code: 'ERR_INVALID_ARG_TYPE',
<add> name: 'TypeError',
<add> message: /mode/
<add>});
<add>
<add>assert.throws(() => {
<add> fs.copyFileSync(src, dest, 8);
<add>}, {
<add> code: 'ERR_OUT_OF_RANGE',
<add> name: 'RangeError',
<add> message: 'The value of "mode" is out of range. It must be an integer ' +
<add> '>= 0 && <= 7. Received 8'
<add>});
<add>
<add>assert.throws(() => {
<add> fs.copyFile(src, dest, 'r', common.mustNotCall());
<add>}, {
<add> code: 'ERR_INVALID_ARG_TYPE',
<add> name: 'TypeError',
<add> message: /mode/
<add>});
<ide><path>test/parallel/test-fs-error-messages.js
<ide> if (!common.isAIX) {
<ide> );
<ide> }
<ide>
<del>// Check copyFile with invalid flags.
<add>// Check copyFile with invalid modes.
<ide> {
<ide> const validateError = {
<del> // TODO: Make sure the error message always also contains the src.
<del> message: `EINVAL: invalid argument, copyfile -> '${nonexistentFile}'`,
<del> errno: UV_EINVAL,
<del> code: 'EINVAL',
<del> syscall: 'copyfile'
<add> message: /"mode".+must be an integer >= 0 && <= 7\. Received -1/,
<add> code: 'ERR_OUT_OF_RANGE'
<ide> };
<ide>
<del> fs.copyFile(existingFile, nonexistentFile, -1,
<del> common.expectsError(validateError));
<del>
<del> validateError.message = 'EINVAL: invalid argument, copyfile ' +
<del> `'${existingFile}' -> '${nonexistentFile}'`;
<add> assert.throws(
<add> () => fs.copyFile(existingFile, nonexistentFile, -1, () => {}),
<add> validateError
<add> );
<ide> assert.throws(
<ide> () => fs.copyFileSync(existingFile, nonexistentFile, -1),
<ide> validateError
<ide><path>test/parallel/test-fs-open-flags.js
<ide> assert.throws(
<ide> { code: 'ERR_INVALID_OPT_VALUE', name: 'TypeError' }
<ide> );
<ide>
<del>assert.throws(
<del> () => stringToFlags(null),
<del> { code: 'ERR_INVALID_OPT_VALUE', name: 'TypeError' }
<del>);
<del>
<ide> if (common.isLinux || common.isOSX) {
<ide> const tmpdir = require('../common/tmpdir');
<ide> tmpdir.refresh();
<ide><path>test/parallel/test-fs-promises-file-handle-sync.js
<ide> const tmpdir = require('../common/tmpdir');
<ide> const { access, copyFile, open } = require('fs').promises;
<ide> const path = require('path');
<ide>
<del>async function validateSync() {
<add>async function validate() {
<ide> tmpdir.refresh();
<ide> const dest = path.resolve(tmpdir.path, 'baz.js');
<add> await assert.rejects(
<add> copyFile(fixtures.path('baz.js'), dest, 'r'),
<add> {
<add> code: 'ERR_INVALID_ARG_TYPE',
<add> message: /mode.*integer.*string/
<add> }
<add> );
<ide> await copyFile(fixtures.path('baz.js'), dest);
<del> await access(dest, 'r');
<add> await assert.rejects(
<add> access(dest, 'r'),
<add> { code: 'ERR_INVALID_ARG_TYPE', message: /mode/ }
<add> );
<add> await access(dest);
<ide> const handle = await open(dest, 'r+');
<ide> await handle.datasync();
<ide> await handle.sync();
<ide> async function validateSync() {
<ide> await handle.close();
<ide> }
<ide>
<del>validateSync();
<add>validate();
<ide><path>test/parallel/test-fs-promises.js
<ide> assert.strictEqual(
<ide> );
<ide>
<ide> {
<del> access(__filename, 'r')
<add> access(__filename, 0)
<ide> .then(common.mustCall());
<ide>
<del> access('this file does not exist', 'r')
<del> .then(common.mustNotCall())
<del> .catch(common.expectsError({
<add> assert.rejects(
<add> access('this file does not exist', 0),
<add> {
<ide> code: 'ENOENT',
<ide> name: 'Error',
<del> message:
<del> /^ENOENT: no such file or directory, access/
<del> }));
<add> message: /^ENOENT: no such file or directory, access/
<add> }
<add> );
<add>
<add> assert.rejects(
<add> access(__filename, 8),
<add> {
<add> code: 'ERR_OUT_OF_RANGE',
<add> message: /"mode".*must be an integer >= 0 && <= 7\. Received 8$/
<add> }
<add> );
<add>
<add> assert.rejects(
<add> access(__filename, { [Symbol.toPrimitive]() { return 5; } }),
<add> {
<add> code: 'ERR_INVALID_ARG_TYPE',
<add> message: /"mode" argument.+integer\. Received an instance of Object$/
<add> }
<add> );
<ide> }
<ide>
<ide> function verifyStatObject(stat) {
<ide> function verifyStatObject(stat) {
<ide>
<ide> async function getHandle(dest) {
<ide> await copyFile(fixtures.path('baz.js'), dest);
<del> await access(dest, 'r');
<add> await access(dest);
<ide>
<ide> return open(dest, 'r+');
<ide> } | 12 |
Go | Go | use a structure to keep the allocated ips pool | 7e95b13460a58db75630d2d795482f39c68762c2 | <ide><path>daemon/networkdriver/bridge/driver.go
<ide> var (
<ide>
<ide> defaultBindingIP = net.ParseIP("0.0.0.0")
<ide> currentInterfaces = ifaces{c: make(map[string]*networkInterface)}
<add> ipAllocator = ipallocator.New()
<ide> )
<ide>
<ide> func InitDriver(job *engine.Job) engine.Status {
<ide> func InitDriver(job *engine.Job) engine.Status {
<ide> return job.Error(err)
<ide> }
<ide> log.Debugf("Subnet: %v", subnet)
<del> if err := ipallocator.RegisterSubnet(bridgeIPv4Network, subnet); err != nil {
<add> if err := ipAllocator.RegisterSubnet(bridgeIPv4Network, subnet); err != nil {
<ide> return job.Error(err)
<ide> }
<ide> }
<ide> func InitDriver(job *engine.Job) engine.Status {
<ide> return job.Error(err)
<ide> }
<ide> log.Debugf("Subnet: %v", subnet)
<del> if err := ipallocator.RegisterSubnet(subnet, subnet); err != nil {
<add> if err := ipAllocator.RegisterSubnet(subnet, subnet); err != nil {
<ide> return job.Error(err)
<ide> }
<ide> globalIPv6Network = subnet
<ide> }
<ide>
<ide> // Block BridgeIP in IP allocator
<del> ipallocator.RequestIP(bridgeIPv4Network, bridgeIPv4Network.IP)
<add> ipAllocator.RequestIP(bridgeIPv4Network, bridgeIPv4Network.IP)
<ide>
<ide> // https://github.com/docker/docker/issues/2768
<ide> job.Eng.Hack_SetGlobalVar("httpapi.bridgeIP", bridgeIPv4Network.IP)
<ide> func Allocate(job *engine.Job) engine.Status {
<ide> globalIPv6 net.IP
<ide> )
<ide>
<del> ip, err = ipallocator.RequestIP(bridgeIPv4Network, requestedIP)
<add> ip, err = ipAllocator.RequestIP(bridgeIPv4Network, requestedIP)
<ide> if err != nil {
<ide> return job.Error(err)
<ide> }
<ide> func Allocate(job *engine.Job) engine.Status {
<ide> }
<ide> }
<ide>
<del> globalIPv6, err = ipallocator.RequestIP(globalIPv6Network, requestedIPv6)
<add> globalIPv6, err = ipAllocator.RequestIP(globalIPv6Network, requestedIPv6)
<ide> if err != nil {
<ide> log.Errorf("Allocator: RequestIP v6: %v", err)
<ide> return job.Error(err)
<ide> func Release(job *engine.Job) engine.Status {
<ide> }
<ide> }
<ide>
<del> if err := ipallocator.ReleaseIP(bridgeIPv4Network, containerInterface.IP); err != nil {
<add> if err := ipAllocator.ReleaseIP(bridgeIPv4Network, containerInterface.IP); err != nil {
<ide> log.Infof("Unable to release IPv4 %s", err)
<ide> }
<ide> if globalIPv6Network != nil {
<del> if err := ipallocator.ReleaseIP(globalIPv6Network, containerInterface.IPv6); err != nil {
<add> if err := ipAllocator.ReleaseIP(globalIPv6Network, containerInterface.IPv6); err != nil {
<ide> log.Infof("Unable to release IPv6 %s", err)
<ide> }
<ide> }
<ide><path>daemon/networkdriver/ipallocator/allocator.go
<ide> var (
<ide> ErrBadSubnet = errors.New("network does not contain specified subnet")
<ide> )
<ide>
<del>var (
<del> lock = sync.Mutex{}
<del> allocatedIPs = networkSet{}
<del>)
<add>type IPAllocator struct {
<add> allocatedIPs networkSet
<add> mutex sync.Mutex
<add>}
<add>
<add>func New() *IPAllocator {
<add> return &IPAllocator{networkSet{}, sync.Mutex{}}
<add>}
<ide>
<ide> // RegisterSubnet registers network in global allocator with bounds
<ide> // defined by subnet. If you want to use network range you must call
<ide> // this method before first RequestIP, otherwise full network range will be used
<del>func RegisterSubnet(network *net.IPNet, subnet *net.IPNet) error {
<del> lock.Lock()
<del> defer lock.Unlock()
<add>func (a *IPAllocator) RegisterSubnet(network *net.IPNet, subnet *net.IPNet) error {
<add> a.mutex.Lock()
<add> defer a.mutex.Unlock()
<add>
<ide> key := network.String()
<del> if _, ok := allocatedIPs[key]; ok {
<add> if _, ok := a.allocatedIPs[key]; ok {
<ide> return ErrNetworkAlreadyRegistered
<ide> }
<ide> n := newAllocatedMap(network)
<ide> func RegisterSubnet(network *net.IPNet, subnet *net.IPNet) error {
<ide> n.begin.Set(begin)
<ide> n.end.Set(end)
<ide> n.last.Sub(begin, big.NewInt(1))
<del> allocatedIPs[key] = n
<add> a.allocatedIPs[key] = n
<ide> return nil
<ide> }
<ide>
<ide> // RequestIP requests an available ip from the given network. It
<ide> // will return the next available ip if the ip provided is nil. If the
<ide> // ip provided is not nil it will validate that the provided ip is available
<ide> // for use or return an error
<del>func RequestIP(network *net.IPNet, ip net.IP) (net.IP, error) {
<del> lock.Lock()
<del> defer lock.Unlock()
<add>func (a *IPAllocator) RequestIP(network *net.IPNet, ip net.IP) (net.IP, error) {
<add> a.mutex.Lock()
<add> defer a.mutex.Unlock()
<add>
<ide> key := network.String()
<del> allocated, ok := allocatedIPs[key]
<add> allocated, ok := a.allocatedIPs[key]
<ide> if !ok {
<ide> allocated = newAllocatedMap(network)
<del> allocatedIPs[key] = allocated
<add> a.allocatedIPs[key] = allocated
<ide> }
<ide>
<ide> if ip == nil {
<ide> func RequestIP(network *net.IPNet, ip net.IP) (net.IP, error) {
<ide>
<ide> // ReleaseIP adds the provided ip back into the pool of
<ide> // available ips to be returned for use.
<del>func ReleaseIP(network *net.IPNet, ip net.IP) error {
<del> lock.Lock()
<del> defer lock.Unlock()
<del> if allocated, exists := allocatedIPs[network.String()]; exists {
<add>func (a *IPAllocator) ReleaseIP(network *net.IPNet, ip net.IP) error {
<add> a.mutex.Lock()
<add> defer a.mutex.Unlock()
<add>
<add> if allocated, exists := a.allocatedIPs[network.String()]; exists {
<ide> delete(allocated.p, ip.String())
<ide> }
<ide> return nil
<ide><path>daemon/networkdriver/ipallocator/allocator_test.go
<ide> import (
<ide> "testing"
<ide> )
<ide>
<del>func reset() {
<del> allocatedIPs = networkSet{}
<del>}
<del>
<ide> func TestConversion(t *testing.T) {
<ide> ip := net.ParseIP("127.0.0.1")
<ide> i := ipToBigInt(ip)
<ide> func TestConversionIPv6(t *testing.T) {
<ide> }
<ide>
<ide> func TestRequestNewIps(t *testing.T) {
<del> defer reset()
<add> a := New()
<add>
<ide> network := &net.IPNet{
<ide> IP: []byte{192, 168, 0, 1},
<ide> Mask: []byte{255, 255, 255, 0},
<ide> func TestRequestNewIps(t *testing.T) {
<ide> var err error
<ide>
<ide> for i := 1; i < 10; i++ {
<del> ip, err = RequestIP(network, nil)
<add> ip, err = a.RequestIP(network, nil)
<ide> if err != nil {
<ide> t.Fatal(err)
<ide> }
<ide> func TestRequestNewIps(t *testing.T) {
<ide> }
<ide> }
<ide> value := bigIntToIP(big.NewInt(0).Add(ipToBigInt(ip), big.NewInt(1))).String()
<del> if err := ReleaseIP(network, ip); err != nil {
<add> if err := a.ReleaseIP(network, ip); err != nil {
<ide> t.Fatal(err)
<ide> }
<del> ip, err = RequestIP(network, nil)
<add> ip, err = a.RequestIP(network, nil)
<ide> if err != nil {
<ide> t.Fatal(err)
<ide> }
<ide> func TestRequestNewIps(t *testing.T) {
<ide> }
<ide>
<ide> func TestRequestNewIpV6(t *testing.T) {
<del> defer reset()
<add> a := New()
<add>
<ide> network := &net.IPNet{
<ide> IP: []byte{0x2a, 0x00, 0x14, 0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
<ide> Mask: []byte{255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}, // /64 netmask
<ide> func TestRequestNewIpV6(t *testing.T) {
<ide> var ip net.IP
<ide> var err error
<ide> for i := 1; i < 10; i++ {
<del> ip, err = RequestIP(network, nil)
<add> ip, err = a.RequestIP(network, nil)
<ide> if err != nil {
<ide> t.Fatal(err)
<ide> }
<ide> func TestRequestNewIpV6(t *testing.T) {
<ide> }
<ide> }
<ide> value := bigIntToIP(big.NewInt(0).Add(ipToBigInt(ip), big.NewInt(1))).String()
<del> if err := ReleaseIP(network, ip); err != nil {
<add> if err := a.ReleaseIP(network, ip); err != nil {
<ide> t.Fatal(err)
<ide> }
<del> ip, err = RequestIP(network, nil)
<add> ip, err = a.RequestIP(network, nil)
<ide> if err != nil {
<ide> t.Fatal(err)
<ide> }
<ide> func TestRequestNewIpV6(t *testing.T) {
<ide> }
<ide>
<ide> func TestReleaseIp(t *testing.T) {
<del> defer reset()
<add> a := New()
<add>
<ide> network := &net.IPNet{
<ide> IP: []byte{192, 168, 0, 1},
<ide> Mask: []byte{255, 255, 255, 0},
<ide> }
<ide>
<del> ip, err := RequestIP(network, nil)
<add> ip, err := a.RequestIP(network, nil)
<ide> if err != nil {
<ide> t.Fatal(err)
<ide> }
<ide>
<del> if err := ReleaseIP(network, ip); err != nil {
<add> if err := a.ReleaseIP(network, ip); err != nil {
<ide> t.Fatal(err)
<ide> }
<ide> }
<ide>
<ide> func TestReleaseIpV6(t *testing.T) {
<del> defer reset()
<add> a := New()
<add>
<ide> network := &net.IPNet{
<ide> IP: []byte{0x2a, 0x00, 0x14, 0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
<ide> Mask: []byte{255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}, // /64 netmask
<ide> }
<ide>
<del> ip, err := RequestIP(network, nil)
<add> ip, err := a.RequestIP(network, nil)
<ide> if err != nil {
<ide> t.Fatal(err)
<ide> }
<ide>
<del> if err := ReleaseIP(network, ip); err != nil {
<add> if err := a.ReleaseIP(network, ip); err != nil {
<ide> t.Fatal(err)
<ide> }
<ide> }
<ide>
<ide> func TestGetReleasedIp(t *testing.T) {
<del> defer reset()
<add> a := New()
<ide> network := &net.IPNet{
<ide> IP: []byte{192, 168, 0, 1},
<ide> Mask: []byte{255, 255, 255, 0},
<ide> }
<ide>
<del> ip, err := RequestIP(network, nil)
<add> ip, err := a.RequestIP(network, nil)
<ide> if err != nil {
<ide> t.Fatal(err)
<ide> }
<ide>
<ide> value := ip.String()
<del> if err := ReleaseIP(network, ip); err != nil {
<add> if err := a.ReleaseIP(network, ip); err != nil {
<ide> t.Fatal(err)
<ide> }
<ide>
<ide> for i := 0; i < 253; i++ {
<del> _, err = RequestIP(network, nil)
<add> _, err = a.RequestIP(network, nil)
<ide> if err != nil {
<ide> t.Fatal(err)
<ide> }
<del> err = ReleaseIP(network, ip)
<add> err = a.ReleaseIP(network, ip)
<ide> if err != nil {
<ide> t.Fatal(err)
<ide> }
<ide> }
<ide>
<del> ip, err = RequestIP(network, nil)
<add> ip, err = a.RequestIP(network, nil)
<ide> if err != nil {
<ide> t.Fatal(err)
<ide> }
<ide> func TestGetReleasedIp(t *testing.T) {
<ide> }
<ide>
<ide> func TestGetReleasedIpV6(t *testing.T) {
<del> defer reset()
<add> a := New()
<add>
<ide> network := &net.IPNet{
<ide> IP: []byte{0x2a, 0x00, 0x14, 0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
<ide> Mask: []byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0},
<ide> }
<ide>
<del> ip, err := RequestIP(network, nil)
<add> ip, err := a.RequestIP(network, nil)
<ide> if err != nil {
<ide> t.Fatal(err)
<ide> }
<ide>
<ide> value := ip.String()
<del> if err := ReleaseIP(network, ip); err != nil {
<add> if err := a.ReleaseIP(network, ip); err != nil {
<ide> t.Fatal(err)
<ide> }
<ide>
<ide> for i := 0; i < 253; i++ {
<del> _, err = RequestIP(network, nil)
<add> _, err = a.RequestIP(network, nil)
<ide> if err != nil {
<ide> t.Fatal(err)
<ide> }
<del> err = ReleaseIP(network, ip)
<add> err = a.ReleaseIP(network, ip)
<ide> if err != nil {
<ide> t.Fatal(err)
<ide> }
<ide> }
<ide>
<del> ip, err = RequestIP(network, nil)
<add> ip, err = a.RequestIP(network, nil)
<ide> if err != nil {
<ide> t.Fatal(err)
<ide> }
<ide> func TestGetReleasedIpV6(t *testing.T) {
<ide> }
<ide>
<ide> func TestRequestSpecificIp(t *testing.T) {
<del> defer reset()
<add> a := New()
<add>
<ide> network := &net.IPNet{
<ide> IP: []byte{192, 168, 0, 1},
<ide> Mask: []byte{255, 255, 255, 224},
<ide> func TestRequestSpecificIp(t *testing.T) {
<ide> ip := net.ParseIP("192.168.0.5")
<ide>
<ide> // Request a "good" IP.
<del> if _, err := RequestIP(network, ip); err != nil {
<add> if _, err := a.RequestIP(network, ip); err != nil {
<ide> t.Fatal(err)
<ide> }
<ide>
<ide> // Request the same IP again.
<del> if _, err := RequestIP(network, ip); err != ErrIPAlreadyAllocated {
<add> if _, err := a.RequestIP(network, ip); err != ErrIPAlreadyAllocated {
<ide> t.Fatalf("Got the same IP twice: %#v", err)
<ide> }
<ide>
<ide> // Request an out of range IP.
<del> if _, err := RequestIP(network, net.ParseIP("192.168.0.42")); err != ErrIPOutOfRange {
<add> if _, err := a.RequestIP(network, net.ParseIP("192.168.0.42")); err != ErrIPOutOfRange {
<ide> t.Fatalf("Got an out of range IP: %#v", err)
<ide> }
<ide> }
<ide>
<ide> func TestRequestSpecificIpV6(t *testing.T) {
<del> defer reset()
<add> a := New()
<add>
<ide> network := &net.IPNet{
<ide> IP: []byte{0x2a, 0x00, 0x14, 0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1},
<ide> Mask: []byte{255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0}, // /64 netmask
<ide> func TestRequestSpecificIpV6(t *testing.T) {
<ide> ip := net.ParseIP("2a00:1450::5")
<ide>
<ide> // Request a "good" IP.
<del> if _, err := RequestIP(network, ip); err != nil {
<add> if _, err := a.RequestIP(network, ip); err != nil {
<ide> t.Fatal(err)
<ide> }
<ide>
<ide> // Request the same IP again.
<del> if _, err := RequestIP(network, ip); err != ErrIPAlreadyAllocated {
<add> if _, err := a.RequestIP(network, ip); err != ErrIPAlreadyAllocated {
<ide> t.Fatalf("Got the same IP twice: %#v", err)
<ide> }
<ide>
<ide> // Request an out of range IP.
<del> if _, err := RequestIP(network, net.ParseIP("2a00:1500::1")); err != ErrIPOutOfRange {
<add> if _, err := a.RequestIP(network, net.ParseIP("2a00:1500::1")); err != ErrIPOutOfRange {
<ide> t.Fatalf("Got an out of range IP: %#v", err)
<ide> }
<ide> }
<ide>
<ide> func TestIPAllocator(t *testing.T) {
<add> a := New()
<add>
<ide> expectedIPs := []net.IP{
<ide> 0: net.IPv4(127, 0, 0, 1),
<ide> 1: net.IPv4(127, 0, 0, 2),
<ide> func TestIPAllocator(t *testing.T) {
<ide> // Check that we get 6 IPs, from 127.0.0.1–127.0.0.6, in that
<ide> // order.
<ide> for i := 0; i < 6; i++ {
<del> ip, err := RequestIP(network, nil)
<add> ip, err := a.RequestIP(network, nil)
<ide> if err != nil {
<ide> t.Fatal(err)
<ide> }
<ide> func TestIPAllocator(t *testing.T) {
<ide> // ↑
<ide>
<ide> // Check that there are no more IPs
<del> ip, err := RequestIP(network, nil)
<add> ip, err := a.RequestIP(network, nil)
<ide> if err == nil {
<ide> t.Fatalf("There shouldn't be any IP addresses at this point, got %s\n", ip)
<ide> }
<ide>
<ide> // Release some IPs in non-sequential order
<del> if err := ReleaseIP(network, expectedIPs[3]); err != nil {
<add> if err := a.ReleaseIP(network, expectedIPs[3]); err != nil {
<ide> t.Fatal(err)
<ide> }
<ide> // 1(u) - 2(u) - 3(u) - 4(f) - 5(u) - 6(u)
<ide> // ↑
<ide>
<del> if err := ReleaseIP(network, expectedIPs[2]); err != nil {
<add> if err := a.ReleaseIP(network, expectedIPs[2]); err != nil {
<ide> t.Fatal(err)
<ide> }
<ide> // 1(u) - 2(u) - 3(f) - 4(f) - 5(u) - 6(u)
<ide> // ↑
<ide>
<del> if err := ReleaseIP(network, expectedIPs[4]); err != nil {
<add> if err := a.ReleaseIP(network, expectedIPs[4]); err != nil {
<ide> t.Fatal(err)
<ide> }
<ide> // 1(u) - 2(u) - 3(f) - 4(f) - 5(f) - 6(u)
<ide> func TestIPAllocator(t *testing.T) {
<ide> // with the first released IP
<ide> newIPs := make([]net.IP, 3)
<ide> for i := 0; i < 3; i++ {
<del> ip, err := RequestIP(network, nil)
<add> ip, err := a.RequestIP(network, nil)
<ide> if err != nil {
<ide> t.Fatal(err)
<ide> }
<ide> func TestIPAllocator(t *testing.T) {
<ide> assertIPEquals(t, expectedIPs[3], newIPs[1])
<ide> assertIPEquals(t, expectedIPs[4], newIPs[2])
<ide>
<del> _, err = RequestIP(network, nil)
<add> _, err = a.RequestIP(network, nil)
<ide> if err == nil {
<ide> t.Fatal("There shouldn't be any IP addresses at this point")
<ide> }
<ide> }
<ide>
<ide> func TestAllocateFirstIP(t *testing.T) {
<del> defer reset()
<add> a := New()
<add>
<ide> network := &net.IPNet{
<ide> IP: []byte{192, 168, 0, 0},
<ide> Mask: []byte{255, 255, 255, 0},
<ide> func TestAllocateFirstIP(t *testing.T) {
<ide> firstIP := network.IP.To4().Mask(network.Mask)
<ide> first := big.NewInt(0).Add(ipToBigInt(firstIP), big.NewInt(1))
<ide>
<del> ip, err := RequestIP(network, nil)
<add> ip, err := a.RequestIP(network, nil)
<ide> if err != nil {
<ide> t.Fatal(err)
<ide> }
<ide> func TestAllocateFirstIP(t *testing.T) {
<ide> }
<ide>
<ide> func TestAllocateAllIps(t *testing.T) {
<del> defer reset()
<add> a := New()
<add>
<ide> network := &net.IPNet{
<ide> IP: []byte{192, 168, 0, 1},
<ide> Mask: []byte{255, 255, 255, 0},
<ide> func TestAllocateAllIps(t *testing.T) {
<ide> )
<ide>
<ide> for err == nil {
<del> current, err = RequestIP(network, nil)
<add> current, err = a.RequestIP(network, nil)
<ide> if isFirst {
<ide> first = current
<ide> isFirst = false
<ide> func TestAllocateAllIps(t *testing.T) {
<ide> t.Fatal(err)
<ide> }
<ide>
<del> if _, err := RequestIP(network, nil); err != ErrNoAvailableIPs {
<add> if _, err := a.RequestIP(network, nil); err != ErrNoAvailableIPs {
<ide> t.Fatal(err)
<ide> }
<ide>
<del> if err := ReleaseIP(network, first); err != nil {
<add> if err := a.ReleaseIP(network, first); err != nil {
<ide> t.Fatal(err)
<ide> }
<ide>
<del> again, err := RequestIP(network, nil)
<add> again, err := a.RequestIP(network, nil)
<ide> if err != nil {
<ide> t.Fatal(err)
<ide> }
<ide>
<ide> assertIPEquals(t, first, again)
<ide>
<ide> // ensure that alloc.last == alloc.begin won't result in dead loop
<del> if _, err := RequestIP(network, nil); err != ErrNoAvailableIPs {
<add> if _, err := a.RequestIP(network, nil); err != ErrNoAvailableIPs {
<ide> t.Fatal(err)
<ide> }
<ide>
<ide> // Test by making alloc.last the only free ip and ensure we get it back
<ide> // #1. first of the range, (alloc.last == ipToInt(first) already)
<del> if err := ReleaseIP(network, first); err != nil {
<add> if err := a.ReleaseIP(network, first); err != nil {
<ide> t.Fatal(err)
<ide> }
<ide>
<del> ret, err := RequestIP(network, nil)
<add> ret, err := a.RequestIP(network, nil)
<ide> if err != nil {
<ide> t.Fatal(err)
<ide> }
<ide> func TestAllocateAllIps(t *testing.T) {
<ide>
<ide> // #2. last of the range, note that current is the last one
<ide> last := net.IPv4(192, 168, 0, 254)
<del> setLastTo(t, network, last)
<add> setLastTo(t, a, network, last)
<ide>
<del> ret, err = RequestIP(network, nil)
<add> ret, err = a.RequestIP(network, nil)
<ide> if err != nil {
<ide> t.Fatal(err)
<ide> }
<ide> func TestAllocateAllIps(t *testing.T) {
<ide>
<ide> // #3. middle of the range
<ide> mid := net.IPv4(192, 168, 0, 7)
<del> setLastTo(t, network, mid)
<add> setLastTo(t, a, network, mid)
<ide>
<del> ret, err = RequestIP(network, nil)
<add> ret, err = a.RequestIP(network, nil)
<ide> if err != nil {
<ide> t.Fatal(err)
<ide> }
<ide> func TestAllocateAllIps(t *testing.T) {
<ide>
<ide> // make sure the pool is full when calling setLastTo.
<ide> // we don't cheat here
<del>func setLastTo(t *testing.T, network *net.IPNet, ip net.IP) {
<del> if err := ReleaseIP(network, ip); err != nil {
<add>func setLastTo(t *testing.T, a *IPAllocator, network *net.IPNet, ip net.IP) {
<add> if err := a.ReleaseIP(network, ip); err != nil {
<ide> t.Fatal(err)
<ide> }
<ide>
<del> ret, err := RequestIP(network, nil)
<add> ret, err := a.RequestIP(network, nil)
<ide> if err != nil {
<ide> t.Fatal(err)
<ide> }
<ide>
<ide> assertIPEquals(t, ip, ret)
<ide>
<del> if err := ReleaseIP(network, ip); err != nil {
<add> if err := a.ReleaseIP(network, ip); err != nil {
<ide> t.Fatal(err)
<ide> }
<ide> }
<ide>
<ide> func TestAllocateDifferentSubnets(t *testing.T) {
<del> defer reset()
<add> a := New()
<ide> network1 := &net.IPNet{
<ide> IP: []byte{192, 168, 0, 1},
<ide> Mask: []byte{255, 255, 255, 0},
<ide> func TestAllocateDifferentSubnets(t *testing.T) {
<ide> 8: net.ParseIP("2a00:1632::2"),
<ide> }
<ide>
<del> ip11, err := RequestIP(network1, nil)
<add> ip11, err := a.RequestIP(network1, nil)
<ide> if err != nil {
<ide> t.Fatal(err)
<ide> }
<del> ip12, err := RequestIP(network1, nil)
<add> ip12, err := a.RequestIP(network1, nil)
<ide> if err != nil {
<ide> t.Fatal(err)
<ide> }
<del> ip21, err := RequestIP(network2, nil)
<add> ip21, err := a.RequestIP(network2, nil)
<ide> if err != nil {
<ide> t.Fatal(err)
<ide> }
<del> ip22, err := RequestIP(network2, nil)
<add> ip22, err := a.RequestIP(network2, nil)
<ide> if err != nil {
<ide> t.Fatal(err)
<ide> }
<del> ip31, err := RequestIP(network3, nil)
<add> ip31, err := a.RequestIP(network3, nil)
<ide> if err != nil {
<ide> t.Fatal(err)
<ide> }
<del> ip32, err := RequestIP(network3, nil)
<add> ip32, err := a.RequestIP(network3, nil)
<ide> if err != nil {
<ide> t.Fatal(err)
<ide> }
<del> ip33, err := RequestIP(network3, nil)
<add> ip33, err := a.RequestIP(network3, nil)
<ide> if err != nil {
<ide> t.Fatal(err)
<ide> }
<del> ip41, err := RequestIP(network4, nil)
<add> ip41, err := a.RequestIP(network4, nil)
<ide> if err != nil {
<ide> t.Fatal(err)
<ide> }
<del> ip42, err := RequestIP(network4, nil)
<add> ip42, err := a.RequestIP(network4, nil)
<ide> if err != nil {
<ide> t.Fatal(err)
<ide> }
<ide> func TestAllocateDifferentSubnets(t *testing.T) {
<ide> }
<ide>
<ide> func TestRegisterBadTwice(t *testing.T) {
<del> defer reset()
<add> a := New()
<ide> network := &net.IPNet{
<ide> IP: []byte{192, 168, 1, 1},
<ide> Mask: []byte{255, 255, 255, 0},
<ide> func TestRegisterBadTwice(t *testing.T) {
<ide> Mask: []byte{255, 255, 255, 248},
<ide> }
<ide>
<del> if err := RegisterSubnet(network, subnet); err != nil {
<add> if err := a.RegisterSubnet(network, subnet); err != nil {
<ide> t.Fatal(err)
<ide> }
<ide> subnet = &net.IPNet{
<ide> IP: []byte{192, 168, 1, 16},
<ide> Mask: []byte{255, 255, 255, 248},
<ide> }
<del> if err := RegisterSubnet(network, subnet); err != ErrNetworkAlreadyRegistered {
<add> if err := a.RegisterSubnet(network, subnet); err != ErrNetworkAlreadyRegistered {
<ide> t.Fatalf("Expecteded ErrNetworkAlreadyRegistered error, got %v", err)
<ide> }
<ide> }
<ide>
<ide> func TestRegisterBadRange(t *testing.T) {
<del> defer reset()
<add> a := New()
<ide> network := &net.IPNet{
<ide> IP: []byte{192, 168, 1, 1},
<ide> Mask: []byte{255, 255, 255, 0},
<ide> func TestRegisterBadRange(t *testing.T) {
<ide> IP: []byte{192, 168, 1, 1},
<ide> Mask: []byte{255, 255, 0, 0},
<ide> }
<del> if err := RegisterSubnet(network, subnet); err != ErrBadSubnet {
<add> if err := a.RegisterSubnet(network, subnet); err != ErrBadSubnet {
<ide> t.Fatalf("Expected ErrBadSubnet error, got %v", err)
<ide> }
<ide> }
<ide>
<ide> func TestAllocateFromRange(t *testing.T) {
<del> defer reset()
<add> a := New()
<ide> network := &net.IPNet{
<ide> IP: []byte{192, 168, 0, 1},
<ide> Mask: []byte{255, 255, 255, 0},
<ide> func TestAllocateFromRange(t *testing.T) {
<ide> Mask: []byte{255, 255, 255, 248},
<ide> }
<ide>
<del> if err := RegisterSubnet(network, subnet); err != nil {
<add> if err := a.RegisterSubnet(network, subnet); err != nil {
<ide> t.Fatal(err)
<ide> }
<ide> expectedIPs := []net.IP{
<ide> func TestAllocateFromRange(t *testing.T) {
<ide> 5: net.IPv4(192, 168, 0, 14),
<ide> }
<ide> for _, ip := range expectedIPs {
<del> rip, err := RequestIP(network, nil)
<add> rip, err := a.RequestIP(network, nil)
<ide> if err != nil {
<ide> t.Fatal(err)
<ide> }
<ide> assertIPEquals(t, ip, rip)
<ide> }
<ide>
<del> if _, err := RequestIP(network, nil); err != ErrNoAvailableIPs {
<add> if _, err := a.RequestIP(network, nil); err != ErrNoAvailableIPs {
<ide> t.Fatalf("Expected ErrNoAvailableIPs error, got %v", err)
<ide> }
<ide> for _, ip := range expectedIPs {
<del> ReleaseIP(network, ip)
<del> rip, err := RequestIP(network, nil)
<add> a.ReleaseIP(network, ip)
<add> rip, err := a.RequestIP(network, nil)
<ide> if err != nil {
<ide> t.Fatal(err)
<ide> }
<ide> func BenchmarkRequestIP(b *testing.B) {
<ide> Mask: []byte{255, 255, 255, 0},
<ide> }
<ide> b.ResetTimer()
<add>
<ide> for i := 0; i < b.N; i++ {
<add> a := New()
<add>
<ide> for j := 0; j < 253; j++ {
<del> _, err := RequestIP(network, nil)
<add> _, err := a.RequestIP(network, nil)
<ide> if err != nil {
<ide> b.Fatal(err)
<ide> }
<ide> }
<del> reset()
<ide> }
<ide> } | 3 |
Python | Python | use fresh state between tests | 4a3f6c48e6f65203d7a1b4883a92fae2a2d043ab | <ide><path>numpy/core/tests/test_datetime.py
<ide> def test_pydatetime_creation(self):
<ide> def test_datetime_string_conversion(self):
<ide> a = ['2011-03-16', '1920-01-01', '2013-05-19']
<ide> str_a = np.array(a, dtype='S')
<add> uni_a = np.array(a, dtype='U')
<ide> dt_a = np.array(a, dtype='M')
<del> str_b = np.empty_like(str_a)
<del> dt_b = np.empty_like(dt_a)
<ide>
<ide> # String to datetime
<ide> assert_equal(dt_a, str_a.astype('M'))
<ide> assert_equal(dt_a.dtype, str_a.astype('M').dtype)
<add> dt_b = np.empty_like(dt_a)
<ide> dt_b[...] = str_a
<ide> assert_equal(dt_a, dt_b)
<add>
<ide> # Datetime to string
<ide> assert_equal(str_a, dt_a.astype('S0'))
<add> str_b = np.empty_like(str_a)
<ide> str_b[...] = dt_a
<ide> assert_equal(str_a, str_b)
<ide>
<del> # Convert the 'S' to 'U'
<del> str_a = str_a.astype('U')
<del> str_b = str_b.astype('U')
<del>
<ide> # Unicode to datetime
<del> assert_equal(dt_a, str_a.astype('M'))
<del> assert_equal(dt_a.dtype, str_a.astype('M').dtype)
<del> dt_b[...] = str_a
<add> assert_equal(dt_a, uni_a.astype('M'))
<add> assert_equal(dt_a.dtype, uni_a.astype('M').dtype)
<add> dt_b = np.empty_like(dt_a)
<add> dt_b[...] = uni_a
<ide> assert_equal(dt_a, dt_b)
<add>
<ide> # Datetime to unicode
<del> assert_equal(str_a, dt_a.astype('U'))
<add> assert_equal(uni_a, dt_a.astype('U'))
<add> uni_b = np.empty_like(uni_a)
<add> uni_b[...] = dt_a
<add> assert_equal(uni_a, uni_b)
<ide>
<ide> # Datetime to long string - gh-9712
<ide> assert_equal(str_a, dt_a.astype((np.string_, 128))) | 1 |
Go | Go | add more import comments | c725eff3e2ecb1701b1ecb7b61c926b467be0b3a | <ide><path>pkg/capabilities/caps.go
<ide> // Package capabilities allows to generically handle capabilities.
<del>package capabilities
<add>package capabilities // import "github.com/docker/docker/pkg/capabilities"
<ide>
<ide> // Set represents a set of capabilities.
<ide> type Set map[string]struct{}
<ide><path>pkg/capabilities/caps_test.go
<del>package capabilities
<add>package capabilities // import "github.com/docker/docker/pkg/capabilities"
<ide>
<ide> import (
<ide> "fmt"
<ide><path>rootless/rootless.go
<del>package rootless
<add>package rootless // import "github.com/docker/docker/rootless"
<ide>
<ide> import (
<ide> "os"
<ide><path>rootless/specconv/specconv_linux.go
<del>package specconv
<add>package specconv // import "github.com/docker/docker/rootless/specconv"
<ide>
<ide> import (
<ide> "io/ioutil" | 4 |
Javascript | Javascript | add "keyboardexample" to rntester | 584b968683167249b98304eeff2fb711fcbc51e8 | <ide><path>packages/rn-tester/js/examples/Keyboard/KeyboardExample.js
<add>/**
<add> * Copyright (c) Meta Platforms, Inc. and affiliates.
<add> *
<add> * This source code is licensed under the MIT license found in the
<add> * LICENSE file in the root directory of this source tree.
<add> *
<add> * @format
<add> * @flow strict-local
<add> */
<add>
<add>'use strict';
<add>
<add>import type {
<add> RNTesterModule,
<add> RNTesterModuleExample,
<add>} from '../../types/RNTesterTypes';
<add>import type {KeyboardEvent} from 'react-native/Libraries/Components/Keyboard/Keyboard';
<add>
<add>import * as React from 'react';
<add>import {useEffect, useState} from 'react';
<add>import {Keyboard, StyleSheet, Text, View} from 'react-native';
<add>
<add>type KeybpardEventViewerProps = {
<add> showEvent: 'keyboardWillShow' | 'keyboardDidShow',
<add> hideEvent: 'keyboardWillHide' | 'keyboardDidHide',
<add>};
<add>
<add>const KeyboardEventViewer = (props: KeybpardEventViewerProps): React.Node => {
<add> const {showEvent, hideEvent} = props;
<add> const [isShown, setIsShown] = useState(false);
<add> const [lastEvent, setLastEvent] = useState<?KeyboardEvent>();
<add>
<add> useEffect(() => {
<add> const subscription = Keyboard.addListener(showEvent, ev => {
<add> setIsShown(true);
<add> setLastEvent(ev);
<add> });
<add> return () => subscription.remove();
<add> }, [showEvent]);
<add>
<add> useEffect(() => {
<add> const subscription = Keyboard.addListener(hideEvent, ev => {
<add> setIsShown(false);
<add> setLastEvent(ev);
<add> });
<add> return () => subscription.remove();
<add> }, [hideEvent]);
<add>
<add> return (
<add> <View>
<add> <Text>
<add> <Text>Keyboard is </Text>
<add> {isShown ? (
<add> <Text style={styles.openText}>open</Text>
<add> ) : (
<add> <Text style={styles.closeText}>closed</Text>
<add> )}
<add> </Text>
<add> <View style={styles.eventBox}>
<add> <Text>
<add> {lastEvent
<add> ? JSON.stringify(lastEvent, null, 2)
<add> : 'No events observed'}
<add> </Text>
<add> </View>
<add> </View>
<add> );
<add>};
<add>
<add>const keyboardWillShowHideExample: RNTesterModuleExample = {
<add> title: 'keyboardWillShow / keyboardWillHide',
<add> platform: 'ios',
<add> render: () => (
<add> <KeyboardEventViewer
<add> showEvent="keyboardWillShow"
<add> hideEvent="keyboardWillHide"
<add> />
<add> ),
<add>};
<add>
<add>const keyboardDidShowHideExample: RNTesterModuleExample = {
<add> title: 'keyboardDidShow / keyboardDidHide',
<add> render: () => (
<add> <KeyboardEventViewer
<add> showEvent="keyboardDidShow"
<add> hideEvent="keyboardDidHide"
<add> />
<add> ),
<add>};
<add>
<add>const styles = StyleSheet.create({
<add> closeText: {
<add> color: 'red',
<add> },
<add> openText: {
<add> color: 'green',
<add> },
<add> eventBox: {
<add> marginTop: 10,
<add> padding: 5,
<add> borderWidth: StyleSheet.hairlineWidth,
<add> },
<add>});
<add>
<add>const KeyboardExample: RNTesterModule = {
<add> title: 'Keyboard',
<add> description: 'Demonstrates usage of the "Keyboard" static API',
<add> documentationURL: 'https://reactnative.dev/docs/keyboard',
<add> category: 'Basic',
<add> examples: [keyboardWillShowHideExample, keyboardDidShowHideExample],
<add>};
<add>
<add>export default KeyboardExample;
<ide><path>packages/rn-tester/js/utils/RNTesterList.android.js
<ide> const APIs: Array<RNTesterModuleInfo> = [
<ide> category: 'UI',
<ide> module: require('../examples/Dimensions/DimensionsExample'),
<ide> },
<add> {
<add> key: 'Keyboard',
<add> category: 'Basic',
<add> module: require('../examples/Keyboard/KeyboardExample').default,
<add> },
<ide> {
<ide> key: 'LayoutEventsExample',
<ide> category: 'UI',
<ide><path>packages/rn-tester/js/utils/RNTesterList.ios.js
<ide> const APIs: Array<RNTesterModuleInfo> = [
<ide> module: require('../examples/Dimensions/DimensionsExample'),
<ide> supportsTVOS: true,
<ide> },
<add> {
<add> key: 'Keyboard',
<add> module: require('../examples/Keyboard/KeyboardExample').default,
<add> supportsTVOS: true,
<add> },
<ide> {
<ide> key: 'LayoutAnimationExample',
<ide> module: require('../examples/Layout/LayoutAnimationExample'), | 3 |
Python | Python | fix outstanding typo in numpy version | 60608572d7740dabf287d2536684eacbc46561ab | <ide><path>numpy/f2py/cfuncs.py
<ide> 'ARRSIZE'] = '#define ARRSIZE(dims,rank) (_PyArray_multiply_list(dims,rank))'
<ide> cppmacros['OLDPYNUM'] = """\
<ide> #ifdef OLDPYNUM
<del>#error You need to install NumPy version 13 or higher. See https://scipy.org/install.html
<add>#error You need to install NumPy version 0.13 or higher. See https://scipy.org/install.html
<ide> #endif
<ide> """
<ide> ################# C functions ############### | 1 |
Javascript | Javascript | remove three-math from npm build, never used | 87b5f7e9bdc2de3b9e41e9f117d04f079bb73b7e | <ide><path>utils/npm/build.js
<ide> buildAll.stderr.on('data', function (data) {
<ide> buildAll.on( 'exit', function ( exitCode ) {
<ide> console.log( "exitCode: " + exitCode );
<ide> buildModule( "three" );
<del> buildModule( "three-math" );
<ide> }); | 1 |
Text | Text | add object.freeze to polyfill list | e954a1c0d992408748abf39d058a44cc19f5ab63 | <ide><path>docs/docs/07-working-with-the-browser.md
<ide> In addition to that philosophy, we've also taken the stance that we, as authors
<ide> `es5-sham.js`, also from [kriskowal's es5-shim](https://github.com/kriskowal/es5-shim), provides the following that React needs:
<ide>
<ide> * `Object.create`
<add>* `Object.freeze`
<ide>
<ide> The unminified build of React needs the following from [paulmillr's console-polyfill](https://github.com/paulmillr/console-polyfill).
<ide> | 1 |
Javascript | Javascript | use file url instead of relative url | 8a12e9994f047888ce25e0cc196e5da7f7c3dcae | <ide><path>lib/internal/bootstrap/pre_execution.js
<ide> function initializePolicy() {
<ide> // no bare specifiers for now
<ide> let manifestURL;
<ide> if (require('path').isAbsolute(experimentalPolicy)) {
<del> manifestURL = new URL(experimentalPolicy, 'file://');
<add> manifestURL = new URL(`file://${experimentalPolicy}`);
<ide> } else {
<ide> const cwdURL = pathToFileURL(process.cwd());
<ide> cwdURL.pathname += '/'; | 1 |
Javascript | Javascript | simplify rtladapter export | 10c3c3bbc9dfdf18a817f7a48acba1a334e39632 | <ide><path>src/helpers/helpers.rtl.js
<del>const getRtlAdapter = function(rectX, width) {
<add>const getRightToLeftAdapter = function(rectX, width) {
<ide> return {
<ide> x(x) {
<ide> return rectX + rectX + width - x;
<ide> const getRtlAdapter = function(rectX, width) {
<ide> };
<ide> };
<ide>
<del>const getLtrAdapter = function() {
<add>const getLeftToRightAdapter = function() {
<ide> return {
<ide> x(x) {
<ide> return x;
<ide> const getLtrAdapter = function() {
<ide> };
<ide> };
<ide>
<del>const getAdapter = function(rtl, rectX, width) {
<del> return rtl ? getRtlAdapter(rectX, width) : getLtrAdapter();
<del>};
<add>export function getRtlAdapter(rtl, rectX, width) {
<add> return rtl ? getRightToLeftAdapter(rectX, width) : getLeftToRightAdapter();
<add>}
<ide>
<del>const overrideTextDirection = function(ctx, direction) {
<add>export function overrideTextDirection(ctx, direction) {
<ide> let style, original;
<ide> if (direction === 'ltr' || direction === 'rtl') {
<ide> style = ctx.canvas.style;
<ide> const overrideTextDirection = function(ctx, direction) {
<ide> style.setProperty('direction', direction, 'important');
<ide> ctx.prevTextDirection = original;
<ide> }
<del>};
<add>}
<ide>
<del>const restoreTextDirection = function(ctx, original) {
<add>export function restoreTextDirection(ctx, original) {
<ide> if (original !== undefined) {
<ide> delete ctx.prevTextDirection;
<ide> ctx.canvas.style.setProperty('direction', original[0], original[1]);
<ide> }
<del>};
<del>
<del>export {
<del> getAdapter as getRtlAdapter,
<del> overrideTextDirection,
<del> restoreTextDirection
<del>};
<add>} | 1 |
PHP | PHP | add findorfail to query builder | bcf036f35ac78e469e9bf8c00d7dbf47bda972d1 | <ide><path>src/Illuminate/Database/Eloquent/Builder.php
<ide> public function first($columns = array('*'))
<ide> return $this->take(1)->get($columns)->first();
<ide> }
<ide>
<add> /**
<add> * Find a model by its primary key or throw an exception.
<add> *
<add> * @param mixed $id
<add> * @param array $columns
<add> * @return \Illuminate\Database\Eloquent\Model|Collection
<add> */
<add> public function findOrFail($id, $columns = array('*'))
<add> {
<add> if ( ! is_null($model = $this->find($id, $columns))) return $model;
<add>
<add> throw new ModelNotFoundException;
<add> }
<add>
<ide> /**
<ide> * Execute the query and get the first result or throw an exception.
<ide> * | 1 |
Javascript | Javascript | deprecate the module and its contents | 67f54b660038de2b4346b3e76d66a8dc8ccb1f9b | <ide><path>src/ngTouch/directive/ngSwipe.js
<ide> * @ngdoc directive
<ide> * @name ngSwipeLeft
<ide> *
<add> * @deprecated
<add> * sinceVersion="1.7.0"
<add> *
<add> * See the {@link ngTouch module} documentation for more information.
<add> *
<ide> * @description
<ide> * Specify custom behavior when an element is swiped to the left on a touchscreen device.
<ide> * A leftward swipe is a quick, right-to-left slide of the finger.
<ide> * @ngdoc directive
<ide> * @name ngSwipeRight
<ide> *
<add> * @deprecated
<add> * sinceVersion="1.7.0"
<add> *
<add> * See the {@link ngTouch module} documentation for more information.
<add> *
<ide> * @description
<ide> * Specify custom behavior when an element is swiped to the right on a touchscreen device.
<ide> * A rightward swipe is a quick, left-to-right slide of the finger.
<ide><path>src/ngTouch/swipe.js
<ide> * @ngdoc service
<ide> * @name $swipe
<ide> *
<add> * @deprecated
<add> * sinceVersion="1.7.0"
<add> *
<add> * See the {@link ngTouch module} documentation for more information.
<add> *
<ide> * @description
<ide> * The `$swipe` service is a service that abstracts the messier details of hold-and-drag swipe
<ide> * behavior, to make implementing swipe-related directives more convenient.
<ide><path>src/ngTouch/touch.js
<ide> *
<ide> * See {@link ngTouch.$swipe `$swipe`} for usage.
<ide> *
<add> * @deprecated
<add> * sinceVersion="1.7.0"
<add> * The ngTouch module with the {@link ngTouch.$swipe `$swipe`} service and
<add> * the {@link ngTouch.ngSwipeLeft} and {@link ngTouch.ngSwipeRight} directives are
<add> * deprecated. Instead, stand-alone libraries for touch handling and gesture interaction
<add> * should be used, for example [HammerJS](https://hammerjs.github.io/) (which is also used by
<add> * Angular).
<ide> */
<ide>
<ide> // define ngTouch module | 3 |
Text | Text | fix typos and wording [ci skip] | 25a595dc10ef60ee5d20b6f06bf123119e2a0238 | <ide><path>website/docs/api/language.md
<ide> subclass of the built-in `dict`. It supports the additional methods `to_disk`
<ide>
<ide> ## Language.to_disk {#to_disk tag="method" new="2"}
<ide>
<del>Save the current state to a directory. If a trained pipeline is loaded, this
<del>will **include all model data**.
<add>Save the current state to a directory. Under the hood, this method delegates to
<add>the `to_disk` methods of the individual pipeline components, if available. This
<add>means that if a trained pipeline is loaded, all components and their weights
<add>will be saved to disk.
<ide>
<ide> > #### Example
<ide> >
<ide><path>website/docs/usage/linguistic-features.md
<ide> print(doc.text, [token.text for token in doc])
<ide>
<ide> Keep in mind that your models' results may be less accurate if the tokenization
<ide> during training differs from the tokenization at runtime. So if you modify a
<del>trained pipeline' tokenization afterwards, it may produce very different
<add>trained pipeline's tokenization afterwards, it may produce very different
<ide> predictions. You should therefore train your pipeline with the **same
<ide> tokenizer** it will be using at runtime. See the docs on
<ide> [training with custom tokenization](#custom-tokenizer-training) for details. | 2 |
Python | Python | add assertisinstance to flasktestcase on 2.6 | d43bfb261a0cb562fdfec6d94768c80d95844578 | <ide><path>flask/testsuite/__init__.py
<ide> def assertIn(self, x, y):
<ide> def assertNotIn(self, x, y):
<ide> assert x not in y, "%r unexpectedly in %r" % (x, y)
<ide>
<add> def assertIsInstance(self, x, y):
<add> assert isinstance(x, y), "not isinstance(%r, %r)" % (x, y)
<add>
<ide>
<ide> class _ExceptionCatcher(object):
<ide> | 1 |
Text | Text | fix typos in text | d76bccc2f85c5eaa768ef12bc24159800ae19e24 | <ide><path>guide/english/vim/useful-commands/index.md
<ide> Depending on the configuration, you may enter a file browser by typing and enter
<ide> ## Pasting blocks of code
<ide>
<ide> Very often you will find yourself looking for solutions to problems, and finding someone has written a block of code that does exactly what you want.
<del>If you try to copy and paste the code directly into Vim you might find that the code is weirdly formatted or hasnt't been pasted correctly.
<add>If you try to copy and paste the code directly into Vim you might find that the code is weirdly formatted or hasn't been pasted correctly.
<ide> This is due to the fact that Vim reads each character that you paste one after the other, meaning any key-combinations that activates a Vim shortcut will be executed and Vim will try (and fail) to automatically indent the pasted code.
<ide>
<ide> To overcome this you can use Vim's **Paste mode** which you can activate by entering normal mode (press `escape` or `crtl + c`) and type `:set paste`, then press ENTER.
<ide> To repeat the last change made to a file, press `.` in __Normal Mode__.
<ide> - vi filename +n, where n is the line number
<ide>
<ide> ## Opening a file searching for word/term
<del>- vi filename +/word, where word is what you are looking for. The cursor will be positioned on the first occurence of the word.
<add>- vi filename +/word, where word is what you are looking for. The cursor will be positioned on the first occurrence of the word.
<ide>
<ide> ## Searching a file in Vim
<ide>
<ide> Vim has very tight integration with the `make` build automation tool. If your pr
<ide> 3. Type `%s,word_to_be_replaced,new_word,g`;
<ide> 4. Press `ENTER`.
<ide>
<del>Important: if you want to replace the first occurence of the word, remove the `g` at the end.
<add>Important: if you want to replace the first occurrence of the word, remove the `g` at the end.
<ide>
<ide> ## I Want to Learn Vim!
<ide> | 1 |
Go | Go | fix a typo | 4ddc721f234ebb721b9540af3a9358da2f3e6e58 | <ide><path>integration-cli/docker_api_resize_test.go
<ide> func TestResizeApiResponseWhenContainerNotStarted(t *testing.T) {
<ide> defer deleteAllContainers()
<ide> cleanedContainerID := strings.TrimSpace(out)
<ide>
<del> // make sure the exited cintainer is not running
<add> // make sure the exited container is not running
<ide> runCmd = exec.Command(dockerBinary, "wait", cleanedContainerID)
<ide> out, _, err = runCommandWithOutput(runCmd)
<ide> if err != nil { | 1 |
Ruby | Ruby | make some constant strings into actual constants | 33cdff99bad133f36935dce23f0889c71d70105b | <ide><path>Library/Homebrew/cmd/bottle.rb
<ide>
<ide> BOTTLE_ERB = <<-EOS
<ide> bottle do
<del> <% if root_url != BottleSpecification.new.root_url %>
<add> <% if root_url != BottleSpecification::DEFAULT_ROOT_URL %>
<ide> root_url "<%= root_url %>"
<ide> <% end %>
<ide> <% if prefix.to_s != "/usr/local" %>
<ide><path>Library/Homebrew/software_spec.rb
<ide> def compatible_cellar?
<ide> end
<ide>
<ide> class BottleSpecification
<add> DEFAULT_PREFIX = "/usr/local".freeze
<add> DEFAULT_CELLAR = "/usr/local/Cellar".freeze
<add> DEFAULT_ROOT_URL = "https://downloads.sf.net/project/machomebrew/Bottles".freeze
<add>
<ide> attr_rw :root_url, :prefix, :cellar, :revision
<ide> attr_reader :checksum, :collector
<ide>
<ide> def initialize
<ide> @revision = 0
<del> @prefix = '/usr/local'
<del> @cellar = '/usr/local/Cellar'
<del> @root_url = 'https://downloads.sf.net/project/machomebrew/Bottles'
<add> @prefix = DEFAULT_PREFIX
<add> @cellar = DEFAULT_CELLAR
<add> @root_url = DEFAULT_ROOT_URL
<ide> @collector = BottleCollector.new
<ide> end
<ide> | 2 |
Text | Text | fix logo path | 230dc07d372cccb1aa1198ae300d06e418c73af6 | <ide><path>README.md
<ide> Platform-as-a-Service. It benefits directly from the experience
<ide> accumulated over several years of large-scale operation and support of
<ide> hundreds of thousands of applications and databases.
<ide>
<del>
<add>
<ide>
<ide> ## Better than VMs
<ide> | 1 |
Python | Python | convert fortran flags from environment variable | eb85c3fda4a1a2687745d7ac5d0a96fc94691f77 | <ide><path>numpy/distutils/fcompiler/environment.py
<ide> def _get_var(self, name, conf_desc):
<ide> if envvar is not None:
<ide> envvar_contents = os.environ.get(envvar)
<ide> if envvar_contents is not None:
<del> if var and append:
<add> if convert:
<add> envvar_contents = convert(envvar_contents)
<add> if var and append: # in case var is None?
<ide> if os.environ.get('NPY_DISTUTILS_APPEND_FLAGS', '0') == '1':
<del> var = var + [envvar_contents]
<add> var.extend(envvar_contents)
<ide> else:
<ide> var = envvar_contents
<ide> if 'NPY_DISTUTILS_APPEND_FLAGS' not in os.environ.keys():
<ide> def _get_var(self, name, conf_desc):
<ide> else:
<ide> var = envvar_contents
<ide> if confvar is not None and self._conf:
<add> # FIXME: conf shall do the conversion immediately after the key is parsed.
<add> # postpartum conversion will not always catch the right cases.
<ide> var = self._conf.get(confvar, (None, var))[1]
<ide> if convert is not None:
<ide> var = convert(var) | 1 |
Javascript | Javascript | fix fingerprint name | fbd9fcd8fbfa14688e60c58cf062b55d04c2bfe0 | <ide><path>src/api.js
<ide> return this.pdf.numPages;
<ide> },
<ide> get fingerprint() {
<del> return this.pdf.fingerPrint;
<add> return this.pdf.fingerprint;
<ide> },
<ide> getPage: function(number) {
<ide> var promise = new PDFJS.Promise(); | 1 |
Go | Go | add cleanupin tests to remove leftover containers | f854529ae8c9e0139e9ca0466c597b133e3bf41e | <ide><path>runtime_test.go
<ide> func init() {
<ide> globalRuntime = runtime
<ide> }
<ide>
<add> // Cleanup any leftover container
<add> for _, container := range globalRuntime.List() {
<add> if err := globalRuntime.Destroy(container); err != nil {
<add> log.Fatalf("Error destroying leftover container: %s", err)
<add> }
<add> }
<add>
<ide> // Create the "Server"
<ide> srv := &Server{
<ide> runtime: globalRuntime, | 1 |
Javascript | Javascript | remove debug code from tests + re-enable tests | a29d67eea3c852385bf0c1f4e9e00400c7aedc66 | <ide><path>packager/react-packager/src/Bundler/__tests__/Bundle-test.js
<ide> describe('Bundle', () => {
<ide> });
<ide> });
<ide>
<del> fpit('should insert modules in a deterministic order, independent from timing of the wrapping process', () => {
<add> pit('should insert modules in a deterministic order, independent from timing of the wrapping process', () => {
<ide> const moduleTransports = [
<ide> createModuleTransport({name: 'module1'}),
<ide> createModuleTransport({name: 'module2'}),
<ide> describe('Bundle', () => {
<ide> }
<ide> };
<ide>
<del> console.log(bundle.addModule+'')
<ide> const promise = Promise.all(
<ide> moduleTransports.map(m => bundle.addModule(resolver, null, null, m)))
<ide> .then(() => { | 1 |
Javascript | Javascript | add default attribute for use with track element | bb3a326ecd38698ac02d93019f7ff126e8ef4a11 | <ide><path>src/renderers/dom/shared/HTMLDOMPropertyConfig.js
<ide> var HTMLDOMPropertyConfig = {
<ide> crossOrigin: null,
<ide> data: null, // For `<object />` acts as `src`.
<ide> dateTime: MUST_USE_ATTRIBUTE,
<add> default: HAS_BOOLEAN_VALUE,
<ide> defer: HAS_BOOLEAN_VALUE,
<ide> dir: null,
<ide> disabled: MUST_USE_ATTRIBUTE | HAS_BOOLEAN_VALUE, | 1 |
Javascript | Javascript | add error for new behavior | 7851e77cefefdb589b3f844b23a73366e3488f64 | <ide><path>src/geometries/ParametricGeometry.js
<ide> function ParametricBufferGeometry( func, slices, stacks ) {
<ide>
<ide> var i, j;
<ide>
<add> if ( func.length < 3 ) {
<add>
<add> console.error( 'Parametric geometries now require modification of a third THREE.Vector3 argument.' );
<add>
<add> }
<add>
<ide> // generate vertices, normals and uvs
<ide>
<ide> var sliceCount = slices + 1; | 1 |
Java | Java | add gson converter to mvc config | e3a6fce403901499c18b8a794be02e7b46af268b | <ide><path>spring-webmvc/src/main/java/org/springframework/web/servlet/config/AnnotationDrivenBeanDefinitionParser.java
<ide> import java.util.Properties;
<ide>
<ide> import org.springframework.web.servlet.mvc.method.annotation.JsonViewResponseBodyInterceptor;
<add>import org.springframework.http.converter.json.GsonHttpMessageConverter;
<ide> import org.w3c.dom.Element;
<ide>
<ide> import org.springframework.beans.factory.FactoryBean;
<ide> class AnnotationDrivenBeanDefinitionParser implements BeanDefinitionParser {
<ide> ClassUtils.isPresent("com.fasterxml.jackson.databind.ObjectMapper", AnnotationDrivenBeanDefinitionParser.class.getClassLoader()) &&
<ide> ClassUtils.isPresent("com.fasterxml.jackson.core.JsonGenerator", AnnotationDrivenBeanDefinitionParser.class.getClassLoader());
<ide>
<add> private static final boolean gsonPresent =
<add> ClassUtils.isPresent("com.google.gson.Gson", AnnotationDrivenBeanDefinitionParser.class.getClassLoader());
<add>
<ide>
<ide> @Override
<ide> public BeanDefinition parse(Element element, ParserContext parserContext) {
<ide> private Properties getDefaultMediaTypes() {
<ide> if (jaxb2Present) {
<ide> props.put("xml", MediaType.APPLICATION_XML_VALUE);
<ide> }
<del> if (jackson2Present) {
<add> if (jackson2Present || gsonPresent) {
<ide> props.put("json", MediaType.APPLICATION_JSON_VALUE);
<ide> }
<ide> return props;
<ide> private ManagedList<?> getMessageConverters(Element element, Object source, Pars
<ide> if (jackson2Present) {
<ide> messageConverters.add(createConverterDefinition(MappingJackson2HttpMessageConverter.class, source));
<ide> }
<add> else if (gsonPresent) {
<add> messageConverters.add(createConverterDefinition(GsonHttpMessageConverter.class, source));
<add> }
<ide> }
<ide> return messageConverters;
<ide> }
<ide><path>spring-webmvc/src/main/java/org/springframework/web/servlet/config/annotation/WebMvcConfigurationSupport.java
<ide> import org.springframework.http.converter.StringHttpMessageConverter;
<ide> import org.springframework.http.converter.feed.AtomFeedHttpMessageConverter;
<ide> import org.springframework.http.converter.feed.RssChannelHttpMessageConverter;
<add>import org.springframework.http.converter.json.GsonHttpMessageConverter;
<ide> import org.springframework.http.converter.json.MappingJackson2HttpMessageConverter;
<ide> import org.springframework.http.converter.support.AllEncompassingFormHttpMessageConverter;
<ide> import org.springframework.http.converter.xml.Jaxb2RootElementHttpMessageConverter;
<ide> public class WebMvcConfigurationSupport implements ApplicationContextAware, Serv
<ide> ClassUtils.isPresent("com.fasterxml.jackson.databind.ObjectMapper", WebMvcConfigurationSupport.class.getClassLoader()) &&
<ide> ClassUtils.isPresent("com.fasterxml.jackson.core.JsonGenerator", WebMvcConfigurationSupport.class.getClassLoader());
<ide>
<add> private static final boolean gsonPresent =
<add> ClassUtils.isPresent("com.google.gson.Gson", WebMvcConfigurationSupport.class.getClassLoader());
<add>
<ide>
<ide> private ServletContext servletContext;
<ide>
<ide> protected Map<String, MediaType> getDefaultMediaTypes() {
<ide> if (jaxb2Present) {
<ide> map.put("xml", MediaType.APPLICATION_XML);
<ide> }
<del> if (jackson2Present) {
<add> if (jackson2Present || gsonPresent) {
<ide> map.put("json", MediaType.APPLICATION_JSON);
<ide> }
<ide> return map;
<ide> protected final void addDefaultHttpMessageConverters(List<HttpMessageConverter<?
<ide> if (jackson2Present) {
<ide> messageConverters.add(new MappingJackson2HttpMessageConverter());
<ide> }
<add> else if (gsonPresent) {
<add> messageConverters.add(new GsonHttpMessageConverter());
<add> }
<ide> }
<ide>
<ide> /** | 2 |
Python | Python | apply `print` fixer | bb726ca19f434f5055c0efceefe48d89469fcbbe | <ide><path>doc/cdoc/numpyfilter.py
<ide> Also, add Doxygen /** and /**< syntax automatically where appropriate.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide> import re
<ide><path>doc/cython/run_test.py
<ide> #!/usr/bin/env python
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from numpyx import test
<ide> test()
<ide><path>doc/cython/setup.py
<ide> http://cython.org.
<ide>
<ide> """
<del>from __future__ import division
<add>from __future__ import division, print_function
<ide>
<ide> from distutils.core import setup
<ide> from distutils.extension import Extension
<ide><path>doc/example.py
<ide> a line by itself, preferably preceeded by a blank line.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import os # standard library imports first
<ide>
<ide><path>doc/newdtype_example/example.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import floatint.floatint as ff
<ide> import numpy as np
<ide>
<ide> # Now, the elements will be the scalar type associated
<ide> # with the ndarray.
<del>print g[0]
<del>print type(g[1])
<add>print(g[0])
<add>print(type(g[1]))
<ide>
<ide> # Now, you need to register ufuncs and more arrfuncs to do useful things...
<ide><path>doc/newdtype_example/floatint/__init__.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide>
<ide><path>doc/newdtype_example/setup.py
<del>from __future__ import division
<add>from __future__ import division, print_function
<ide>
<ide> from numpy.distutils.core import setup
<ide>
<ide><path>doc/numpybook/comparison/ctypes/filter.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> __all__ = ['filter2d']
<ide>
<ide><path>doc/numpybook/comparison/ctypes/interface.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> __all__ = ['add', 'filter2d']
<ide>
<ide><path>doc/numpybook/comparison/pyrex/setup.py
<ide> #!/usr/bin/env python
<del>from __future__ import division
<add>from __future__ import division, print_function
<ide>
<ide> from distutils.core import setup
<ide> from distutils.extension import Extension
<ide><path>doc/numpybook/comparison/timing.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import timeit
<ide>
<ide> for kind in ['f2py']:#['ctypes', 'pyrex', 'weave', 'f2py']:
<ide> res[kind] = []
<ide> sys.path = ['/Users/oliphant/numpybook/%s' % (kind,)] + path
<del> print sys.path
<add> print(sys.path)
<ide> for n in N:
<del> print "%s - %d" % (kind, n)
<add> print("%s - %d" % (kind, n))
<ide> t = timeit.Timer(eval('%s_run'%kind), eval('%s_pre %% (%d,%d)'%(kind,n,n)))
<ide> mytime = min(t.repeat(3,100))
<ide> res[kind].append(mytime)
<ide><path>doc/numpybook/comparison/weave/filter.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from scipy import weave, zeros_like
<ide>
<ide><path>doc/numpybook/comparison/weave/inline.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from scipy import weave
<ide> from numpy import rand, zeros_like
<ide> def arr(a):
<ide> return b
<ide>
<ide> a = [None]*10
<del>print example1(a)
<del>print a
<add>print(example1(a))
<add>print(a)
<ide>
<ide> a = rand(512,512)
<ide> b = arr(a)
<ide><path>doc/numpybook/runcode.py
<ide> -n name of code section (default MyCode)
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide> import optparse
<ide> newre = re.compile(r"\\begin_inset Note.*PYNEW\s+\\end_inset", re.DOTALL)
<ide>
<ide> def getoutput(tstr, dic):
<del> print "\n\nRunning..."
<del> print tstr,
<add> print("\n\nRunning...")
<add> print(tstr, end=' ')
<ide> tempstr = io.StringIO()
<ide> sys.stdout = tempstr
<ide> code = compile(tstr, '<input>', 'exec')
<ide> def getoutput(tstr, dic):
<ide> else:
<ide> res = tempstr.getvalue() + '\n' + repr(res)
<ide> if res != '':
<del> print "\nOutput is"
<del> print res,
<add> print("\nOutput is")
<add> print(res, end=' ')
<ide> return res
<ide>
<ide> # now find the code in the code segment
<ide> def runpycode(lyxstr, name='MyCode'):
<ide> num += 1
<ide>
<ide> if num == 0:
<del> print "Nothing found for %s" % name
<add> print("Nothing found for %s" % name)
<ide> return lyxstr
<ide>
<ide> start = 0
<ide> def main(args):
<ide> fid = file(args[0])
<ide> str = fid.read()
<ide> fid.close()
<del> print "Processing %s" % options.name
<add> print("Processing %s" % options.name)
<ide> newstr = runpycode(str, options.name)
<ide> fid = file(args[0],'w')
<ide> fid.write(newstr)
<ide><path>doc/postprocess.py
<ide> MODE is either 'html' or 'tex'.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import re, optparse
<ide>
<ide><path>doc/pyrex/run_test.py
<ide> #!/usr/bin/env python
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from numpyx import test
<ide> test()
<ide><path>doc/pyrex/setup.py
<ide> http://www.scipy.org/Cookbook/ArrayStruct_and_Pyrex
<ide>
<ide> """
<del>from __future__ import division
<add>from __future__ import division, print_function
<ide>
<ide> from distutils.core import setup
<ide> from distutils.extension import Extension
<ide><path>doc/source/conf.py
<ide> # -*- coding: utf-8 -*-
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys, os, re
<ide>
<ide><path>doc/sphinxext/numpydoc/__init__.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from .numpydoc import setup
<ide><path>doc/sphinxext/numpydoc/comment_eater.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide> if sys.version_info[0] >= 3:
<ide><path>doc/sphinxext/numpydoc/compiler_unparse.py
<ide> fixme: We may want to move to using _ast trees because the compiler for
<ide> them is about 6 times faster than compiler.compile.
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide> from compiler.ast import Const, Name, Tuple, Div, Mul, Sub, Add
<ide><path>doc/sphinxext/numpydoc/docscrape.py
<ide> """Extract reference documentation from the NumPy source tree.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<del>import sys
<ide> import inspect
<ide> import textwrap
<ide> import re
<ide> import pydoc
<ide> from warnings import warn
<ide> import collections
<ide>
<del>if sys.version_info[0] >= 3:
<del> from io import StringIO
<del>else:
<del> from io import StringIO
<ide>
<ide> class Reader(object):
<ide> """A line-based string reader.
<ide><path>doc/sphinxext/numpydoc/docscrape_sphinx.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import re, inspect, textwrap, pydoc
<ide> import sphinx
<ide><path>doc/sphinxext/numpydoc/linkcode.py
<ide> :license: BSD, see LICENSE for details.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import warnings
<ide> import collections
<ide><path>doc/sphinxext/numpydoc/numpydoc.py
<ide> .. [1] https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sphinx
<ide> import collections
<ide><path>doc/sphinxext/numpydoc/phantom_import.py
<ide> .. [1] http://code.google.com/p/pydocweb
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import imp, sys, compiler, types, os, inspect, re
<ide>
<ide><path>doc/sphinxext/numpydoc/plot_directive.py
<ide> to make them appear side-by-side, or in floats.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys, os, glob, shutil, imp, warnings, re, textwrap, traceback
<ide> import sphinx
<ide><path>doc/sphinxext/numpydoc/tests/test_docscrape.py
<ide> # -*- encoding:utf-8 -*-
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys, textwrap
<ide>
<ide><path>doc/sphinxext/numpydoc/tests/test_linkcode.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import numpydoc.linkcode
<ide>
<ide><path>doc/sphinxext/numpydoc/tests/test_phantom_import.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import numpydoc.phantom_import
<ide>
<ide><path>doc/sphinxext/numpydoc/tests/test_plot_directive.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import numpydoc.plot_directive
<ide>
<ide><path>doc/sphinxext/numpydoc/tests/test_traitsdoc.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import numpydoc.traitsdoc
<ide>
<ide><path>doc/sphinxext/numpydoc/traitsdoc.py
<ide> .. [2] http://code.enthought.com/projects/traits/
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import inspect
<ide> import os
<ide><path>doc/sphinxext/setup.py
<del>from __future__ import division
<add>from __future__ import division, print_function
<ide>
<ide> import setuptools
<ide> from distutils.core import setup
<ide><path>doc/summarize.py
<ide> Show a summary about which Numpy functions are documented and which are not.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import os, glob, re, sys, inspect, optparse
<ide> import collections
<ide> def main():
<ide> for (filename, section, keyword, toctree) in locations:
<ide> in_sections.setdefault((filename, section, keyword), []).append(name)
<ide>
<del> print "Documented"
<del> print "==========\n"
<add> print("Documented")
<add> print("==========\n")
<ide>
<ide> last_filename = None
<ide> for (filename, section, keyword), names in sorted(in_sections.items()):
<ide> if filename != last_filename:
<del> print "--- %s\n" % filename
<add> print("--- %s\n" % filename)
<ide> last_filename = filename
<del> print " ** ", section
<del> print format_in_columns(sorted(names), options.cols)
<del> print "\n"
<del>
<del> print ""
<del> print "Undocumented"
<del> print "============\n"
<del> print format_in_columns(sorted(undocumented.keys()), options.cols)
<add> print(" ** ", section)
<add> print(format_in_columns(sorted(names), options.cols))
<add> print("\n")
<add>
<add> print("")
<add> print("Undocumented")
<add> print("============\n")
<add> print(format_in_columns(sorted(undocumented.keys()), options.cols))
<ide>
<ide> def check_numpy():
<ide> documented = get_documented(glob.glob(SOURCE_DIR + '/*.rst'))
<ide><path>doc/swig/test/setup.py
<ide> #! /usr/bin/env python
<del>from __future__ import division
<add>from __future__ import division, print_function
<ide>
<ide> # System imports
<ide> from distutils.core import *
<ide><path>doc/swig/test/testArray.py
<ide> #! /usr/bin/env python
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> # System imports
<ide> from distutils.util import get_platform
<ide> def testView(self):
<ide> suite.addTest(unittest.makeSuite(Array2TestCase))
<ide>
<ide> # Execute the test suite
<del> print "Testing Classes of Module Array"
<del> print "NumPy version", np.__version__
<del> print
<add> print("Testing Classes of Module Array")
<add> print("NumPy version", np.__version__)
<add> print()
<ide> result = unittest.TextTestRunner(verbosity=2).run(suite)
<ide> sys.exit(len(result.errors) + len(result.failures))
<ide><path>doc/swig/test/testFarray.py
<ide> #! /usr/bin/env python
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> # System imports
<ide> from distutils.util import get_platform
<ide> def testView(self):
<ide> suite.addTest(unittest.makeSuite(FarrayTestCase))
<ide>
<ide> # Execute the test suite
<del> print "Testing Classes of Module Farray"
<del> print "NumPy version", np.__version__
<del> print
<add> print("Testing Classes of Module Farray")
<add> print("NumPy version", np.__version__)
<add> print()
<ide> result = unittest.TextTestRunner(verbosity=2).run(suite)
<ide> sys.exit(len(result.errors) + len(result.failures))
<ide><path>doc/swig/test/testFortran.py
<ide> #! /usr/bin/env python
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> # System imports
<ide> from distutils.util import get_platform
<ide> def __init__(self, methodName="runTests"):
<ide> # Test (type* IN_FARRAY2, int DIM1, int DIM2) typemap
<ide> def testSecondElementContiguous(self):
<ide> "Test luSplit function with a Fortran-array"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> second = Fortran.__dict__[self.typeStr + "SecondElement"]
<ide> matrix = np.arange(9).reshape(3, 3).astype(self.typeCode)
<ide> self.assertEquals(second(matrix), 3)
<ide>
<ide> def testSecondElementFortran(self):
<ide> "Test luSplit function with a Fortran-array"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> second = Fortran.__dict__[self.typeStr + "SecondElement"]
<ide> matrix = np.asfortranarray(np.arange(9).reshape(3, 3),
<ide> self.typeCode)
<ide> self.assertEquals(second(matrix), 3)
<ide>
<ide> def testSecondElementObject(self):
<ide> "Test luSplit function with a Fortran-array"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> second = Fortran.__dict__[self.typeStr + "SecondElement"]
<ide> matrix = np.asfortranarray([[0,1,2],[3,4,5],[6,7,8]], self.typeCode)
<ide> self.assertEquals(second(matrix), 3)
<ide> def __init__(self, methodName="runTest"):
<ide> suite.addTest(unittest.makeSuite( doubleTestCase))
<ide>
<ide> # Execute the test suite
<del> print "Testing 2D Functions of Module Matrix"
<del> print "NumPy version", np.__version__
<del> print
<add> print("Testing 2D Functions of Module Matrix")
<add> print("NumPy version", np.__version__)
<add> print()
<ide> result = unittest.TextTestRunner(verbosity=2).run(suite)
<ide> sys.exit(len(result.errors) + len(result.failures))
<ide><path>doc/swig/test/testMatrix.py
<ide> #! /usr/bin/env python
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> # System imports
<ide> from distutils.util import get_platform
<ide> def __init__(self, methodName="runTests"):
<ide> # Test (type IN_ARRAY2[ANY][ANY]) typemap
<ide> def testDet(self):
<ide> "Test det function"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> det = Matrix.__dict__[self.typeStr + "Det"]
<ide> matrix = [[8,7],[6,9]]
<ide> self.assertEquals(det(matrix), 30)
<ide>
<ide> # Test (type IN_ARRAY2[ANY][ANY]) typemap
<ide> def testDetBadList(self):
<ide> "Test det function with bad list"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> det = Matrix.__dict__[self.typeStr + "Det"]
<ide> matrix = [[8,7], ["e", "pi"]]
<ide> self.assertRaises(BadListError, det, matrix)
<ide>
<ide> # Test (type IN_ARRAY2[ANY][ANY]) typemap
<ide> def testDetWrongDim(self):
<ide> "Test det function with wrong dimensions"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> det = Matrix.__dict__[self.typeStr + "Det"]
<ide> matrix = [8,7]
<ide> self.assertRaises(TypeError, det, matrix)
<ide>
<ide> # Test (type IN_ARRAY2[ANY][ANY]) typemap
<ide> def testDetWrongSize(self):
<ide> "Test det function with wrong size"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> det = Matrix.__dict__[self.typeStr + "Det"]
<ide> matrix = [[8,7,6], [5,4,3], [2,1,0]]
<ide> self.assertRaises(TypeError, det, matrix)
<ide>
<ide> # Test (type IN_ARRAY2[ANY][ANY]) typemap
<ide> def testDetNonContainer(self):
<ide> "Test det function with non-container"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> det = Matrix.__dict__[self.typeStr + "Det"]
<ide> self.assertRaises(TypeError, det, None)
<ide>
<ide> # Test (type* IN_ARRAY2, int DIM1, int DIM2) typemap
<ide> def testMax(self):
<ide> "Test max function"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> max = Matrix.__dict__[self.typeStr + "Max"]
<ide> matrix = [[6,5,4],[3,2,1]]
<ide> self.assertEquals(max(matrix), 6)
<ide>
<ide> # Test (type* IN_ARRAY2, int DIM1, int DIM2) typemap
<ide> def testMaxBadList(self):
<ide> "Test max function with bad list"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> max = Matrix.__dict__[self.typeStr + "Max"]
<ide> matrix = [[6,"five",4], ["three", 2, "one"]]
<ide> self.assertRaises(BadListError, max, matrix)
<ide>
<ide> # Test (type* IN_ARRAY2, int DIM1, int DIM2) typemap
<ide> def testMaxNonContainer(self):
<ide> "Test max function with non-container"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> max = Matrix.__dict__[self.typeStr + "Max"]
<ide> self.assertRaises(TypeError, max, None)
<ide>
<ide> # Test (type* IN_ARRAY2, int DIM1, int DIM2) typemap
<ide> def testMaxWrongDim(self):
<ide> "Test max function with wrong dimensions"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> max = Matrix.__dict__[self.typeStr + "Max"]
<ide> self.assertRaises(TypeError, max, [0, 1, 2, 3])
<ide>
<ide> # Test (int DIM1, int DIM2, type* IN_ARRAY2) typemap
<ide> def testMin(self):
<ide> "Test min function"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> min = Matrix.__dict__[self.typeStr + "Min"]
<ide> matrix = [[9,8],[7,6],[5,4]]
<ide> self.assertEquals(min(matrix), 4)
<ide>
<ide> # Test (int DIM1, int DIM2, type* IN_ARRAY2) typemap
<ide> def testMinBadList(self):
<ide> "Test min function with bad list"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> min = Matrix.__dict__[self.typeStr + "Min"]
<ide> matrix = [["nine","eight"], ["seven","six"]]
<ide> self.assertRaises(BadListError, min, matrix)
<ide>
<ide> # Test (int DIM1, int DIM2, type* IN_ARRAY2) typemap
<ide> def testMinWrongDim(self):
<ide> "Test min function with wrong dimensions"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> min = Matrix.__dict__[self.typeStr + "Min"]
<ide> self.assertRaises(TypeError, min, [1,3,5,7,9])
<ide>
<ide> # Test (int DIM1, int DIM2, type* IN_ARRAY2) typemap
<ide> def testMinNonContainer(self):
<ide> "Test min function with non-container"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> min = Matrix.__dict__[self.typeStr + "Min"]
<ide> self.assertRaises(TypeError, min, False)
<ide>
<ide> # Test (type INPLACE_ARRAY2[ANY][ANY]) typemap
<ide> def testScale(self):
<ide> "Test scale function"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> scale = Matrix.__dict__[self.typeStr + "Scale"]
<ide> matrix = np.array([[1,2,3],[2,1,2],[3,2,1]],self.typeCode)
<ide> scale(matrix,4)
<ide> def testScale(self):
<ide> # Test (type INPLACE_ARRAY2[ANY][ANY]) typemap
<ide> def testScaleWrongDim(self):
<ide> "Test scale function with wrong dimensions"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> scale = Matrix.__dict__[self.typeStr + "Scale"]
<ide> matrix = np.array([1,2,2,1],self.typeCode)
<ide> self.assertRaises(TypeError, scale, matrix)
<ide>
<ide> # Test (type INPLACE_ARRAY2[ANY][ANY]) typemap
<ide> def testScaleWrongSize(self):
<ide> "Test scale function with wrong size"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> scale = Matrix.__dict__[self.typeStr + "Scale"]
<ide> matrix = np.array([[1,2],[2,1]],self.typeCode)
<ide> self.assertRaises(TypeError, scale, matrix)
<ide>
<ide> # Test (type INPLACE_ARRAY2[ANY][ANY]) typemap
<ide> def testScaleWrongType(self):
<ide> "Test scale function with wrong type"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> scale = Matrix.__dict__[self.typeStr + "Scale"]
<ide> matrix = np.array([[1,2,3],[2,1,2],[3,2,1]],'c')
<ide> self.assertRaises(TypeError, scale, matrix)
<ide>
<ide> # Test (type INPLACE_ARRAY2[ANY][ANY]) typemap
<ide> def testScaleNonArray(self):
<ide> "Test scale function with non-array"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> scale = Matrix.__dict__[self.typeStr + "Scale"]
<ide> matrix = [[1,2,3],[2,1,2],[3,2,1]]
<ide> self.assertRaises(TypeError, scale, matrix)
<ide>
<ide> # Test (type* INPLACE_ARRAY2, int DIM1, int DIM2) typemap
<ide> def testFloor(self):
<ide> "Test floor function"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> floor = Matrix.__dict__[self.typeStr + "Floor"]
<ide> matrix = np.array([[6,7],[8,9]],self.typeCode)
<ide> floor(matrix,7)
<ide> def testFloor(self):
<ide> # Test (type* INPLACE_ARRAY2, int DIM1, int DIM2) typemap
<ide> def testFloorWrongDim(self):
<ide> "Test floor function with wrong dimensions"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> floor = Matrix.__dict__[self.typeStr + "Floor"]
<ide> matrix = np.array([6,7,8,9],self.typeCode)
<ide> self.assertRaises(TypeError, floor, matrix)
<ide>
<ide> # Test (type* INPLACE_ARRAY2, int DIM1, int DIM2) typemap
<ide> def testFloorWrongType(self):
<ide> "Test floor function with wrong type"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> floor = Matrix.__dict__[self.typeStr + "Floor"]
<ide> matrix = np.array([[6,7], [8,9]],'c')
<ide> self.assertRaises(TypeError, floor, matrix)
<ide>
<ide> # Test (type* INPLACE_ARRAY2, int DIM1, int DIM2) typemap
<ide> def testFloorNonArray(self):
<ide> "Test floor function with non-array"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> floor = Matrix.__dict__[self.typeStr + "Floor"]
<ide> matrix = [[6,7], [8,9]]
<ide> self.assertRaises(TypeError, floor, matrix)
<ide>
<ide> # Test (int DIM1, int DIM2, type* INPLACE_ARRAY2) typemap
<ide> def testCeil(self):
<ide> "Test ceil function"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> ceil = Matrix.__dict__[self.typeStr + "Ceil"]
<ide> matrix = np.array([[1,2],[3,4]],self.typeCode)
<ide> ceil(matrix,3)
<ide> def testCeil(self):
<ide> # Test (int DIM1, int DIM2, type* INPLACE_ARRAY2) typemap
<ide> def testCeilWrongDim(self):
<ide> "Test ceil function with wrong dimensions"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> ceil = Matrix.__dict__[self.typeStr + "Ceil"]
<ide> matrix = np.array([1,2,3,4],self.typeCode)
<ide> self.assertRaises(TypeError, ceil, matrix)
<ide>
<ide> # Test (int DIM1, int DIM2, type* INPLACE_ARRAY2) typemap
<ide> def testCeilWrongType(self):
<ide> "Test ceil function with wrong dimensions"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> ceil = Matrix.__dict__[self.typeStr + "Ceil"]
<ide> matrix = np.array([[1,2], [3,4]],'c')
<ide> self.assertRaises(TypeError, ceil, matrix)
<ide>
<ide> # Test (int DIM1, int DIM2, type* INPLACE_ARRAY2) typemap
<ide> def testCeilNonArray(self):
<ide> "Test ceil function with non-array"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> ceil = Matrix.__dict__[self.typeStr + "Ceil"]
<ide> matrix = [[1,2], [3,4]]
<ide> self.assertRaises(TypeError, ceil, matrix)
<ide>
<ide> # Test (type ARGOUT_ARRAY2[ANY][ANY]) typemap
<ide> def testLUSplit(self):
<ide> "Test luSplit function"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> luSplit = Matrix.__dict__[self.typeStr + "LUSplit"]
<ide> lower, upper = luSplit([[1,2,3],[4,5,6],[7,8,9]])
<ide> self.assertEquals((lower == [[1,0,0],[4,5,0],[7,8,9]]).all(), True)
<ide> def __init__(self, methodName="runTest"):
<ide> suite.addTest(unittest.makeSuite( doubleTestCase))
<ide>
<ide> # Execute the test suite
<del> print "Testing 2D Functions of Module Matrix"
<del> print "NumPy version", np.__version__
<del> print
<add> print("Testing 2D Functions of Module Matrix")
<add> print("NumPy version", np.__version__)
<add> print()
<ide> result = unittest.TextTestRunner(verbosity=2).run(suite)
<ide> sys.exit(len(result.errors) + len(result.failures))
<ide><path>doc/swig/test/testTensor.py
<ide> #! /usr/bin/env python
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> # System imports
<ide> from distutils.util import get_platform
<ide> def __init__(self, methodName="runTests"):
<ide> # Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap
<ide> def testNorm(self):
<ide> "Test norm function"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> norm = Tensor.__dict__[self.typeStr + "Norm"]
<ide> tensor = [[[0,1], [2,3]],
<ide> [[3,2], [1,0]]]
<ide> def testNorm(self):
<ide> # Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap
<ide> def testNormBadList(self):
<ide> "Test norm function with bad list"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> norm = Tensor.__dict__[self.typeStr + "Norm"]
<ide> tensor = [[[0,"one"],[2,3]],
<ide> [[3,"two"],[1,0]]]
<ide> def testNormBadList(self):
<ide> # Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap
<ide> def testNormWrongDim(self):
<ide> "Test norm function with wrong dimensions"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> norm = Tensor.__dict__[self.typeStr + "Norm"]
<ide> tensor = [[0,1,2,3],
<ide> [3,2,1,0]]
<ide> def testNormWrongDim(self):
<ide> # Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap
<ide> def testNormWrongSize(self):
<ide> "Test norm function with wrong size"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> norm = Tensor.__dict__[self.typeStr + "Norm"]
<ide> tensor = [[[0,1,0], [2,3,2]],
<ide> [[3,2,3], [1,0,1]]]
<ide> def testNormWrongSize(self):
<ide> # Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap
<ide> def testNormNonContainer(self):
<ide> "Test norm function with non-container"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> norm = Tensor.__dict__[self.typeStr + "Norm"]
<ide> self.assertRaises(TypeError, norm, None)
<ide>
<ide> # Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
<ide> def testMax(self):
<ide> "Test max function"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> max = Tensor.__dict__[self.typeStr + "Max"]
<ide> tensor = [[[1,2], [3,4]],
<ide> [[5,6], [7,8]]]
<ide> def testMax(self):
<ide> # Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
<ide> def testMaxBadList(self):
<ide> "Test max function with bad list"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> max = Tensor.__dict__[self.typeStr + "Max"]
<ide> tensor = [[[1,"two"], [3,4]],
<ide> [[5,"six"], [7,8]]]
<ide> def testMaxBadList(self):
<ide> # Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
<ide> def testMaxNonContainer(self):
<ide> "Test max function with non-container"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> max = Tensor.__dict__[self.typeStr + "Max"]
<ide> self.assertRaises(TypeError, max, None)
<ide>
<ide> # Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
<ide> def testMaxWrongDim(self):
<ide> "Test max function with wrong dimensions"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> max = Tensor.__dict__[self.typeStr + "Max"]
<ide> self.assertRaises(TypeError, max, [0, -1, 2, -3])
<ide>
<ide> # Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap
<ide> def testMin(self):
<ide> "Test min function"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> min = Tensor.__dict__[self.typeStr + "Min"]
<ide> tensor = [[[9,8], [7,6]],
<ide> [[5,4], [3,2]]]
<ide> def testMin(self):
<ide> # Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap
<ide> def testMinBadList(self):
<ide> "Test min function with bad list"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> min = Tensor.__dict__[self.typeStr + "Min"]
<ide> tensor = [[["nine",8], [7,6]],
<ide> [["five",4], [3,2]]]
<ide> def testMinBadList(self):
<ide> # Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap
<ide> def testMinNonContainer(self):
<ide> "Test min function with non-container"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> min = Tensor.__dict__[self.typeStr + "Min"]
<ide> self.assertRaises(TypeError, min, True)
<ide>
<ide> # Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap
<ide> def testMinWrongDim(self):
<ide> "Test min function with wrong dimensions"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> min = Tensor.__dict__[self.typeStr + "Min"]
<ide> self.assertRaises(TypeError, min, [[1,3],[5,7]])
<ide>
<ide> # Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap
<ide> def testScale(self):
<ide> "Test scale function"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> scale = Tensor.__dict__[self.typeStr + "Scale"]
<ide> tensor = np.array([[[1,0,1], [0,1,0], [1,0,1]],
<ide> [[0,1,0], [1,0,1], [0,1,0]],
<ide> def testScale(self):
<ide> # Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap
<ide> def testScaleWrongType(self):
<ide> "Test scale function with wrong type"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> scale = Tensor.__dict__[self.typeStr + "Scale"]
<ide> tensor = np.array([[[1,0,1], [0,1,0], [1,0,1]],
<ide> [[0,1,0], [1,0,1], [0,1,0]],
<ide> def testScaleWrongType(self):
<ide> # Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap
<ide> def testScaleWrongDim(self):
<ide> "Test scale function with wrong dimensions"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> scale = Tensor.__dict__[self.typeStr + "Scale"]
<ide> tensor = np.array([[1,0,1], [0,1,0], [1,0,1],
<ide> [0,1,0], [1,0,1], [0,1,0]],self.typeCode)
<ide> def testScaleWrongDim(self):
<ide> # Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap
<ide> def testScaleWrongSize(self):
<ide> "Test scale function with wrong size"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> scale = Tensor.__dict__[self.typeStr + "Scale"]
<ide> tensor = np.array([[[1,0], [0,1], [1,0]],
<ide> [[0,1], [1,0], [0,1]],
<ide> def testScaleWrongSize(self):
<ide> # Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap
<ide> def testScaleNonArray(self):
<ide> "Test scale function with non-array"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> scale = Tensor.__dict__[self.typeStr + "Scale"]
<ide> self.assertRaises(TypeError, scale, True)
<ide>
<ide> # Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
<ide> def testFloor(self):
<ide> "Test floor function"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> floor = Tensor.__dict__[self.typeStr + "Floor"]
<ide> tensor = np.array([[[1,2], [3,4]],
<ide> [[5,6], [7,8]]],self.typeCode)
<ide> def testFloor(self):
<ide> # Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
<ide> def testFloorWrongType(self):
<ide> "Test floor function with wrong type"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> floor = Tensor.__dict__[self.typeStr + "Floor"]
<ide> tensor = np.array([[[1,2], [3,4]],
<ide> [[5,6], [7,8]]],'c')
<ide> def testFloorWrongType(self):
<ide> # Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
<ide> def testFloorWrongDim(self):
<ide> "Test floor function with wrong type"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> floor = Tensor.__dict__[self.typeStr + "Floor"]
<ide> tensor = np.array([[1,2], [3,4], [5,6], [7,8]],self.typeCode)
<ide> self.assertRaises(TypeError, floor, tensor)
<ide>
<ide> # Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
<ide> def testFloorNonArray(self):
<ide> "Test floor function with non-array"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> floor = Tensor.__dict__[self.typeStr + "Floor"]
<ide> self.assertRaises(TypeError, floor, object)
<ide>
<ide> # Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap
<ide> def testCeil(self):
<ide> "Test ceil function"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> ceil = Tensor.__dict__[self.typeStr + "Ceil"]
<ide> tensor = np.array([[[9,8], [7,6]],
<ide> [[5,4], [3,2]]],self.typeCode)
<ide> def testCeil(self):
<ide> # Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap
<ide> def testCeilWrongType(self):
<ide> "Test ceil function with wrong type"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> ceil = Tensor.__dict__[self.typeStr + "Ceil"]
<ide> tensor = np.array([[[9,8], [7,6]],
<ide> [[5,4], [3,2]]],'c')
<ide> def testCeilWrongType(self):
<ide> # Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap
<ide> def testCeilWrongDim(self):
<ide> "Test ceil function with wrong dimensions"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> ceil = Tensor.__dict__[self.typeStr + "Ceil"]
<ide> tensor = np.array([[9,8], [7,6], [5,4], [3,2]], self.typeCode)
<ide> self.assertRaises(TypeError, ceil, tensor)
<ide>
<ide> # Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap
<ide> def testCeilNonArray(self):
<ide> "Test ceil function with non-array"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> ceil = Tensor.__dict__[self.typeStr + "Ceil"]
<ide> tensor = [[[9,8], [7,6]],
<ide> [[5,4], [3,2]]]
<ide> def testCeilNonArray(self):
<ide> # Test (type ARGOUT_ARRAY3[ANY][ANY][ANY]) typemap
<ide> def testLUSplit(self):
<ide> "Test luSplit function"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> luSplit = Tensor.__dict__[self.typeStr + "LUSplit"]
<ide> lower, upper = luSplit([[[1,1], [1,1]],
<ide> [[1,1], [1,1]]])
<ide> def __init__(self, methodName="runTest"):
<ide> suite.addTest(unittest.makeSuite( doubleTestCase))
<ide>
<ide> # Execute the test suite
<del> print "Testing 3D Functions of Module Tensor"
<del> print "NumPy version", np.__version__
<del> print
<add> print("Testing 3D Functions of Module Tensor")
<add> print("NumPy version", np.__version__)
<add> print()
<ide> result = unittest.TextTestRunner(verbosity=2).run(suite)
<ide> sys.exit(len(result.errors) + len(result.failures))
<ide><path>doc/swig/test/testVector.py
<ide> #! /usr/bin/env python
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> # System imports
<ide> from distutils.util import get_platform
<ide> def __init__(self, methodName="runTest"):
<ide> # Test the (type IN_ARRAY1[ANY]) typemap
<ide> def testLength(self):
<ide> "Test length function"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> length = Vector.__dict__[self.typeStr + "Length"]
<ide> self.assertEquals(length([5, 12, 0]), 13)
<ide>
<ide> # Test the (type IN_ARRAY1[ANY]) typemap
<ide> def testLengthBadList(self):
<ide> "Test length function with bad list"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> length = Vector.__dict__[self.typeStr + "Length"]
<ide> self.assertRaises(BadListError, length, [5, "twelve", 0])
<ide>
<ide> # Test the (type IN_ARRAY1[ANY]) typemap
<ide> def testLengthWrongSize(self):
<ide> "Test length function with wrong size"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> length = Vector.__dict__[self.typeStr + "Length"]
<ide> self.assertRaises(TypeError, length, [5, 12])
<ide>
<ide> # Test the (type IN_ARRAY1[ANY]) typemap
<ide> def testLengthWrongDim(self):
<ide> "Test length function with wrong dimensions"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> length = Vector.__dict__[self.typeStr + "Length"]
<ide> self.assertRaises(TypeError, length, [[1,2], [3,4]])
<ide>
<ide> # Test the (type IN_ARRAY1[ANY]) typemap
<ide> def testLengthNonContainer(self):
<ide> "Test length function with non-container"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> length = Vector.__dict__[self.typeStr + "Length"]
<ide> self.assertRaises(TypeError, length, None)
<ide>
<ide> # Test the (type* IN_ARRAY1, int DIM1) typemap
<ide> def testProd(self):
<ide> "Test prod function"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> prod = Vector.__dict__[self.typeStr + "Prod"]
<ide> self.assertEquals(prod([1,2,3,4]), 24)
<ide>
<ide> # Test the (type* IN_ARRAY1, int DIM1) typemap
<ide> def testProdBadList(self):
<ide> "Test prod function with bad list"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> prod = Vector.__dict__[self.typeStr + "Prod"]
<ide> self.assertRaises(BadListError, prod, [[1,"two"], ["e","pi"]])
<ide>
<ide> # Test the (type* IN_ARRAY1, int DIM1) typemap
<ide> def testProdWrongDim(self):
<ide> "Test prod function with wrong dimensions"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> prod = Vector.__dict__[self.typeStr + "Prod"]
<ide> self.assertRaises(TypeError, prod, [[1,2], [8,9]])
<ide>
<ide> # Test the (type* IN_ARRAY1, int DIM1) typemap
<ide> def testProdNonContainer(self):
<ide> "Test prod function with non-container"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> prod = Vector.__dict__[self.typeStr + "Prod"]
<ide> self.assertRaises(TypeError, prod, None)
<ide>
<ide> # Test the (int DIM1, type* IN_ARRAY1) typemap
<ide> def testSum(self):
<ide> "Test sum function"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> sum = Vector.__dict__[self.typeStr + "Sum"]
<ide> self.assertEquals(sum([5,6,7,8]), 26)
<ide>
<ide> # Test the (int DIM1, type* IN_ARRAY1) typemap
<ide> def testSumBadList(self):
<ide> "Test sum function with bad list"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> sum = Vector.__dict__[self.typeStr + "Sum"]
<ide> self.assertRaises(BadListError, sum, [3,4, 5, "pi"])
<ide>
<ide> # Test the (int DIM1, type* IN_ARRAY1) typemap
<ide> def testSumWrongDim(self):
<ide> "Test sum function with wrong dimensions"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> sum = Vector.__dict__[self.typeStr + "Sum"]
<ide> self.assertRaises(TypeError, sum, [[3,4], [5,6]])
<ide>
<ide> # Test the (int DIM1, type* IN_ARRAY1) typemap
<ide> def testSumNonContainer(self):
<ide> "Test sum function with non-container"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> sum = Vector.__dict__[self.typeStr + "Sum"]
<ide> self.assertRaises(TypeError, sum, True)
<ide>
<ide> # Test the (type INPLACE_ARRAY1[ANY]) typemap
<ide> def testReverse(self):
<ide> "Test reverse function"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> reverse = Vector.__dict__[self.typeStr + "Reverse"]
<ide> vector = np.array([1,2,4],self.typeCode)
<ide> reverse(vector)
<ide> def testReverse(self):
<ide> # Test the (type INPLACE_ARRAY1[ANY]) typemap
<ide> def testReverseWrongDim(self):
<ide> "Test reverse function with wrong dimensions"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> reverse = Vector.__dict__[self.typeStr + "Reverse"]
<ide> vector = np.array([[1,2], [3,4]],self.typeCode)
<ide> self.assertRaises(TypeError, reverse, vector)
<ide>
<ide> # Test the (type INPLACE_ARRAY1[ANY]) typemap
<ide> def testReverseWrongSize(self):
<ide> "Test reverse function with wrong size"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> reverse = Vector.__dict__[self.typeStr + "Reverse"]
<ide> vector = np.array([9,8,7,6,5,4],self.typeCode)
<ide> self.assertRaises(TypeError, reverse, vector)
<ide>
<ide> # Test the (type INPLACE_ARRAY1[ANY]) typemap
<ide> def testReverseWrongType(self):
<ide> "Test reverse function with wrong type"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> reverse = Vector.__dict__[self.typeStr + "Reverse"]
<ide> vector = np.array([1,2,4],'c')
<ide> self.assertRaises(TypeError, reverse, vector)
<ide>
<ide> # Test the (type INPLACE_ARRAY1[ANY]) typemap
<ide> def testReverseNonArray(self):
<ide> "Test reverse function with non-array"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> reverse = Vector.__dict__[self.typeStr + "Reverse"]
<ide> self.assertRaises(TypeError, reverse, [2,4,6])
<ide>
<ide> # Test the (type* INPLACE_ARRAY1, int DIM1) typemap
<ide> def testOnes(self):
<ide> "Test ones function"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> ones = Vector.__dict__[self.typeStr + "Ones"]
<ide> vector = np.zeros(5,self.typeCode)
<ide> ones(vector)
<ide> def testOnes(self):
<ide> # Test the (type* INPLACE_ARRAY1, int DIM1) typemap
<ide> def testOnesWrongDim(self):
<ide> "Test ones function with wrong dimensions"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> ones = Vector.__dict__[self.typeStr + "Ones"]
<ide> vector = np.zeros((5,5),self.typeCode)
<ide> self.assertRaises(TypeError, ones, vector)
<ide>
<ide> # Test the (type* INPLACE_ARRAY1, int DIM1) typemap
<ide> def testOnesWrongType(self):
<ide> "Test ones function with wrong type"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> ones = Vector.__dict__[self.typeStr + "Ones"]
<ide> vector = np.zeros((5,5),'c')
<ide> self.assertRaises(TypeError, ones, vector)
<ide>
<ide> # Test the (type* INPLACE_ARRAY1, int DIM1) typemap
<ide> def testOnesNonArray(self):
<ide> "Test ones function with non-array"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> ones = Vector.__dict__[self.typeStr + "Ones"]
<ide> self.assertRaises(TypeError, ones, [2,4,6,8])
<ide>
<ide> # Test the (int DIM1, type* INPLACE_ARRAY1) typemap
<ide> def testZeros(self):
<ide> "Test zeros function"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> zeros = Vector.__dict__[self.typeStr + "Zeros"]
<ide> vector = np.ones(5,self.typeCode)
<ide> zeros(vector)
<ide> def testZeros(self):
<ide> # Test the (int DIM1, type* INPLACE_ARRAY1) typemap
<ide> def testZerosWrongDim(self):
<ide> "Test zeros function with wrong dimensions"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> zeros = Vector.__dict__[self.typeStr + "Zeros"]
<ide> vector = np.ones((5,5),self.typeCode)
<ide> self.assertRaises(TypeError, zeros, vector)
<ide>
<ide> # Test the (int DIM1, type* INPLACE_ARRAY1) typemap
<ide> def testZerosWrongType(self):
<ide> "Test zeros function with wrong type"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> zeros = Vector.__dict__[self.typeStr + "Zeros"]
<ide> vector = np.ones(6,'c')
<ide> self.assertRaises(TypeError, zeros, vector)
<ide>
<ide> # Test the (int DIM1, type* INPLACE_ARRAY1) typemap
<ide> def testZerosNonArray(self):
<ide> "Test zeros function with non-array"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> zeros = Vector.__dict__[self.typeStr + "Zeros"]
<ide> self.assertRaises(TypeError, zeros, [1,3,5,7,9])
<ide>
<ide> # Test the (type ARGOUT_ARRAY1[ANY]) typemap
<ide> def testEOSplit(self):
<ide> "Test eoSplit function"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> eoSplit = Vector.__dict__[self.typeStr + "EOSplit"]
<ide> even, odd = eoSplit([1,2,3])
<ide> self.assertEquals((even == [1,0,3]).all(), True)
<ide> def testEOSplit(self):
<ide> # Test the (type* ARGOUT_ARRAY1, int DIM1) typemap
<ide> def testTwos(self):
<ide> "Test twos function"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> twos = Vector.__dict__[self.typeStr + "Twos"]
<ide> vector = twos(5)
<ide> self.assertEquals((vector == [2,2,2,2,2]).all(), True)
<ide>
<ide> # Test the (type* ARGOUT_ARRAY1, int DIM1) typemap
<ide> def testTwosNonInt(self):
<ide> "Test twos function with non-integer dimension"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> twos = Vector.__dict__[self.typeStr + "Twos"]
<ide> self.assertRaises(TypeError, twos, 5.0)
<ide>
<ide> # Test the (int DIM1, type* ARGOUT_ARRAY1) typemap
<ide> def testThrees(self):
<ide> "Test threes function"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> threes = Vector.__dict__[self.typeStr + "Threes"]
<ide> vector = threes(6)
<ide> self.assertEquals((vector == [3,3,3,3,3,3]).all(), True)
<ide>
<ide> # Test the (type* ARGOUT_ARRAY1, int DIM1) typemap
<ide> def testThreesNonInt(self):
<ide> "Test threes function with non-integer dimension"
<del> print >>sys.stderr, self.typeStr, "... ",
<add> print(self.typeStr, "... ", end=' ', file=sys.stderr)
<ide> threes = Vector.__dict__[self.typeStr + "Threes"]
<ide> self.assertRaises(TypeError, threes, "threes")
<ide>
<ide> def __init__(self, methodName="runTest"):
<ide> suite.addTest(unittest.makeSuite( doubleTestCase))
<ide>
<ide> # Execute the test suite
<del> print "Testing 1D Functions of Module Vector"
<del> print "NumPy version", np.__version__
<del> print
<add> print("Testing 1D Functions of Module Vector")
<add> print("NumPy version", np.__version__)
<add> print()
<ide> result = unittest.TextTestRunner(verbosity=2).run(suite)
<ide> sys.exit(len(result.errors) + len(result.failures))
<ide><path>numpy/__init__.py
<ide> Exceptions to this rule are documented.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide>
<ide><path>numpy/_import_tools.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import os
<ide> import sys
<ide> def _obj2repr(self,obj):
<ide>
<ide> def log(self,mess):
<ide> if self.verbose>1:
<del> print >> sys.stderr, str(mess)
<add> print(str(mess), file=sys.stderr)
<ide> def warn(self,mess):
<ide> if self.verbose>=0:
<del> print >> sys.stderr, str(mess)
<add> print(str(mess), file=sys.stderr)
<ide> def error(self,mess):
<ide> if self.verbose!=-1:
<del> print >> sys.stderr, str(mess)
<add> print(str(mess), file=sys.stderr)
<ide>
<ide> def _get_doc_title(self, info_module):
<ide> """ Get the title from a package info.py file.
<ide> class PackageLoaderDebug(PackageLoader):
<ide> def _execcmd(self,cmdstr):
<ide> """ Execute command in parent_frame."""
<ide> frame = self.parent_frame
<del> print 'Executing',`cmdstr`,'...',
<add> print('Executing',`cmdstr`,'...', end=' ')
<ide> sys.stdout.flush()
<ide> exec (cmdstr, frame.f_globals,frame.f_locals)
<del> print 'ok'
<add> print('ok')
<ide> sys.stdout.flush()
<ide> return
<ide>
<ide><path>numpy/add_newdocs.py
<ide> core/fromnumeric.py, core/defmatrix.py up-to-date.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from numpy.lib import add_newdoc
<ide>
<ide><path>numpy/build_utils/__init__.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide><path>numpy/build_utils/common.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide> import copy
<ide><path>numpy/build_utils/waf.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import os
<ide> import re
<ide><path>numpy/compat/__init__.py
<ide> * we may only need a small subset of the copied library/module
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from . import _inspect
<ide> from . import py3k
<ide><path>numpy/compat/_inspect.py
<ide> no overhead.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import types
<ide>
<ide> def convert(name, locals=locals,
<ide> def foo(x, y, z=None):
<ide> return None
<ide>
<del> print inspect.getargs(foo.__code__)
<del> print getargs(foo.__code__)
<add> print(inspect.getargs(foo.__code__))
<add> print(getargs(foo.__code__))
<ide>
<del> print inspect.getargspec(foo)
<del> print getargspec(foo)
<add> print(inspect.getargspec(foo))
<add> print(getargspec(foo))
<ide>
<del> print inspect.formatargspec(*inspect.getargspec(foo))
<del> print formatargspec(*getargspec(foo))
<add> print(inspect.formatargspec(*inspect.getargspec(foo)))
<add> print(formatargspec(*getargspec(foo)))
<ide><path>numpy/compat/py3k.py
<ide> Python 3 compatibility tools.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> __all__ = ['bytes', 'asbytes', 'isfileobj', 'getexception', 'strchar',
<ide> 'unicode', 'asunicode', 'asbytes_nested', 'asunicode_nested',
<ide><path>numpy/compat/setup.py
<ide> #!/usr/bin/env python
<del>from __future__ import division
<add>from __future__ import division, print_function
<ide>
<ide>
<ide> def configuration(parent_package='',top_path=None):
<ide><path>numpy/core/__init__.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from .info import __doc__
<ide> from numpy.version import version as __version__
<ide><path>numpy/core/_internal.py
<ide> Some things are more easily handled Python.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import re
<ide> import sys
<ide><path>numpy/core/_methods.py
<ide> and the Python code for the NumPy-namespace function
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from numpy.core import multiarray as mu
<ide> from numpy.core import umath as um
<ide><path>numpy/core/arrayprint.py
<ide> $Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> __all__ = ["array2string", "set_printoptions", "get_printoptions"]
<ide> __docformat__ = 'restructuredtext'
<ide><path>numpy/core/code_generators/__init__.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide><path>numpy/core/code_generators/cversions.py
<ide> The API has is defined by numpy_api_order and ufunc_api_order.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from os.path import dirname
<ide>
<ide>
<ide> if __name__ == '__main__':
<ide> curdir = dirname(__file__)
<del> print fullapi_hash(numpy_api.full_api)
<add> print(fullapi_hash(numpy_api.full_api))
<ide><path>numpy/core/code_generators/genapi.py
<ide> specified.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys, os, re
<ide> try:
<ide> def find_functions(filename, tag='API'):
<ide> else:
<ide> function_args.append(line)
<ide> except:
<del> print(filename, lineno+1)
<add> print(filename, lineno + 1)
<ide> raise
<ide> fo.close()
<ide> return functions
<ide> def main():
<ide> print(func)
<ide> ah = func.api_hash()
<ide> m.update(ah)
<del> print(hex(int(ah,16)))
<del> print(hex(int(m.hexdigest()[:8],16)))
<add> print(hex(int(ah, 16)))
<add> print(hex(int(m.hexdigest()[:8], 16)))
<ide>
<ide> if __name__ == '__main__':
<ide> main()
<ide><path>numpy/core/code_generators/generate_numpy_api.py
<del>from __future__ import division
<add>from __future__ import division, print_function
<ide>
<ide> import os
<ide> import genapi
<ide><path>numpy/core/code_generators/generate_ufunc_api.py
<del>from __future__ import division
<add>from __future__ import division, print_function
<ide>
<ide> import os
<ide> import genapi
<ide><path>numpy/core/code_generators/generate_umath.py
<del>from __future__ import division
<add>from __future__ import division, print_function
<ide>
<ide> import os
<ide> import re
<ide><path>numpy/core/code_generators/numpy_api.py
<ide> exception, so it should hopefully not get unnoticed).
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> multiarray_global_vars = {
<ide> 'NPY_NUMUSERTYPES': 7,
<ide><path>numpy/core/code_generators/ufunc_docstrings.py
<ide> at compile time.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> docdict = {}
<ide>
<ide><path>numpy/core/defchararray.py
<ide> The preferred alias for `defchararray` is `numpy.char`.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide> from .numerictypes import string_, unicode_, integer, object_, bool_, character
<ide><path>numpy/core/fromnumeric.py
<ide> """Module containing non-deprecated functions borrowed from Numeric.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> __docformat__ = "restructuredtext en"
<ide>
<ide><path>numpy/core/function_base.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> __all__ = ['logspace', 'linspace']
<ide>
<ide><path>numpy/core/getlimits.py
<ide> """Machine limits for Float32 and Float64 and (long double) if available...
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> __all__ = ['finfo','iinfo']
<ide>
<ide> def __repr__(self):
<ide>
<ide> if __name__ == '__main__':
<ide> f = finfo(ntypes.single)
<del> print 'single epsilon:',f.eps
<del> print 'single tiny:',f.tiny
<add> print('single epsilon:',f.eps)
<add> print('single tiny:',f.tiny)
<ide> f = finfo(ntypes.float)
<del> print 'float epsilon:',f.eps
<del> print 'float tiny:',f.tiny
<add> print('float epsilon:',f.eps)
<add> print('float tiny:',f.tiny)
<ide> f = finfo(ntypes.longfloat)
<del> print 'longfloat epsilon:',f.eps
<del> print 'longfloat tiny:',f.tiny
<add> print('longfloat epsilon:',f.eps)
<add> print('longfloat tiny:',f.tiny)
<ide><path>numpy/core/info.py
<ide> arccosh arcsinh arctanh
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> depends = ['testing']
<ide> global_symbols = ['*']
<ide><path>numpy/core/machar.py
<ide> Author: Pearu Peterson, September 2003
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> __all__ = ['MachAr']
<ide>
<ide> def __str__(self):
<ide>
<ide>
<ide> if __name__ == '__main__':
<del> print MachAr()
<add> print(MachAr())
<ide><path>numpy/core/memmap.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> __all__ = ['memmap']
<ide>
<ide><path>numpy/core/numeric.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide> import warnings
<ide><path>numpy/core/numerictypes.py
<ide> \\-> object_ (not used much) (kind=O)
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> # we add more at the bottom
<ide> __all__ = ['sctypeDict', 'sctypeNA', 'typeDict', 'typeNA', 'sctypes',
<ide><path>numpy/core/records.py
<ide> array([ 2., 2.])
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> # All of the functions allow formats to be a dtype
<ide> __all__ = ['record', 'recarray', 'format_parser']
<ide><path>numpy/core/setup.py
<del>from __future__ import division
<add>from __future__ import division, print_function
<ide>
<ide> import imp
<ide> import os
<ide> def win32_checks(deflist):
<ide> a = get_build_architecture()
<ide>
<ide> # Distutils hack on AMD64 on windows
<del> print('BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' % \
<add> print('BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' %
<ide> (a, os.name, sys.platform))
<ide> if a == 'AMD64':
<ide> deflist.append('DISTUTILS_USE_SDK')
<ide><path>numpy/core/setup_common.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> # Code common to build tools
<ide> import sys
<ide><path>numpy/core/shape_base.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> __all__ = ['atleast_1d','atleast_2d','atleast_3d','vstack','hstack']
<ide>
<ide><path>numpy/core/src/multiarray/testcalcs.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from scipy import weave
<ide>
<ide><path>numpy/core/tests/test_api.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide>
<ide><path>numpy/core/tests/test_arrayprint.py
<ide> #!/usr/bin/python
<ide> # -*- coding: utf-8 -*-
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide> import numpy as np
<ide><path>numpy/core/tests/test_blasdot.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import numpy as np
<ide> import sys
<ide><path>numpy/core/tests/test_datetime.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import os, pickle
<ide> import numpy
<ide><path>numpy/core/tests/test_defchararray.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from numpy.testing import *
<ide> from numpy.core import *
<ide><path>numpy/core/tests/test_deprecations.py
<ide> to document how deprecations should eventually be turned into errors.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide> import warnings
<ide><path>numpy/core/tests/test_dtype.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide> import numpy as np
<ide><path>numpy/core/tests/test_einsum.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide> from decimal import Decimal
<ide><path>numpy/core/tests/test_errstate.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import platform
<ide>
<ide><path>numpy/core/tests/test_function_base.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from numpy.testing import *
<ide> from numpy import logspace, linspace
<ide><path>numpy/core/tests/test_getlimits.py
<ide> """ Test functions for limits module.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from numpy.testing import *
<ide>
<ide><path>numpy/core/tests/test_half.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import platform
<ide>
<ide><path>numpy/core/tests/test_indexerrors.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import numpy as np
<ide> from numpy.testing import TestCase, run_module_suite, assert_raises, assert_equal, assert_
<ide><path>numpy/core/tests/test_indexing.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import numpy as np
<ide> from numpy.compat import asbytes
<ide><path>numpy/core/tests/test_item_selection.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import numpy as np
<ide> from numpy.testing import *
<ide><path>numpy/core/tests/test_machar.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from numpy.testing import *
<ide>
<ide><path>numpy/core/tests/test_memmap.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide> from tempfile import NamedTemporaryFile, mktemp
<ide><path>numpy/core/tests/test_multiarray.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import tempfile
<ide> import sys
<ide><path>numpy/core/tests/test_multiarray_assignment.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import numpy as np
<ide> from numpy.testing import TestCase
<ide><path>numpy/core/tests/test_nditer.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import numpy as np
<ide> from numpy import array, arange, nditer, all
<ide><path>numpy/core/tests/test_numeric.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide> import platform
<ide><path>numpy/core/tests/test_numerictypes.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide> from numpy.testing import *
<ide><path>numpy/core/tests/test_print.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import numpy as np
<ide> from numpy.testing import *
<ide> def _test_redirected_print(x, tp, ref=None):
<ide> stdout = sys.stdout
<ide> try:
<ide> sys.stdout = file_tp
<del> print tp(x)
<add> print(tp(x))
<ide> sys.stdout = file
<ide> if ref:
<del> print ref
<add> print(ref)
<ide> else:
<del> print x
<add> print(x)
<ide> finally:
<ide> sys.stdout = stdout
<ide>
<ide><path>numpy/core/tests/test_records.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from os import path
<ide> import numpy as np
<ide><path>numpy/core/tests/test_regression.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import pickle
<ide> import sys
<ide><path>numpy/core/tests/test_scalarmath.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide> from numpy.testing import *
<ide><path>numpy/core/tests/test_scalarprint.py
<ide> """ Test printing of scalar types.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import numpy as np
<ide> from numpy.testing import TestCase, assert_, run_module_suite
<ide><path>numpy/core/tests/test_shape_base.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import warnings
<ide> import numpy as np
<ide><path>numpy/core/tests/test_ufunc.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide>
<ide><path>numpy/core/tests/test_umath.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide> import platform
<ide><path>numpy/core/tests/test_umath_complex.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide> import platform
<ide><path>numpy/core/tests/test_unicode.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide>
<ide><path>numpy/ctypeslib.py
<ide> >>> _lib.foo_func(out, len(out)) #doctest: +SKIP
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> __all__ = ['load_library', 'ndpointer', 'test', 'ctypes_load_library',
<ide> 'c_intp', 'as_ctypes', 'as_array']
<ide><path>numpy/distutils/__init__.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide>
<ide><path>numpy/distutils/__version__.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> major = 0
<ide> minor = 4
<ide><path>numpy/distutils/ccompiler.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import re
<ide> import os
<ide><path>numpy/distutils/command/__init__.py
<ide> commands.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> def test_na_writable_attributes_deletion():
<ide> a = np.NA(2)
<ide><path>numpy/distutils/command/autodist.py
<ide> """This module implements additional tests ala autoconf which can be useful.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide>
<ide> # We put them here since they could be easily reused outside numpy.distutils
<ide><path>numpy/distutils/command/bdist_rpm.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import os
<ide> import sys
<ide><path>numpy/distutils/command/build.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import os
<ide> import sys
<ide><path>numpy/distutils/command/build_clib.py
<ide> """ Modified version of build_clib that handles fortran source files.
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import os
<ide> from glob import glob
<ide><path>numpy/distutils/command/build_ext.py
<ide> """ Modified version of build_ext that handles fortran source files.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import os
<ide> import sys
<ide><path>numpy/distutils/command/build_py.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from distutils.command.build_py import build_py as old_build_py
<ide> from numpy.distutils.misc_util import is_string
<ide><path>numpy/distutils/command/build_scripts.py
<ide> """ Modified version of build_scripts that handles building scripts from functions.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from distutils.command.build_scripts import build_scripts as old_build_scripts
<ide> from numpy.distutils import log
<ide><path>numpy/distutils/command/build_src.py
<ide> """ Build swig, f2py, pyrex sources.
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import os
<ide> import re
<ide><path>numpy/distutils/command/config.py
<ide> # try_compile call. try_run works but is untested for most of Fortran
<ide> # compilers (they must define linker_exe first).
<ide> # Pearu Peterson
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import os, signal
<ide> import warnings
<ide><path>numpy/distutils/command/config_compiler.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from distutils.core import Command
<ide> from numpy.distutils import log
<ide><path>numpy/distutils/command/develop.py
<ide> files with filenames.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from setuptools.command.develop import develop as old_develop
<ide>
<ide><path>numpy/distutils/command/egg_info.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from setuptools.command.egg_info import egg_info as _egg_info
<ide>
<ide><path>numpy/distutils/command/install.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide> if 'setuptools' in sys.modules:
<ide><path>numpy/distutils/command/install_clib.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import os
<ide> from distutils.core import Command
<ide><path>numpy/distutils/command/install_data.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide> have_setuptools = ('setuptools' in sys.modules)
<ide><path>numpy/distutils/command/install_headers.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import os
<ide> from distutils.command.install_headers import install_headers as old_install_headers
<ide><path>numpy/distutils/command/sdist.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide> if 'setuptools' in sys.modules:
<ide><path>numpy/distutils/compat.py
<ide> numpy.distutils
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide>
<ide><path>numpy/distutils/conv_template.py
<ide>
<ide> This will exlude the pattern where var1 is value1 and var2 is value2 when
<ide> the result is being generated.
<del>
<add>
<ide>
<ide> In the main body each replace will use one entry from the list of named replacements
<ide>
<ide> 3, 3, jim
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide>
<ide> __all__ = ['process_str', 'process_file']
<ide> def resolve_includes(source):
<ide> if not os.path.isabs(fn):
<ide> fn = os.path.join(d,fn)
<ide> if os.path.isfile(fn):
<del> print ('Including file',fn)
<add> print('Including file',fn)
<ide> lines.extend(resolve_includes(fn))
<ide> else:
<ide> lines.append(line)
<ide><path>numpy/distutils/core.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide> from distutils.core import *
<ide><path>numpy/distutils/cpuinfo.py
<ide> Pearu Peterson
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> __all__ = ['cpu']
<ide>
<ide><path>numpy/distutils/environment.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import os
<ide> from distutils.dist import Distribution
<ide><path>numpy/distutils/exec_command.py
<ide> - Tests, that send messages to stderr, fail when executed from MSYS prompt
<ide> because the messages are lost at some point.
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> __all__ = ['exec_command','find_executable']
<ide>
<ide><path>numpy/distutils/extension.py
<ide> Overridden to support f2py.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> __revision__ = "$Id: extension.py,v 1.1 2005/04/09 19:29:34 pearu Exp $"
<ide>
<ide><path>numpy/distutils/fcompiler/__init__.py
<ide> But note that FCompiler.executables is actually a dictionary of commands.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> __all__ = ['FCompiler','new_fcompiler','show_fcompilers',
<ide> 'dummy_fortran_file']
<ide><path>numpy/distutils/fcompiler/absoft.py
<ide> # Notes:
<ide> # - when using -g77 then use -DUNDERSCORE_G77 to compile f2py
<ide> # generated extension modules (works for f2py v2.45.241_1936 and up)
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import os
<ide>
<ide><path>numpy/distutils/fcompiler/compaq.py
<ide>
<ide> #http://www.compaq.com/fortran/docs/
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import os
<ide> import sys
<ide><path>numpy/distutils/fcompiler/g95.py
<ide> # http://g95.sourceforge.net/
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from numpy.distutils.fcompiler import FCompiler
<ide>
<ide><path>numpy/distutils/fcompiler/gnu.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import re
<ide> import os
<ide><path>numpy/distutils/fcompiler/hpux.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from numpy.distutils.fcompiler import FCompiler
<ide>
<ide><path>numpy/distutils/fcompiler/ibm.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import os
<ide> import re
<ide><path>numpy/distutils/fcompiler/intel.py
<ide> # http://developer.intel.com/software/products/compilers/flin/
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide>
<ide><path>numpy/distutils/fcompiler/lahey.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import os
<ide>
<ide><path>numpy/distutils/fcompiler/mips.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from numpy.distutils.cpuinfo import cpu
<ide> from numpy.distutils.fcompiler import FCompiler
<ide><path>numpy/distutils/fcompiler/nag.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide> from numpy.distutils.fcompiler import FCompiler
<ide><path>numpy/distutils/fcompiler/none.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from numpy.distutils.fcompiler import FCompiler
<ide>
<ide><path>numpy/distutils/fcompiler/pathf95.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from numpy.distutils.fcompiler import FCompiler
<ide>
<ide> def get_flags_debug(self):
<ide> from numpy.distutils.fcompiler import new_fcompiler
<ide> compiler = new_fcompiler(compiler='pathf95')
<ide> compiler.customize()
<del> print compiler.get_version()
<add> print(compiler.get_version())
<ide><path>numpy/distutils/fcompiler/pg.py
<ide> # http://www.pgroup.com
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from numpy.distutils.fcompiler import FCompiler
<ide> from sys import platform
<ide> class PGroupFCompiler(FCompiler):
<ide> }
<ide> pic_flags = ['-fpic']
<ide>
<del>
<add>
<ide> module_dir_switch = '-module '
<ide> module_include_switch = '-I'
<ide>
<ide> def get_flags_opt(self):
<ide> return ['-fast']
<ide> def get_flags_debug(self):
<ide> return ['-g']
<del>
<add>
<ide> if platform == 'darwin':
<ide> def get_flags_linker_so(self):
<ide> return ["-dynamic", '-undefined', 'dynamic_lookup']
<ide><path>numpy/distutils/fcompiler/sun.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from numpy.distutils.ccompiler import simple_version_match
<ide> from numpy.distutils.fcompiler import FCompiler
<ide><path>numpy/distutils/fcompiler/vast.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import os
<ide>
<ide><path>numpy/distutils/from_template.py
<ide> <ctypereal=float,double,\\0,\\1>
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> __all__ = ['process_str','process_file']
<ide>
<ide> def listrepl(mobj):
<ide> elif num == numsubs:
<ide> rules[r] = rule
<ide> else:
<del> print("Mismatch in number of replacements (base <%s=%s>)"\
<del> " for <%s=%s>. Ignoring." % (base_rule,
<del> ','.join(rules[base_rule]),
<del> r,thelist))
<add> print("Mismatch in number of replacements (base <%s=%s>)"
<add> " for <%s=%s>. Ignoring." %
<add> (base_rule, ','.join(rules[base_rule]), r,thelist))
<ide> if not rules:
<ide> return substr
<ide>
<ide> def resolve_includes(source):
<ide> if not os.path.isabs(fn):
<ide> fn = os.path.join(d,fn)
<ide> if os.path.isfile(fn):
<del> print ('Including file',fn)
<add> print('Including file', fn)
<ide> lines.extend(resolve_includes(fn))
<ide> else:
<ide> lines.append(line)
<ide><path>numpy/distutils/info.py
<ide> """
<ide> Enhanced distutils with Fortran compilers support and more.
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> postpone_import = True
<ide><path>numpy/distutils/intelccompiler.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from distutils.unixccompiler import UnixCCompiler
<ide> from numpy.distutils.exec_command import find_executable
<ide><path>numpy/distutils/lib2def.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import re
<ide> import sys
<ide> def parse_cmd():
<ide> elif sys.argv[1][-4:] == '.def' and sys.argv[2][-4:] == '.lib':
<ide> deffile, libfile = sys.argv[1:]
<ide> else:
<del> print "I'm assuming that your first argument is the library"
<del> print "and the second is the DEF file."
<add> print("I'm assuming that your first argument is the library")
<add> print("and the second is the DEF file.")
<ide> elif len(sys.argv) == 2:
<ide> if sys.argv[1][-4:] == '.def':
<ide> deffile = sys.argv[1]
<ide><path>numpy/distutils/line_endings.py
<ide> """ Functions for converting from DOS to UNIX line endings
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys, re, os
<ide>
<ide> def dos2unix(file):
<ide> "Replace CRLF with LF in argument files. Print names of changed files."
<ide> if os.path.isdir(file):
<del> print file, "Directory!"
<add> print(file, "Directory!")
<ide> return
<ide>
<ide> data = open(file, "rb").read()
<ide> if '\0' in data:
<del> print file, "Binary!"
<add> print(file, "Binary!")
<ide> return
<ide>
<ide> newdata = re.sub("\r\n", "\n", data)
<ide> if newdata != data:
<del> print 'dos2unix:', file
<add> print('dos2unix:', file)
<ide> f = open(file, "wb")
<ide> f.write(newdata)
<ide> f.close()
<ide> return file
<ide> else:
<del> print file, 'ok'
<add> print(file, 'ok')
<ide>
<ide> def dos2unix_one_dir(modified_files,dir_name,file_names):
<ide> for file in file_names:
<ide> def dos2unix_dir(dir_name):
<ide> def unix2dos(file):
<ide> "Replace LF with CRLF in argument files. Print names of changed files."
<ide> if os.path.isdir(file):
<del> print file, "Directory!"
<add> print(file, "Directory!")
<ide> return
<ide>
<ide> data = open(file, "rb").read()
<ide> if '\0' in data:
<del> print file, "Binary!"
<add> print(file, "Binary!")
<ide> return
<ide> newdata = re.sub("\r\n", "\n", data)
<ide> newdata = re.sub("\n", "\r\n", newdata)
<ide> if newdata != data:
<del> print 'unix2dos:', file
<add> print('unix2dos:', file)
<ide> f = open(file, "wb")
<ide> f.write(newdata)
<ide> f.close()
<ide> return file
<ide> else:
<del> print file, 'ok'
<add> print(file, 'ok')
<ide>
<ide> def unix2dos_one_dir(modified_files,dir_name,file_names):
<ide> for file in file_names:
<ide><path>numpy/distutils/log.py
<ide> # Colored log, requires Python 2.3 or up.
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide> from distutils.log import *
<ide><path>numpy/distutils/mingw32ccompiler.py
<ide> # 3. Force windows to use g77
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import os
<ide> import sys
<ide> def object_filenames (self,
<ide> def find_python_dll():
<ide> maj, min, micro = [int(i) for i in sys.version_info[:3]]
<ide> dllname = 'python%d%d.dll' % (maj, min)
<del> print ("Looking for %s" % dllname)
<add> print("Looking for %s" % dllname)
<ide>
<ide> # We can't do much here:
<ide> # - find it in python main dir
<ide><path>numpy/distutils/misc_util.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import os
<ide> import re
<ide> def _fix_paths(paths,local_path,include_non_existing):
<ide> else:
<ide> if include_non_existing:
<ide> new_paths.append(n)
<del> print('could not resolve pattern in %r: %r' \
<del> % (local_path,n))
<add> print('could not resolve pattern in %r: %r' %
<add> (local_path,n))
<ide> else:
<ide> n2 = njoin(local_path,n)
<ide> if os.path.exists(n2):
<ide> def _fix_paths(paths,local_path,include_non_existing):
<ide> elif include_non_existing:
<ide> new_paths.append(n)
<ide> if not os.path.exists(n):
<del> print('non-existing path in %r: %r' \
<del> % (local_path,n))
<add> print('non-existing path in %r: %r' %
<add> (local_path,n))
<ide>
<ide> elif is_sequence(n):
<ide> new_paths.extend(_fix_paths(n,local_path,include_non_existing))
<ide> def get_data_files(data):
<ide> if os.path.isfile(s):
<ide> filenames.append(s)
<ide> else:
<del> print('Not existing data file:',s)
<add> print('Not existing data file:', s)
<ide> else:
<ide> raise TypeError(repr(s))
<ide> return filenames
<ide> def add_data_dir(self,data_path):
<ide> #
<ide> for path in paths:
<ide> if not os.path.isdir(path):
<del> print('Not a directory, skipping',path)
<add> print('Not a directory, skipping', path)
<ide> continue
<ide> rpath = rel_path(path, self.local_path)
<ide> path_list = rpath.split(os.sep)
<ide><path>numpy/distutils/npy_pkg_config.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide> import re
<ide> def read_config(pkgname, dirs=None):
<ide> files = glob.glob("*.ini")
<ide> for f in files:
<ide> info = read_config(f)
<del> print ("%s\t%s - %s" % (info.name, info.name, info.description))
<add> print("%s\t%s - %s" % (info.name, info.name, info.description))
<ide>
<ide> pkg_name = args[1]
<ide> import os
<ide> def read_config(pkgname, dirs=None):
<ide> info.vars[name] = value
<ide>
<ide> if options.cflags:
<del> print (info.cflags(section))
<add> print(info.cflags(section))
<ide> if options.libs:
<del> print (info.libs(section))
<add> print(info.libs(section))
<ide> if options.version:
<del> print (info.version)
<add> print(info.version)
<ide> if options.min_version:
<del> print (info.version >= options.min_version)
<add> print(info.version >= options.min_version)
<ide><path>numpy/distutils/numpy_distribution.py
<ide> # XXX: Handle setuptools ?
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from distutils.core import Distribution
<ide>
<ide><path>numpy/distutils/pathccompiler.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from distutils.unixccompiler import UnixCCompiler
<ide>
<ide><path>numpy/distutils/setup.py
<ide> #!/usr/bin/env python
<del>from __future__ import division
<add>from __future__ import division, print_function
<ide>
<ide> def configuration(parent_package='',top_path=None):
<ide> from numpy.distutils.misc_util import Configuration
<ide><path>numpy/distutils/system_info.py
<ide> NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide> import os
<ide><path>numpy/distutils/tests/f2py_ext/__init__.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide><path>numpy/distutils/tests/f2py_ext/setup.py
<ide> #!/usr/bin/env python
<del>from __future__ import division
<add>from __future__ import division, print_function
<ide>
<ide> def configuration(parent_package='',top_path=None):
<ide> from numpy.distutils.misc_util import Configuration
<ide><path>numpy/distutils/tests/f2py_ext/tests/test_fib2.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide> from numpy.testing import *
<ide><path>numpy/distutils/tests/f2py_f90_ext/__init__.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide><path>numpy/distutils/tests/f2py_f90_ext/setup.py
<ide> #!/usr/bin/env python
<del>from __future__ import division
<add>from __future__ import division, print_function
<ide>
<ide> def configuration(parent_package='',top_path=None):
<ide> from numpy.distutils.misc_util import Configuration
<ide><path>numpy/distutils/tests/f2py_f90_ext/tests/test_foo.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide> from numpy.testing import *
<ide><path>numpy/distutils/tests/gen_ext/__init__.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide><path>numpy/distutils/tests/gen_ext/setup.py
<ide> #!/usr/bin/env python
<del>from __future__ import division
<add>from __future__ import division, print_function
<ide>
<ide> fib3_f = '''
<ide> C FILE: FIB3.F
<ide><path>numpy/distutils/tests/gen_ext/tests/test_fib3.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide> from numpy.testing import *
<ide><path>numpy/distutils/tests/pyrex_ext/__init__.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide><path>numpy/distutils/tests/pyrex_ext/setup.py
<ide> #!/usr/bin/env python
<del>from __future__ import division
<add>from __future__ import division, print_function
<ide>
<ide> def configuration(parent_package='',top_path=None):
<ide> from numpy.distutils.misc_util import Configuration
<ide><path>numpy/distutils/tests/pyrex_ext/tests/test_primes.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide> from numpy.testing import *
<ide><path>numpy/distutils/tests/setup.py
<ide> #!/usr/bin/env python
<del>from __future__ import division
<add>from __future__ import division, print_function
<ide>
<ide> def configuration(parent_package='',top_path=None):
<ide> from numpy.distutils.misc_util import Configuration
<ide><path>numpy/distutils/tests/swig_ext/__init__.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide><path>numpy/distutils/tests/swig_ext/setup.py
<ide> #!/usr/bin/env python
<del>from __future__ import division
<add>from __future__ import division, print_function
<ide>
<ide> def configuration(parent_package='',top_path=None):
<ide> from numpy.distutils.misc_util import Configuration
<ide><path>numpy/distutils/tests/swig_ext/tests/test_example.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide> from numpy.testing import *
<ide><path>numpy/distutils/tests/swig_ext/tests/test_example2.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide> from numpy.testing import *
<ide><path>numpy/distutils/tests/test_exec_command.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import os
<ide> import sys
<ide><path>numpy/distutils/tests/test_fcompiler_gnu.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from numpy.testing import *
<ide>
<ide><path>numpy/distutils/tests/test_fcompiler_intel.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from numpy.testing import *
<ide>
<ide><path>numpy/distutils/tests/test_misc_util.py
<ide> #!/usr/bin/env python
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from numpy.testing import *
<ide> from numpy.distutils.misc_util import appendpath, minrelpath, gpaths, rel_path
<ide><path>numpy/distutils/tests/test_npy_pkg_config.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import os
<ide> from tempfile import mkstemp
<ide><path>numpy/distutils/unixccompiler.py
<ide> unixccompiler - can handle very long argument lists for ar.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import os
<ide>
<ide><path>numpy/doc/__init__.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import os
<ide>
<ide><path>numpy/doc/basics.py
<ide> methods arrays do.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide><path>numpy/doc/broadcasting.py
<ide> for illustrations of broadcasting concepts.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide><path>numpy/doc/byteswapping.py
<ide> False
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide><path>numpy/doc/constants.py
<ide> #
<ide> # Note: the docstring is autogenerated.
<ide> #
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import textwrap, re
<ide>
<ide><path>numpy/doc/creation.py
<ide> diagonal).
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide><path>numpy/doc/glossary.py
<ide> and f2py (which wraps Fortran).
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide><path>numpy/doc/howtofind.py
<ide> How to find things in NumPy.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide><path>numpy/doc/indexing.py
<ide> 40
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide><path>numpy/doc/internals.py
<ide> it is more in line with Python semantics and the natural order of the data.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide><path>numpy/doc/io.py
<ide> Placeholder for array I/O documentation.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide><path>numpy/doc/jargon.py
<ide> Placeholder for computer science, engineering and other jargon.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide><path>numpy/doc/methods_vs_functions.py
<ide> Placeholder for Methods vs. Functions documentation.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide><path>numpy/doc/misc.py
<ide> 5) SIP (used mainly in PyQT)
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide><path>numpy/doc/performance.py
<ide> Placeholder for Improving Performance documentation.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide><path>numpy/doc/structured_arrays.py
<ide> <http://www.scipy.org/Cookbook/Recarray>`_.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide><path>numpy/doc/subclassing.py
<ide> def __array_wrap__(self, arr, context=None):
<ide>
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide><path>numpy/doc/ufuncs.py
<ide> a convenient way to apply these operators.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide><path>numpy/dual.py
<ide> .. _Scipy : http://www.scipy.org
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> # This module should be used for functions both in numpy and scipy if
<ide> # you want to use the numpy version if available but the scipy version
<ide><path>numpy/f2py/__init__.py
<ide> #!/usr/bin/env python
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> __all__ = ['run_main','compile','f2py_testing']
<ide>
<ide><path>numpy/f2py/__version__.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> major = 2
<ide>
<ide><path>numpy/f2py/auxfuncs.py
<ide> Pearu Peterson
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> __version__ = "$Revision: 1.65 $"[10:-1]
<ide>
<ide><path>numpy/f2py/capi_maps.py
<ide> Pearu Peterson
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> __version__ = "$Revision: 1.60 $"[10:-1]
<ide>
<ide><path>numpy/f2py/cb_rules.py
<ide> Pearu Peterson
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> __version__ = "$Revision: 1.53 $"[10:-1]
<ide>
<ide><path>numpy/f2py/cfuncs.py
<ide> Pearu Peterson
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> __version__ = "$Revision: 1.75 $"[10:-1]
<ide>
<ide> def get_needs():
<ide> del outneeds[n][0]
<ide> if saveout and (0 not in map(lambda x,y:x==y,saveout,outneeds[n])) \
<ide> and outneeds[n] != []:
<del> print n,saveout
<add> print(n,saveout)
<ide> errmess('get_needs: no progress in sorting needs, probably circular dependence, skipping.\n')
<ide> out=out+saveout
<ide> break
<ide><path>numpy/f2py/common_rules.py
<ide> Pearu Peterson
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> __version__ = "$Revision: 1.19 $"[10:-1]
<ide>
<ide><path>numpy/f2py/crackfortran.py
<ide> The above may be solved by creating appropriate preprocessor program, for example.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> __version__ = "$Revision: 1.177 $"[10:-1]
<ide> import platform
<ide> def analyzeline(m,case,line):
<ide> if not m1:
<ide> if case in ['public','private']: k=''
<ide> else:
<del> print m.groupdict()
<add> print(m.groupdict())
<ide> outmess('analyzeline: no name pattern found in %s statement for %s. Skipping.\n'%(case,`e`))
<ide> continue
<ide> else:
<ide> def analyzeline(m,case,line):
<ide> else:
<ide> pass
<ide> else:
<del> print m.groupdict()
<add> print(m.groupdict())
<ide> outmess('analyzeline: Could not crack the use statement.\n')
<ide> elif case in ['f2pyenhancements']:
<ide> if 'f2pyenhancements' not in groupcache[groupcounter]:
<ide> def analyzeline(m,case,line):
<ide> m.group('this'))
<ide> else:
<ide> if verbose>1:
<del> print m.groupdict()
<add> print(m.groupdict())
<ide> outmess('analyzeline: No code implemented for line.\n')
<ide>
<ide> def appendmultiline(group, context_name,ml):
<ide> def get_parameters(vars, global_params={}):
<ide> if nl!=n:
<ide> params[nl] = params[n]
<ide> else:
<del> print vars[n]
<add> print(vars[n])
<ide> outmess('get_parameters:parameter %s does not have value?!\n'%(`n`))
<ide> return params
<ide>
<ide><path>numpy/f2py/diagnose.py
<ide> #!/usr/bin/env python
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import os
<ide> import sys
<ide> import tempfile
<ide>
<ide> def run_command(cmd):
<del> print 'Running %r:' % (cmd)
<add> print('Running %r:' % (cmd))
<ide> s = os.system(cmd)
<del> print '------'
<add> print('------')
<ide> def run():
<ide> _path = os.getcwd()
<ide> os.chdir(tempfile.gettempdir())
<del> print '------'
<del> print 'os.name=%r' % (os.name)
<del> print '------'
<del> print 'sys.platform=%r' % (sys.platform)
<del> print '------'
<del> print 'sys.version:'
<del> print sys.version
<del> print '------'
<del> print 'sys.prefix:'
<del> print sys.prefix
<del> print '------'
<del> print 'sys.path=%r' % (':'.join(sys.path))
<del> print '------'
<add> print('------')
<add> print('os.name=%r' % (os.name))
<add> print('------')
<add> print('sys.platform=%r' % (sys.platform))
<add> print('------')
<add> print('sys.version:')
<add> print(sys.version)
<add> print('------')
<add> print('sys.prefix:')
<add> print(sys.prefix)
<add> print('------')
<add> print('sys.path=%r' % (':'.join(sys.path)))
<add> print('------')
<ide>
<ide> try:
<ide> import numpy
<ide> has_newnumpy = 1
<ide> except ImportError:
<del> print 'Failed to import new numpy:', sys.exc_info()[1]
<add> print('Failed to import new numpy:', sys.exc_info()[1])
<ide> has_newnumpy = 0
<ide>
<ide> try:
<ide> from numpy.f2py import f2py2e
<ide> has_f2py2e = 1
<ide> except ImportError:
<del> print 'Failed to import f2py2e:',sys.exc_info()[1]
<add> print('Failed to import f2py2e:',sys.exc_info()[1])
<ide> has_f2py2e = 0
<ide>
<ide> try:
<ide> def run():
<ide> import numpy_distutils
<ide> has_numpy_distutils = 1
<ide> except ImportError:
<del> print 'Failed to import numpy_distutils:',sys.exc_info()[1]
<add> print('Failed to import numpy_distutils:',sys.exc_info()[1])
<ide> has_numpy_distutils = 0
<ide>
<ide> if has_newnumpy:
<ide> try:
<del> print 'Found new numpy version %r in %s' % \
<del> (numpy.__version__, numpy.__file__)
<add> print('Found new numpy version %r in %s' % \
<add> (numpy.__version__, numpy.__file__))
<ide> except Exception as msg:
<del> print 'error:', msg
<del> print '------'
<add> print('error:', msg)
<add> print('------')
<ide>
<ide> if has_f2py2e:
<ide> try:
<del> print 'Found f2py2e version %r in %s' % \
<del> (f2py2e.__version__.version,f2py2e.__file__)
<add> print('Found f2py2e version %r in %s' % \
<add> (f2py2e.__version__.version,f2py2e.__file__))
<ide> except Exception as msg:
<del> print 'error:',msg
<del> print '------'
<add> print('error:',msg)
<add> print('------')
<ide>
<ide> if has_numpy_distutils:
<ide> try:
<ide> if has_numpy_distutils == 2:
<del> print 'Found numpy.distutils version %r in %r' % (\
<add> print('Found numpy.distutils version %r in %r' % (\
<ide> numpy.distutils.__version__,
<del> numpy.distutils.__file__)
<add> numpy.distutils.__file__))
<ide> else:
<del> print 'Found numpy_distutils version %r in %r' % (\
<add> print('Found numpy_distutils version %r in %r' % (\
<ide> numpy_distutils.numpy_distutils_version.numpy_distutils_version,
<del> numpy_distutils.__file__)
<del> print '------'
<add> numpy_distutils.__file__))
<add> print('------')
<ide> except Exception as msg:
<del> print 'error:',msg
<del> print '------'
<add> print('error:',msg)
<add> print('------')
<ide> try:
<ide> if has_numpy_distutils == 1:
<del> print 'Importing numpy_distutils.command.build_flib ...',
<add> print('Importing numpy_distutils.command.build_flib ...', end=' ')
<ide> import numpy_distutils.command.build_flib as build_flib
<del> print 'ok'
<del> print '------'
<add> print('ok')
<add> print('------')
<ide> try:
<del> print 'Checking availability of supported Fortran compilers:'
<add> print('Checking availability of supported Fortran compilers:')
<ide> for compiler_class in build_flib.all_compilers:
<ide> compiler_class(verbose=1).is_available()
<del> print '------'
<add> print('------')
<ide> except Exception as msg:
<del> print 'error:',msg
<del> print '------'
<add> print('error:',msg)
<add> print('------')
<ide> except Exception as msg:
<del> print 'error:',msg,'(ignore it, build_flib is obsolute for numpy.distutils 0.2.2 and up)'
<del> print '------'
<add> print('error:',msg,'(ignore it, build_flib is obsolute for numpy.distutils 0.2.2 and up)')
<add> print('------')
<ide> try:
<ide> if has_numpy_distutils == 2:
<del> print 'Importing numpy.distutils.fcompiler ...',
<add> print('Importing numpy.distutils.fcompiler ...', end=' ')
<ide> import numpy.distutils.fcompiler as fcompiler
<ide> else:
<del> print 'Importing numpy_distutils.fcompiler ...',
<add> print('Importing numpy_distutils.fcompiler ...', end=' ')
<ide> import numpy_distutils.fcompiler as fcompiler
<del> print 'ok'
<del> print '------'
<add> print('ok')
<add> print('------')
<ide> try:
<del> print 'Checking availability of supported Fortran compilers:'
<add> print('Checking availability of supported Fortran compilers:')
<ide> fcompiler.show_fcompilers()
<del> print '------'
<add> print('------')
<ide> except Exception as msg:
<del> print 'error:',msg
<del> print '------'
<add> print('error:',msg)
<add> print('------')
<ide> except Exception as msg:
<del> print 'error:',msg
<del> print '------'
<add> print('error:',msg)
<add> print('------')
<ide> try:
<ide> if has_numpy_distutils == 2:
<del> print 'Importing numpy.distutils.cpuinfo ...',
<add> print('Importing numpy.distutils.cpuinfo ...', end=' ')
<ide> from numpy.distutils.cpuinfo import cpuinfo
<del> print 'ok'
<del> print '------'
<add> print('ok')
<add> print('------')
<ide> else:
<ide> try:
<del> print 'Importing numpy_distutils.command.cpuinfo ...',
<add> print('Importing numpy_distutils.command.cpuinfo ...', end=' ')
<ide> from numpy_distutils.command.cpuinfo import cpuinfo
<del> print 'ok'
<del> print '------'
<add> print('ok')
<add> print('------')
<ide> except Exception as msg:
<del> print 'error:',msg,'(ignore it)'
<del> print 'Importing numpy_distutils.cpuinfo ...',
<add> print('error:',msg,'(ignore it)')
<add> print('Importing numpy_distutils.cpuinfo ...', end=' ')
<ide> from numpy_distutils.cpuinfo import cpuinfo
<del> print 'ok'
<del> print '------'
<add> print('ok')
<add> print('------')
<ide> cpu = cpuinfo()
<del> print 'CPU information:',
<add> print('CPU information:', end=' ')
<ide> for name in dir(cpuinfo):
<ide> if name[0]=='_' and name[1]!='_' and getattr(cpu,name[1:])():
<del> print name[1:],
<del> print '------'
<add> print(name[1:], end=' ')
<add> print('------')
<ide> except Exception as msg:
<del> print 'error:',msg
<del> print '------'
<add> print('error:',msg)
<add> print('------')
<ide> os.chdir(_path)
<ide> if __name__ == "__main__":
<ide> run()
<ide><path>numpy/f2py/doc/collectinput.py
<ide> collectinput # in and out are stdin and stdout
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> __version__ = "0.0"
<ide>
<ide> except: flag=0
<ide> if flag==0:
<ide> sys.stderr.write('Could not open a file: '+fn+'\n')
<del> print l+l1
<add> print(l+l1)
<ide> continue
<ide> elif flag==1:
<ide> sys.stderr.write(fn+'\n')
<del> print '%%%%% Begin of '+fn
<del> print getoutput(sys.argv[0]+' < '+fn)
<del> print '%%%%% End of '+fn
<add> print('%%%%% Begin of '+fn)
<add> print(getoutput(sys.argv[0]+' < '+fn))
<add> print('%%%%% End of '+fn)
<ide> else:
<ide> sys.stderr.write('Could not extract a file name from: '+l)
<del> print l+l1
<add> print(l+l1)
<ide> else:
<del> print l+l1
<add> print(l+l1)
<ide> sys.stdout.close()
<ide><path>numpy/f2py/docs/pytest.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> #File: pytest.py
<ide> import Numeric
<ide><path>numpy/f2py/docs/usersguide/setup_example.py
<ide> #!/usr/bin/env python
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> # File: setup_example.py
<ide>
<ide><path>numpy/f2py/f2py2e.py
<ide> Pearu Peterson
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from . import __version__
<ide> f2py_version = __version__.version
<ide> def scaninputline(inputline):
<ide> elif l=='-h': f2=1
<ide> elif l=='-m': f3=1
<ide> elif l[:2]=='-v':
<del> print f2py_version
<add> print(f2py_version)
<ide> sys.exit()
<ide> elif l=='--show-compilers':
<ide> f5=1
<ide> def scaninputline(inputline):
<ide> elif f==-1: skipfuncs.append(l)
<ide> elif f==0: onlyfuncs.append(l)
<ide> if not f5 and not files and not modulename:
<del> print __usage__
<add> print(__usage__)
<ide> sys.exit()
<ide> if not os.path.isdir(buildpath):
<ide> if not verbose:
<ide> def run_compile():
<ide> nv = vmap[ov]
<ide> except KeyError:
<ide> if ov not in vmap.values():
<del> print 'Unknown vendor: "%s"' % (s[len(v):])
<add> print('Unknown vendor: "%s"' % (s[len(v):]))
<ide> nv = ov
<ide> i = flib_flags.index(s)
<ide> flib_flags[i] = '--fcompiler=' + nv
<ide> def run_compile():
<ide> if len(name_value)==2:
<ide> define_macros[i] = tuple(name_value)
<ide> else:
<del> print 'Invalid use of -D:',name_value
<add> print('Invalid use of -D:',name_value)
<ide>
<ide> from numpy.distutils.system_info import get_info
<ide>
<ide><path>numpy/f2py/f2py_testing.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide> import re
<ide> def run(runtest,test_functions,repeat=1):
<ide> else:
<ide> diff_memusage2 = memusage() - start_memusage
<ide> if diff_memusage2!=diff_memusage:
<del> print 'memory usage change at step %i:' % i,\
<add> print('memory usage change at step %i:' % i,\
<ide> diff_memusage2-diff_memusage,\
<del> fname
<add> fname)
<ide> diff_memusage = diff_memusage2
<ide> current_memusage = memusage()
<del> print 'run',repeat*len(test_functions),'tests',\
<del> 'in %.2f seconds' % ((jiffies()-start_jiffies)/100.0)
<add> print('run',repeat*len(test_functions),'tests',\
<add> 'in %.2f seconds' % ((jiffies()-start_jiffies)/100.0))
<ide> if start_memusage:
<del> print 'initial virtual memory size:',start_memusage,'bytes'
<del> print 'current virtual memory size:',current_memusage,'bytes'
<add> print('initial virtual memory size:',start_memusage,'bytes')
<add> print('current virtual memory size:',current_memusage,'bytes')
<ide><path>numpy/f2py/f90mod_rules.py
<ide> Pearu Peterson
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> __version__ = "$Revision: 1.27 $"[10:-1]
<ide>
<ide> def iadd(line,s=ihooks): s[0] = '%s\n%s'%(s[0],line)
<ide> if hasbody(m):
<ide> for b in m['body']:
<ide> if not isroutine(b):
<del> print 'Skipping',b['block'],b['name']
<add> print('Skipping',b['block'],b['name'])
<ide> continue
<ide> modobjs.append('%s()'%(b['name']))
<ide> b['modulename'] = m['name']
<ide><path>numpy/f2py/func2subr.py
<ide> Pearu Peterson
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> __version__ = "$Revision: 1.16 $"[10:-1]
<ide>
<ide><path>numpy/f2py/info.py
<ide> """Fortran to Python Interface Generator.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> postpone_import = True
<ide><path>numpy/f2py/rules.py
<ide> Pearu Peterson
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> __version__ = "$Revision: 1.129 $"[10:-1]
<ide>
<ide><path>numpy/f2py/setup.py
<ide> Pearu Peterson
<ide>
<ide> """
<del>from __future__ import division
<add>from __future__ import division, print_function
<ide>
<ide> __version__ = "$Id: setup.py,v 1.32 2005/01/30 17:22:14 pearu Exp $"
<ide>
<ide><path>numpy/f2py/tests/test_array_from_pyobj.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import unittest
<ide> import os
<ide><path>numpy/f2py/tests/test_assumed_shape.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import os
<ide> import math
<ide><path>numpy/f2py/tests/test_callback.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from numpy.testing import *
<ide> from numpy import array
<ide><path>numpy/f2py/tests/test_kind.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import os
<ide> import math
<ide><path>numpy/f2py/tests/test_mixed.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import os
<ide> import math
<ide><path>numpy/f2py/tests/test_return_character.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from numpy.testing import *
<ide> from numpy import array
<ide><path>numpy/f2py/tests/test_return_complex.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from numpy.testing import *
<ide> from numpy import array
<ide><path>numpy/f2py/tests/test_return_integer.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from numpy.testing import *
<ide> from numpy import array
<ide><path>numpy/f2py/tests/test_return_logical.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from numpy.testing import *
<ide> from numpy import array
<ide><path>numpy/f2py/tests/test_return_real.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from numpy.testing import *
<ide> from numpy import array
<ide><path>numpy/f2py/tests/test_size.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import os
<ide> import math
<ide><path>numpy/f2py/tests/util.py
<ide> - detecting if compilers are present
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import os
<ide> import sys
<ide><path>numpy/f2py/use_rules.py
<ide> Pearu Peterson
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> __version__ = "$Revision: 1.3 $"[10:-1]
<ide>
<ide> def buildusevar(name,realname,vars,usemodulename):
<ide> rd=dictappend({},vrd)
<ide> var=vars[realname]
<ide>
<del> print name,realname,vars[realname]
<add> print(name,realname,vars[realname])
<ide> ret=applyrules(usemodule_rules,rd)
<ide> return ret
<ide><path>numpy/fft/__init__.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> # To get sub-modules
<ide> from .info import __doc__
<ide><path>numpy/fft/fftpack.py
<ide> version of the FFTPACK routines.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> __all__ = ['fft','ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
<ide> 'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn']
<ide><path>numpy/fft/helper.py
<ide> Discrete Fourier Transforms - helper.py
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> # Created by Pearu Peterson, September 2002
<ide>
<ide><path>numpy/fft/info.py
<ide> For examples, see the various functions.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> depends = ['core']
<ide><path>numpy/fft/setup.py
<del>from __future__ import division
<add>from __future__ import division, print_function
<ide>
<ide>
<ide> def configuration(parent_package='',top_path=None):
<ide><path>numpy/fft/tests/test_fftpack.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import numpy as np
<ide> from numpy.testing import TestCase, run_module_suite, assert_array_almost_equal
<ide><path>numpy/fft/tests/test_helper.py
<ide> Copied from fftpack.helper by Pearu Peterson, October 2005
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import numpy as np
<ide> from numpy.testing import TestCase, run_module_suite, assert_array_almost_equal
<ide><path>numpy/lib/__init__.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from .info import __doc__
<ide> from numpy.version import version as __version__
<ide><path>numpy/lib/_datasource.py
<ide> >>> fp.close()
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> __docformat__ = "restructuredtext en"
<ide>
<ide><path>numpy/lib/_iotools.py
<ide> """A collection of functions designed to help I/O with ascii files.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> __docformat__ = "restructuredtext en"
<ide>
<ide><path>numpy/lib/arraypad.py
<ide> of an n-dimensional array.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import numpy as np
<ide>
<ide><path>numpy/lib/arraysetops.py
<ide> :Author: Robert Cimrman
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> __all__ = ['ediff1d', 'intersect1d', 'setxor1d', 'union1d', 'setdiff1d',
<ide> 'unique', 'in1d']
<ide><path>numpy/lib/arrayterator.py
<ide> a user-specified number of elements.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from operator import mul
<ide>
<ide><path>numpy/lib/financial.py
<ide> or arrays (or other sequences).
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import numpy as np
<ide>
<ide><path>numpy/lib/format.py
<ide> alternatives, is described fully in the "npy-format" NEP.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import numpy
<ide> import sys
<ide><path>numpy/lib/function_base.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> __docformat__ = "restructuredtext en"
<ide> __all__ = ['select', 'piecewise', 'trim_zeros', 'copy', 'iterable',
<ide><path>numpy/lib/index_tricks.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> __all__ = ['ravel_multi_index',
<ide> 'unravel_index',
<ide><path>numpy/lib/info.py
<ide> ================ ===================
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> depends = ['core','testing']
<ide> global_symbols = ['*']
<ide><path>numpy/lib/npyio.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import numpy as np
<ide> from . import format
<ide><path>numpy/lib/polynomial.py
<ide> Functions to operate on polynomials.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> __all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
<ide> 'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
<ide><path>numpy/lib/recfunctions.py
<ide> They have been rewritten and extended for convenience.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide> import itertools
<ide><path>numpy/lib/scimath.py
<ide> correctly handled. See their respective docstrings for specific examples.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> __all__ = ['sqrt', 'log', 'log2', 'logn','log10', 'power', 'arccos',
<ide> 'arcsin', 'arctanh']
<ide><path>numpy/lib/setup.py
<del>from __future__ import division
<add>from __future__ import division, print_function
<ide>
<ide> from os.path import join
<ide>
<ide><path>numpy/lib/shape_base.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> __all__ = ['column_stack','row_stack', 'dstack','array_split','split','hsplit',
<ide> 'vsplit','dsplit','apply_over_axes','expand_dims',
<ide><path>numpy/lib/stride_tricks.py
<ide> NumPy reference guide.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import numpy as np
<ide>
<ide><path>numpy/lib/tests/test__datasource.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import os
<ide> import urllib2
<ide><path>numpy/lib/tests/test__iotools.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide> import time
<ide><path>numpy/lib/tests/test_arraypad.py
<ide> """Tests for the pad functions.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from numpy.testing import TestCase, run_module_suite, assert_array_equal
<ide> from numpy.testing import assert_raises, assert_array_almost_equal
<ide><path>numpy/lib/tests/test_arraysetops.py
<ide> """Test functions for 1D array set operations.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from numpy.testing import *
<ide> import numpy as np
<ide><path>numpy/lib/tests/test_arrayterator.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from operator import mul
<ide>
<ide><path>numpy/lib/tests/test_financial.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from numpy.testing import *
<ide> import numpy as np
<ide><path>numpy/lib/tests/test_format.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> r''' Test the .npy file format.
<ide>
<ide><path>numpy/lib/tests/test_function_base.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import warnings
<ide> import numpy as np
<ide><path>numpy/lib/tests/test_index_tricks.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from numpy.testing import *
<ide> import numpy as np
<ide><path>numpy/lib/tests/test_io.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide> import gzip
<ide><path>numpy/lib/tests/test_polynomial.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> '''
<ide> >>> p = np.poly1d([1.,2,3])
<ide><path>numpy/lib/tests/test_recfunctions.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide>
<ide><path>numpy/lib/tests/test_regression.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide> from numpy.testing import *
<ide><path>numpy/lib/tests/test_shape_base.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from numpy.testing import *
<ide> from numpy.lib import *
<ide><path>numpy/lib/tests/test_stride_tricks.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import numpy as np
<ide> from numpy.testing import *
<ide><path>numpy/lib/tests/test_twodim_base.py
<ide> """Test functions for matrix module
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from numpy.testing import *
<ide>
<ide><path>numpy/lib/tests/test_type_check.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from numpy.testing import *
<ide> from numpy.lib import *
<ide><path>numpy/lib/tests/test_ufunclike.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from numpy.testing import *
<ide> import numpy.core as nx
<ide><path>numpy/lib/tests/test_utils.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide> from numpy.testing import *
<ide><path>numpy/lib/twodim_base.py
<ide> """ Basic functions for manipulating 2d arrays
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> __all__ = ['diag','diagflat','eye','fliplr','flipud','rot90','tri','triu',
<ide> 'tril','vander','histogram2d','mask_indices',
<ide><path>numpy/lib/type_check.py
<ide> """Automatically adapted for numpy Sep 19, 2005 by convertcode.py
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> __all__ = ['iscomplexobj','isrealobj','imag','iscomplex',
<ide> 'isreal','nan_to_num','real','real_if_close',
<ide><path>numpy/lib/ufunclike.py
<ide> storing results in an output array.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> __all__ = ['fix', 'isneginf', 'isposinf']
<ide>
<ide><path>numpy/lib/user_array.py
<ide> complete.
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from numpy.core import array, asarray, absolute, add, subtract, multiply, \
<ide> divide, remainder, power, left_shift, right_shift, bitwise_and, \
<ide> def __getattr__(self,attr):
<ide>
<ide> ua=container(temp)
<ide> # new object created begin test
<del> print dir(ua)
<del> print shape(ua),ua.shape # I have changed Numeric.py
<add> print(dir(ua))
<add> print(shape(ua),ua.shape) # I have changed Numeric.py
<ide>
<ide> ua_small=ua[:3,:5]
<del> print ua_small
<add> print(ua_small)
<ide> ua_small[0,0]=10 # this did not change ua[0,0], which is not normal behavior
<del> print ua_small[0,0],ua[0,0]
<del> print sin(ua_small)/3.*6.+sqrt(ua_small**2)
<del> print less(ua_small,103),type(less(ua_small,103))
<del> print type(ua_small*reshape(arange(15),shape(ua_small)))
<del> print reshape(ua_small,(5,3))
<del> print transpose(ua_small)
<add> print(ua_small[0,0],ua[0,0])
<add> print(sin(ua_small)/3.*6.+sqrt(ua_small**2))
<add> print(less(ua_small,103),type(less(ua_small,103)))
<add> print(type(ua_small*reshape(arange(15),shape(ua_small))))
<add> print(reshape(ua_small,(5,3)))
<add> print(transpose(ua_small))
<ide><path>numpy/lib/utils.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import os
<ide> import sys
<ide> def who(vardict=None):
<ide> sp2 = max(10,maxshape)
<ide> sp3 = max(10,maxbyte)
<ide> prval = "Name %s Shape %s Bytes %s Type" % (sp1*' ', sp2*' ', sp3*' ')
<del> print prval + "\n" + "="*(len(prval)+5) + "\n"
<add> print(prval + "\n" + "="*(len(prval)+5) + "\n")
<ide>
<ide> for k in range(len(sta)):
<ide> val = sta[k]
<del> print "%s %s %s %s %s %s %s" % (val[0], ' '*(sp1-len(val[0])+4),
<add> print("%s %s %s %s %s %s %s" % (val[0], ' '*(sp1-len(val[0])+4),
<ide> val[1], ' '*(sp2-len(val[1])+5),
<ide> val[2], ' '*(sp3-len(val[2])+5),
<del> val[3])
<del> print "\nUpper bound on total bytes = %d" % totalbytes
<add> val[3]))
<add> print("\nUpper bound on total bytes = %d" % totalbytes)
<ide> return
<ide>
<ide> #-----------------------------------------------------------------------------
<ide> def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'):
<ide> try:
<ide> obj = _namedict[namestr][object]
<ide> if id(obj) in objlist:
<del> print >> output, "\n *** Repeat reference found in %s *** " % namestr
<add> print("\n *** Repeat reference found in %s *** " % namestr, file=output)
<ide> else:
<ide> objlist.append(id(obj))
<del> print >> output, " *** Found in %s ***" % namestr
<add> print(" *** Found in %s ***" % namestr, file=output)
<ide> info(obj)
<del> print >> output, "-"*maxwidth
<add> print("-"*maxwidth, file=output)
<ide> numfound += 1
<ide> except KeyError:
<ide> pass
<ide> if numfound == 0:
<del> print >> output, "Help for %s not found." % object
<add> print("Help for %s not found." % object, file=output)
<ide> else:
<del> print >> output, "\n *** Total of %d references found. ***" % numfound
<add> print("\n *** Total of %d references found. ***" % numfound, file=output)
<ide>
<ide> elif inspect.isfunction(object):
<ide> name = object.__name__
<ide> def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'):
<ide> else:
<ide> argstr = name + arguments
<ide>
<del> print >> output, " " + argstr + "\n"
<del> print >> output, inspect.getdoc(object)
<add> print(" " + argstr + "\n", file=output)
<add> print(inspect.getdoc(object), file=output)
<ide>
<ide> elif inspect.isclass(object):
<ide> name = object.__name__
<ide> def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'):
<ide> else:
<ide> argstr = name + arguments
<ide>
<del> print >> output, " " + argstr + "\n"
<add> print(" " + argstr + "\n", file=output)
<ide> doc1 = inspect.getdoc(object)
<ide> if doc1 is None:
<ide> if hasattr(object,'__init__'):
<del> print >> output, inspect.getdoc(object.__init__)
<add> print(inspect.getdoc(object.__init__), file=output)
<ide> else:
<del> print >> output, inspect.getdoc(object)
<add> print(inspect.getdoc(object), file=output)
<ide>
<ide> methods = pydoc.allmethods(object)
<ide> if methods != []:
<del> print >> output, "\n\nMethods:\n"
<add> print("\n\nMethods:\n", file=output)
<ide> for meth in methods:
<ide> if meth[0] == '_':
<ide> continue
<ide> thisobj = getattr(object, meth, None)
<ide> if thisobj is not None:
<ide> methstr, other = pydoc.splitdoc(inspect.getdoc(thisobj) or "None")
<del> print >> output, " %s -- %s" % (meth, methstr)
<add> print(" %s -- %s" % (meth, methstr), file=output)
<ide>
<ide> elif type(object) is types.InstanceType: ## check for __call__ method
<del> print >> output, "Instance of class: ", object.__class__.__name__
<del> print >> output
<add> print("Instance of class: ", object.__class__.__name__, file=output)
<add> print(file=output)
<ide> if hasattr(object, '__call__'):
<ide> arguments = inspect.formatargspec(*inspect.getargspec(object.__call__.__func__))
<ide> arglist = arguments.split(', ')
<ide> def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'):
<ide> else:
<ide> argstr = name + arguments
<ide>
<del> print >> output, " " + argstr + "\n"
<add> print(" " + argstr + "\n", file=output)
<ide> doc = inspect.getdoc(object.__call__)
<ide> if doc is not None:
<del> print >> output, inspect.getdoc(object.__call__)
<del> print >> output, inspect.getdoc(object)
<add> print(inspect.getdoc(object.__call__), file=output)
<add> print(inspect.getdoc(object), file=output)
<ide>
<ide> else:
<del> print >> output, inspect.getdoc(object)
<add> print(inspect.getdoc(object), file=output)
<ide>
<ide> elif inspect.ismethod(object):
<ide> name = object.__name__
<ide> def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'):
<ide> else:
<ide> argstr = name + arguments
<ide>
<del> print >> output, " " + argstr + "\n"
<del> print >> output, inspect.getdoc(object)
<add> print(" " + argstr + "\n", file=output)
<add> print(inspect.getdoc(object), file=output)
<ide>
<ide> elif hasattr(object, '__doc__'):
<del> print >> output, inspect.getdoc(object)
<add> print(inspect.getdoc(object), file=output)
<ide>
<ide>
<ide> def source(object, output=sys.stdout):
<ide> def interp(x, xp, fp, left=None, right=None):
<ide> # Local import to speed up numpy's import time.
<ide> import inspect
<ide> try:
<del> print >> output, "In file: %s\n" % inspect.getsourcefile(object)
<del> print >> output, inspect.getsource(object)
<add> print("In file: %s\n" % inspect.getsourcefile(object), file=output)
<add> print(inspect.getsource(object), file=output)
<ide> except:
<del> print >> output, "Not available for this object."
<add> print("Not available for this object.", file=output)
<ide>
<ide>
<ide> # Cache for lookfor: {id(module): {name: (docstring, kind, index), ...}...}
<ide> def relevance_value(a):
<ide> pager = pydoc.getpager()
<ide> pager("\n".join(help_text))
<ide> else:
<del> print "\n".join(help_text)
<add> print("\n".join(help_text))
<ide>
<ide> def _lookfor_generate_cache(module, import_modules, regenerate):
<ide> """
<ide><path>numpy/linalg/__init__.py
<ide> =============== ==========================================================
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> # To get sub-modules
<ide> from .info import __doc__
<ide><path>numpy/linalg/info.py
<ide> - LinAlgError Indicates a failed linear algebra operation
<ide>
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> depends = ['core']
<ide><path>numpy/linalg/lapack_lite/clapack_scrub.py
<ide> #!/usr/bin/env python
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys, os
<ide> from io import StringIO
<ide> def scrubSource(source, nsteps=None, verbose=False):
<ide>
<ide> for msg, step in steps:
<ide> if verbose:
<del> print msg
<add> print(msg)
<ide> source = step(source)
<ide>
<ide> return source
<ide><path>numpy/linalg/lapack_lite/fortran.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import re
<ide> import itertools
<ide><path>numpy/linalg/lapack_lite/make_lite.py
<ide> #!/usr/bin/env python
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys, os
<ide> import fortran
<ide> def allRoutinesByType(self, typename):
<ide> return [a[1] for a in routines]
<ide>
<ide> def printRoutineNames(desc, routines):
<del> print desc
<add> print(desc)
<ide> for r in routines:
<del> print '\t%s' % r.name
<add> print('\t%s' % r.name)
<ide>
<ide> def getLapackRoutines(wrapped_routines, ignores, lapack_dir):
<ide> blas_src_dir = os.path.join(lapack_dir, 'BLAS', 'SRC')
<ide> def scrubF2CSource(c_file):
<ide>
<ide> def main():
<ide> if len(sys.argv) != 4:
<del> print 'Usage: %s wrapped_routines_file lapack_dir output_dir' % \
<del> (sys.argv[0],)
<add> print('Usage: %s wrapped_routines_file lapack_dir output_dir' % \
<add> (sys.argv[0],))
<ide> return
<ide> wrapped_routines_file = sys.argv[1]
<ide> lapack_src_dir = sys.argv[2]
<ide> def main():
<ide> dumpRoutineNames(library, output_dir)
<ide>
<ide> for typename in ['blas', 'dlapack', 'zlapack']:
<del> print 'creating %s_lite.c ...' % typename
<add> print('creating %s_lite.c ...' % typename)
<ide> routines = library.allRoutinesByType(typename)
<ide> fortran_file = os.path.join(output_dir, typename+'_lite.f')
<ide> c_file = fortran_file[:-2] + '.c'
<ide> concatenateRoutines(routines, fortran_file)
<ide> try:
<ide> runF2C(fortran_file, output_dir)
<ide> except F2CError:
<del> print 'f2c failed on %s' % fortran_file
<add> print('f2c failed on %s' % fortran_file)
<ide> break
<ide> scrubF2CSource(c_file)
<ide>
<ide><path>numpy/linalg/linalg.py
<ide> dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
<ide> zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide>
<ide> __all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
<ide><path>numpy/linalg/setup.py
<del>from __future__ import division
<add>from __future__ import division, print_function
<ide>
<ide> import sys
<ide>
<ide><path>numpy/linalg/tests/test_build.py
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> from subprocess import call, PIPE, Popen
<ide> import sys
<ide><path>numpy/linalg/tests/test_linalg.py
<ide> """ Test functions for linalg module
<ide> """
<del>from __future__ import division, absolute_import
<add>from __future__ import division, absolute_import, print_function
<ide>
<ide> import sys
<ide> | 300 |
Python | Python | add openstackidentity_2_0_connection_voms class | ec125bffd07f925083dce81fb1b1768f862e8723 | <ide><path>libcloud/common/openstack_identity.py
<ide> from libcloud.utils.py3 import httplib
<ide> from libcloud.utils.iso8601 import parse_date
<ide>
<del>from libcloud.common.base import ConnectionUserAndKey, Response
<add>from libcloud.common.base import (ConnectionUserAndKey, Response,
<add> CertificateConnection)
<ide> from libcloud.compute.types import (LibcloudError, InvalidCredsError,
<ide> MalformedResponseError)
<ide>
<ide> '2.0',
<ide> '2.0_apikey',
<ide> '2.0_password',
<add> '2.0_voms',
<ide> '3.0',
<ide> '3.x_password',
<ide> '3.x_oidc_access_token'
<ide> 'OpenStackIdentity_1_0_Connection',
<ide> 'OpenStackIdentity_1_1_Connection',
<ide> 'OpenStackIdentity_2_0_Connection',
<add> 'OpenStackIdentity_2_0_Connection_VOMS',
<ide> 'OpenStackIdentity_3_0_Connection',
<ide> 'OpenStackIdentity_3_0_Connection_OIDC_access_token',
<ide>
<ide> def _get_project_id(self, token):
<ide> driver=self.driver)
<ide>
<ide>
<add>class OpenStackIdentity_2_0_Connection_VOMS(OpenStackIdentityConnection,
<add> CertificateConnection):
<add> """
<add> Connection class for Keystone API v2.0. with VOMS proxy support
<add> In this case the key parameter will be the path of the VOMS proxy file.
<add> """
<add>
<add> responseCls = OpenStackAuthResponse
<add> name = 'OpenStack Identity API v2.0 VOMS support'
<add> auth_version = '2.0'
<add>
<add> def __init__(self, auth_url, user_id, key, tenant_name=None,
<add> domain_name='Default',
<add> token_scope=OpenStackIdentityTokenScope.PROJECT,
<add> timeout=None, parent_conn=None):
<add> CertificateConnection.__init__(self, cert_file=key,
<add> url=auth_url,
<add> timeout=timeout)
<add>
<add> self.parent_conn = parent_conn
<add>
<add> # enable tests to use the same mock connection classes.
<add> if parent_conn:
<add> self.conn_classes = parent_conn.conn_classes
<add> self.driver = parent_conn.driver
<add> else:
<add> self.driver = None
<add>
<add> self.auth_url = auth_url
<add> self.tenant_name = tenant_name
<add> self.domain_name = domain_name
<add> self.token_scope = token_scope
<add> self.timeout = timeout
<add>
<add> self.urls = {}
<add> self.auth_token = None
<add> self.auth_token_expires = None
<add> self.auth_user_info = None
<add>
<add> def authenticate(self, force=False):
<add> if not self._is_authentication_needed(force=force):
<add> return self
<add>
<add> data = {'auth': {"voms": True}}
<add> if self.tenant_name:
<add> data['auth']['tenantName'] = self.tenant_name
<add> reqbody = json.dumps(data)
<add> return self._authenticate_2_0_with_body(reqbody)
<add>
<add> def _authenticate_2_0_with_body(self, reqbody):
<add> resp = self.request('/v2.0/tokens', data=reqbody,
<add> headers={'Content-Type': 'application/json'},
<add> method='POST')
<add>
<add> if resp.status == httplib.UNAUTHORIZED:
<add> raise InvalidCredsError()
<add> elif resp.status not in [httplib.OK,
<add> httplib.NON_AUTHORITATIVE_INFORMATION]:
<add> body = 'code: %s body: %s' % (resp.status, resp.body)
<add> raise MalformedResponseError('Malformed response', body=body,
<add> driver=self.driver)
<add> else:
<add> body = resp.object
<add>
<add> try:
<add> access = body['access']
<add> expires = access['token']['expires']
<add>
<add> self.auth_token = access['token']['id']
<add> self.auth_token_expires = parse_date(expires)
<add> self.urls = access['serviceCatalog']
<add> self.auth_user_info = access.get('user', {})
<add> except KeyError:
<add> e = sys.exc_info()[1]
<add> raise MalformedResponseError('Auth JSON response is \
<add> missing required elements', e)
<add>
<add> return self
<add>
<add>
<ide> def get_class_for_auth_version(auth_version):
<ide> """
<ide> Retrieve class for the provided auth version.
<ide> def get_class_for_auth_version(auth_version):
<ide> cls = OpenStackIdentity_2_0_Connection
<ide> elif auth_version == '2.0_password':
<ide> cls = OpenStackIdentity_2_0_Connection
<add> elif auth_version == '2.0_voms':
<add> cls = OpenStackIdentity_2_0_Connection_VOMS
<ide> elif auth_version == '3.x_password':
<ide> cls = OpenStackIdentity_3_0_Connection
<ide> elif auth_version == '3.x_oidc_access_token':
<ide><path>libcloud/test/common/test_openstack_identity.py
<ide> from libcloud.common.openstack_identity import OpenStackIdentity_3_0_Connection_OIDC_access_token
<ide> from libcloud.common.openstack_identity import OpenStackIdentityUser
<ide> from libcloud.compute.drivers.openstack import OpenStack_1_0_NodeDriver
<add>from libcloud.common.openstack_identity import OpenStackIdentity_2_0_Connection_VOMS
<ide>
<ide> from libcloud.test import unittest
<ide> from libcloud.test import MockHttp
<ide> def test_authenticate(self):
<ide> auth.authenticate()
<ide>
<ide>
<add>class OpenStackIdentity_2_0_Connection_VOMSTests(unittest.TestCase):
<add> def setUp(self):
<add> mock_cls = OpenStackIdentity_2_0_Connection_VOMSMockHttp
<add> mock_cls.type = None
<add> OpenStackIdentity_2_0_Connection_VOMS.conn_classes = (mock_cls, mock_cls)
<add>
<add> self.auth_instance = OpenStackIdentity_2_0_Connection_VOMS(auth_url='http://none',
<add> user_id=None,
<add> key='/tmp/proxy.pem',
<add> tenant_name='VO')
<add> self.auth_instance.auth_token = 'mock'
<add>
<add> def test_authenticate(self):
<add> auth = OpenStackIdentity_2_0_Connection_VOMS(auth_url='http://none',
<add> user_id=None,
<add> key='/tmp/proxy.pem',
<add> token_scope='test',
<add> tenant_name="VO")
<add> auth.authenticate()
<add>
<add>
<ide> class OpenStackServiceCatalogTestCase(unittest.TestCase):
<ide> fixtures = ComputeFileFixtures('openstack')
<ide>
<ide> def _v3_OS_FEDERATION_projects(self, method, url, body, headers):
<ide> return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
<ide> raise NotImplementedError()
<ide>
<add>
<add>class OpenStackIdentity_2_0_Connection_VOMSMockHttp(MockHttp):
<add> fixtures = ComputeFileFixtures('openstack_identity/v2')
<add> json_content_headers = {'content-type': 'application/json; charset=UTF-8'}
<add>
<add> def _v2_0_tokens(self, method, url, body, headers):
<add> if method == 'POST':
<add> status = httplib.UNAUTHORIZED
<add> data = json.loads(body)
<add> if 'voms' in data['auth'] and data['auth']['voms'] is True:
<add> if 'tenantName' in data['auth'] and data['auth']['tenantName'] == 'VO':
<add> status = httplib.OK
<add>
<add> body = ComputeFileFixtures('openstack').load('_v2_0__auth.json')
<add> headers = self.json_content_headers.copy()
<add> headers['x-subject-token'] = '00000000000000000000000000000000'
<add> return (status, body, headers, httplib.responses[httplib.OK])
<add> raise NotImplementedError()
<add>
<ide> if __name__ == '__main__':
<ide> sys.exit(unittest.main()) | 2 |
Python | Python | avoid none callback | d53c84b6d6717375ee91d2847a3d0f24beafd8d1 | <ide><path>spacy/pipeline/tok2vec.py
<ide> def predict(self, docs: Iterable[Doc]):
<ide> tokvecs = self.model.predict(docs)
<ide> batch_id = Tok2VecListener.get_batch_id(docs)
<ide> for listener in self.listeners:
<del> listener.receive(batch_id, tokvecs, None)
<add> listener.receive(batch_id, tokvecs, lambda dX: [])
<ide> return tokvecs
<ide>
<ide> def set_annotations(self, docs: Sequence[Doc], tokvecses) -> None:
<ide><path>spacy/tests/pipeline/test_tok2vec.py
<ide> def test_tok2vec_listener():
<ide> nlp.select_pipes(disable="tok2vec")
<ide> assert nlp.pipe_names == ["tagger"]
<ide> nlp("Running the pipeline with the Tok2Vec component disabled.")
<add>
<add>
<add>def test_tok2vec_listener_callback():
<add> orig_config = Config().from_str(cfg_string)
<add> nlp, config = util.load_model_from_config(orig_config, auto_fill=True, validate=True)
<add> assert nlp.pipe_names == ["tok2vec", "tagger"]
<add> tagger = nlp.get_pipe("tagger")
<add> tok2vec = nlp.get_pipe("tok2vec")
<add> nlp._link_components()
<add> docs = [nlp.make_doc("A random sentence")]
<add> tok2vec.model.initialize(X=docs)
<add> gold_array = [[1.0 for tag in ["V", "Z"]] for word in docs]
<add> label_sample = [tagger.model.ops.asarray(gold_array, dtype="float32")]
<add> tagger.model.initialize(X=docs, Y=label_sample)
<add> docs = [nlp.make_doc("Another entirely random sentence")]
<add> tok2vec.predict(docs)
<add> Y, get_dX = tagger.model.begin_update(docs)
<add> # assure that the backprop call works (and doesn't hit a 'None' callback)
<add> assert get_dX(Y) is not None | 2 |
Python | Python | fix epsilon in objectives | 61b30997eb7e8833484e74ea1bc9df0bcb4b7617 | <ide><path>keras/objectives.py
<ide> def mean_absolute_error(y_true, y_pred):
<ide>
<ide>
<ide> def mean_absolute_percentage_error(y_true, y_pred):
<del> diff = K.abs((y_true - y_pred) / K.clip(K.abs(y_true), K._EPSILON, np.inf))
<add> diff = K.abs((y_true - y_pred) / K.clip(K.abs(y_true), K.epsilon(), np.inf))
<ide> return 100. * K.mean(diff, axis=-1)
<ide>
<ide>
<ide> def mean_squared_logarithmic_error(y_true, y_pred):
<del> first_log = K.log(K.clip(y_pred, K._EPSILON, np.inf) + 1.)
<del> second_log = K.log(K.clip(y_true, K._EPSILON, np.inf) + 1.)
<add> first_log = K.log(K.clip(y_pred, K.epsilon(), np.inf) + 1.)
<add> second_log = K.log(K.clip(y_true, K.epsilon(), np.inf) + 1.)
<ide> return K.mean(K.square(first_log - second_log), axis=-1)
<ide>
<ide>
<ide> def binary_crossentropy(y_true, y_pred):
<ide>
<ide>
<ide> def poisson_loss(y_true, y_pred):
<del> return K.mean(y_pred - y_true * K.log(y_pred + K._EPSILON), axis=-1)
<add> return K.mean(y_pred - y_true * K.log(y_pred + K.epsilon()), axis=-1)
<ide>
<ide> # aliases
<ide> mse = MSE = mean_squared_error | 1 |
Javascript | Javascript | fix assert.strictequal params order | cb6c33e368d97c0c61a085e56b7196a676a09d85 | <ide><path>test/sequential/test-pipe.js
<ide> const tcp = net.Server(common.mustCall((s) => {
<ide> s.on('data', (d) => {
<ide> tcpLengthSeen += d.length;
<ide> for (let j = 0; j < d.length; j++) {
<del> assert.strictEqual(buffer[i], d[j]);
<add> assert.strictEqual(d[j], buffer[i]);
<ide> i++;
<ide> }
<ide> });
<ide> function startClient() {
<ide> }, common.mustCall((res) => {
<ide> res.setEncoding('utf8');
<ide> res.on('data', common.mustCall((string) => {
<del> assert.strictEqual('thanks', string);
<add> assert.strictEqual(string, 'thanks');
<ide> gotThanks = true;
<ide> }));
<ide> }));
<ide> function startClient() {
<ide>
<ide> process.on('exit', () => {
<ide> assert.ok(gotThanks);
<del> assert.strictEqual(bufferSize, tcpLengthSeen);
<add> assert.strictEqual(tcpLengthSeen, bufferSize);
<ide> }); | 1 |
Python | Python | fix cpplint --quiet option | b8a98a807f32bf7c1a99e9a8c3f644c85b977523 | <ide><path>tools/cpplint.py
<ide> def ParseArguments(args):
<ide> except ValueError:
<ide> PrintUsage('Extensions must be comma seperated list.')
<ide> elif opt == '--recursive':
<del> PrintUsage('Extensions must be comma separated list.')
<del> elif opt == '--logfile':
<ide> recursive = True
<del> elif opt == '--quiet':
<add> elif opt == '--logfile':
<ide> logger.addHandler(logging.FileHandler(val, mode='wb'))
<add> elif opt == '--quiet':
<ide> global _quiet
<ide> _quiet = True
<ide> | 1 |
Ruby | Ruby | move digest path calculation out of loop | 99d260298c3b70d70042c872841b5e81c938fd5b | <ide><path>actionview/lib/action_view/helpers/cache_helper.rb
<ide> def cache_unless(condition, name = {}, options = {}, &block)
<ide> #
<ide> # The digest will be generated using +virtual_path:+ if it is provided.
<ide> #
<del> def cache_fragment_name(name = {}, skip_digest: nil, virtual_path: nil)
<add> def cache_fragment_name(name = {}, skip_digest: nil, virtual_path: nil, digest_path: nil)
<ide> if skip_digest
<ide> name
<ide> else
<del> fragment_name_with_digest(name, virtual_path)
<add> fragment_name_with_digest(name, virtual_path, digest_path)
<add> end
<add> end
<add>
<add> def digest_path_from_virtual(virtual_path) # :nodoc:
<add> digest = Digestor.digest(name: virtual_path, finder: lookup_context, dependencies: view_cache_dependencies)
<add>
<add> if digest.present?
<add> "#{virtual_path}:#{digest}"
<add> else
<add> virtual_path
<ide> end
<ide> end
<ide>
<ide> private
<ide>
<del> def fragment_name_with_digest(name, virtual_path)
<add> def fragment_name_with_digest(name, virtual_path, digest_path)
<ide> virtual_path ||= @virtual_path
<ide>
<del> if virtual_path
<add> if virtual_path || digest_path
<ide> name = controller.url_for(name).split("://").last if name.is_a?(Hash)
<ide>
<del> if digest = Digestor.digest(name: virtual_path, finder: lookup_context, dependencies: view_cache_dependencies).presence
<del> [ "#{virtual_path}:#{digest}", name ]
<del> else
<del> [ virtual_path, name ]
<del> end
<add> digest_path ||= digest_path_from_virtual(virtual_path)
<add>
<add> [ digest_path, name ]
<ide> else
<ide> name
<ide> end
<ide><path>actionview/lib/action_view/renderer/partial_renderer/collection_caching.rb
<ide> def collection_by_cache_keys
<ide> end
<ide>
<ide> def expanded_cache_key(key)
<del> key = @view.combined_fragment_cache_key(@view.cache_fragment_name(key, virtual_path: @template.virtual_path))
<add> key = @view.combined_fragment_cache_key(@view.cache_fragment_name(key, virtual_path: @template.virtual_path, digest_path: digest_path))
<ide> key.frozen? ? key.dup : key # #read_multi & #write may require mutability, Dalli 2.6.0.
<ide> end
<ide>
<add> def digest_path
<add> @digest_path ||= @view.digest_path_from_virtual(@template.virtual_path)
<add> end
<add>
<ide> def fetch_or_cache_partial(cached_partials, order_by:)
<ide> order_by.map do |cache_key|
<ide> cached_partials.fetch(cache_key) do | 2 |
Python | Python | remove broken import | b82227e517bb7baced840b29d04a545a7b7557ae | <ide><path>rest_framework/relations.py
<ide> from django.utils.translation import ugettext_lazy as _
<ide> from rest_framework.fields import Field, WritableField
<ide> from rest_framework.reverse import reverse
<del>from urlparse import urlparse
<ide> from rest_framework.compat import urlparse
<ide> from rest_framework.compat import smart_text
<ide> import warnings | 1 |
Python | Python | increase type coverage for imap provider | 08dfd8cd00dae2d7aad53018af04428d933b1ceb | <ide><path>airflow/providers/imap/hooks/imap.py
<ide> def __init__(self, imap_conn_id: str = 'imap_default') -> None:
<ide> self.imap_conn_id = imap_conn_id
<ide> self.mail_client: Optional[imaplib.IMAP4_SSL] = None
<ide>
<del> def __enter__(self):
<add> def __enter__(self) -> 'ImapHook':
<ide> return self.get_conn()
<ide>
<ide> def __exit__(self, exc_type, exc_val, exc_tb):
<ide> def download_mail_attachments(
<ide> mail_folder: str = 'INBOX',
<ide> mail_filter: str = 'All',
<ide> not_found_mode: str = 'raise',
<del> ):
<add> ) -> None:
<ide> """
<ide> Downloads mail's attachments in the mail folder by its name to the local directory.
<ide>
<ide> def download_mail_attachments(
<ide>
<ide> self._create_files(mail_attachments, local_output_directory)
<ide>
<del> def _handle_not_found_mode(self, not_found_mode: str):
<add> def _handle_not_found_mode(self, not_found_mode: str) -> None:
<ide> if not_found_mode == 'raise':
<ide> raise AirflowException('No mail attachments found!')
<ide> if not_found_mode == 'warn':
<ide> def _check_mail_body(
<ide> return mail.get_attachments_by_name(name, check_regex, find_first=latest_only)
<ide> return []
<ide>
<del> def _create_files(self, mail_attachments: List, local_output_directory: str):
<add> def _create_files(self, mail_attachments: List, local_output_directory: str) -> None:
<ide> for name, payload in mail_attachments:
<ide> if self._is_symlink(name):
<ide> self.log.error('Can not create file because it is a symlink!')
<ide> def _create_files(self, mail_attachments: List, local_output_directory: str):
<ide> else:
<ide> self._create_file(name, payload, local_output_directory)
<ide>
<del> def _is_symlink(self, name: str):
<add> def _is_symlink(self, name: str) -> bool:
<ide> # IMPORTANT NOTE: os.path.islink is not working for windows symlinks
<ide> # See: https://stackoverflow.com/a/11068434
<ide> return os.path.islink(name)
<ide>
<del> def _is_escaping_current_directory(self, name: str):
<add> def _is_escaping_current_directory(self, name: str) -> bool:
<ide> return '../' in name
<ide>
<del> def _correct_path(self, name: str, local_output_directory: str):
<add> def _correct_path(self, name: str, local_output_directory: str) -> str:
<ide> return (
<ide> local_output_directory + name
<ide> if local_output_directory.endswith('/')
<ide> else local_output_directory + '/' + name
<ide> )
<ide>
<del> def _create_file(self, name: str, payload: Any, local_output_directory: str):
<add> def _create_file(self, name: str, payload: Any, local_output_directory: str) -> None:
<ide> file_path = self._correct_path(name, local_output_directory)
<ide>
<ide> with open(file_path, 'wb') as file:
<ide><path>airflow/providers/imap/sensors/imap_attachment.py
<ide> def __init__(
<ide> mail_filter='All',
<ide> conn_id='imap_default',
<ide> **kwargs,
<del> ):
<add> ) -> None:
<ide> super().__init__(**kwargs)
<ide>
<ide> self.attachment_name = attachment_name
<ide> def __init__(
<ide> self.mail_filter = mail_filter
<ide> self.conn_id = conn_id
<ide>
<del> def poke(self, context):
<add> def poke(self, context: dict) -> bool:
<ide> """
<ide> Pokes for a mail attachment on the mail server.
<ide> | 2 |
Python | Python | convert optimization_test.py to pytorch | 629bd006bfd7e6210dcc95198be9b65614e4f051 | <ide><path>optimization_test_pytorch.py
<add># coding=utf-8
<add># Copyright 2018 The Google AI Language Team Authors.
<add>#
<add># Licensed under the Apache License, Version 2.0 (the "License");
<add># you may not use this file except in compliance with the License.
<add># You may obtain a copy of the License at
<add>#
<add># http://www.apache.org/licenses/LICENSE-2.0
<add>#
<add># Unless required by applicable law or agreed to in writing, software
<add># distributed under the License is distributed on an "AS IS" BASIS,
<add># WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
<add># See the License for the specific language governing permissions and
<add># limitations under the License.
<add>from __future__ import absolute_import
<add>from __future__ import division
<add>from __future__ import print_function
<add>
<add>import optimization_pytorch as optimization
<add>import torch
<add>import unittest
<add>
<add>
<add>class OptimizationTest(unittest.TestCase):
<add>
<add> def assertListAlmostEqual(self, list1, list2, tol):
<add> self.assertEqual(len(list1), len(list2))
<add> for a, b in zip(list1, list2):
<add> self.assertAlmostEqual(a, b, delta=tol)
<add>
<add> def test_adam(self):
<add> w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
<add> x = torch.tensor([0.4, 0.2, -0.5])
<add> criterion = torch.nn.MSELoss(reduction='elementwise_mean')
<add> optimizer = optimization.BERTAdam(params={w}, lr=0.2, schedule='warmup_linear', warmup=0.1, t_total=100)
<add> for _ in range(100):
<add> # TODO Solve: reduction='elementwise_mean'=True not taken into account so division by x.size(0) is necessary
<add> loss = criterion(x, w) / x.size(0)
<add> loss.backward()
<add> optimizer.step()
<add> self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
<add>
<add>
<add>if __name__ == "__main__":
<add> unittest.main() | 1 |
Python | Python | add tests for the bitwise ufuncs | 3ed543e996b23418f78fc25e77e06b1f0e29f65f | <ide><path>numpy/core/tests/test_umath.py
<ide> def test_truth_table_bitwise(self):
<ide> assert_equal(np.bitwise_xor(arg1, arg2), out)
<ide>
<ide>
<add>class TestBitwiseUFuncs(TestCase):
<add>
<add> bitwise_types = [np.dtype(c) for c in '?' + 'bBhHiIlLqQ' + 'O']
<add>
<add> def test_values(self):
<add> for dt in self.bitwise_types:
<add> zeros = np.array([0], dtype=dt)
<add> ones = np.array([-1], dtype=dt)
<add> msg = "dt = '%s'" % dt.char
<add>
<add> assert_equal(np.bitwise_not(zeros), ones, err_msg=msg)
<add> assert_equal(np.bitwise_not(ones), zeros, err_msg=msg)
<add>
<add> assert_equal(np.bitwise_or(zeros, zeros), zeros, err_msg=msg)
<add> assert_equal(np.bitwise_or(zeros, ones), ones, err_msg=msg)
<add> assert_equal(np.bitwise_or(ones, zeros), ones, err_msg=msg)
<add> assert_equal(np.bitwise_or(ones, ones), ones, err_msg=msg)
<add>
<add> assert_equal(np.bitwise_xor(zeros, zeros), zeros, err_msg=msg)
<add> assert_equal(np.bitwise_xor(zeros, ones), ones, err_msg=msg)
<add> assert_equal(np.bitwise_xor(ones, zeros), ones, err_msg=msg)
<add> assert_equal(np.bitwise_xor(ones, ones), zeros, err_msg=msg)
<add>
<add> assert_equal(np.bitwise_and(zeros, zeros), zeros, err_msg=msg)
<add> assert_equal(np.bitwise_and(zeros, ones), zeros, err_msg=msg)
<add> assert_equal(np.bitwise_and(ones, zeros), zeros, err_msg=msg)
<add> assert_equal(np.bitwise_and(ones, ones), ones, err_msg=msg)
<add>
<add> def test_types(self):
<add> for dt in self.bitwise_types:
<add> zeros = np.array([0], dtype=dt)
<add> ones = np.array([-1], dtype=dt)
<add> msg = "dt = '%s'" % dt.char
<add>
<add> assert_(np.bitwise_not(zeros).dtype == dt, msg)
<add> assert_(np.bitwise_or(zeros, zeros).dtype == dt, msg)
<add> assert_(np.bitwise_xor(zeros, zeros).dtype == dt, msg)
<add> assert_(np.bitwise_and(zeros, zeros).dtype == dt, msg)
<add>
<add>
<add> def test_identity(self):
<add> assert_(np.bitwise_or.identity == 0, 'bitwise_or')
<add> assert_(np.bitwise_xor.identity == 0, 'bitwise_xor')
<add> assert_(np.bitwise_and.identity == -1, 'bitwise_and')
<add>
<add> def test_reduction(self):
<add> binary_funcs = (np.bitwise_or, np.bitwise_xor, np.bitwise_and)
<add>
<add> for dt in self.bitwise_types:
<add> zeros = np.array([0], dtype=dt)
<add> ones = np.array([-1], dtype=dt)
<add> for f in binary_funcs:
<add> msg = "dt: '%s', f: '%s'" % (dt, f)
<add> assert_equal(f.reduce(zeros), zeros, err_msg=msg)
<add> assert_equal(f.reduce(ones), ones, err_msg=msg)
<add>
<add> # Test empty reduction, no object dtype
<add> for dt in self.bitwise_types[:-1]:
<add> # No object array types
<add> empty = np.array([], dtype=dt)
<add> for f in binary_funcs:
<add> msg = "dt: '%s', f: '%s'" % (dt, f)
<add> tgt = np.array(f.identity, dtype=dt)
<add> res = f.reduce(empty)
<add> assert_equal(res, tgt, err_msg=msg)
<add> assert_(res.dtype == tgt.dtype, msg)
<add>
<add> # Empty object arrays use the identity. Note that the types may
<add> # differ, the actual type used is determined by the assign_identity
<add> # function and is not the same as the type returned by the identity
<add> # method.
<add> for f in binary_funcs:
<add> msg = "dt: '%s'" % (f,)
<add> empty = np.array([], dtype=object)
<add> tgt = f.identity
<add> res = f.reduce(empty)
<add> assert_equal(res, tgt, err_msg=msg)
<add>
<add> # Non-empty object arrays do not use the identity
<add> for f in binary_funcs:
<add> msg = "dt: '%s'" % (f,)
<add> btype = np.array([True], dtype=object)
<add> assert_(type(f.reduce(btype)) is bool, msg)
<add>
<add>
<ide> class TestInt(TestCase):
<ide> def test_logical_not(self):
<ide> x = np.ones(10, dtype=np.int16) | 1 |
Javascript | Javascript | fix missing types in xhrexample | 38f76b32c111854b7414af69bd1f3b251f8a8ba6 | <ide><path>Examples/UIExplorer/XHRExample.js
<ide> var PAGE_SIZE = 20;
<ide>
<ide> class FormUploader extends React.Component {
<ide>
<add> _isMounted: boolean;
<add> _fetchRandomPhoto: () => void;
<add> _addTextParam: () => void;
<add> _upload: () => void;
<add>
<ide> constructor(props) {
<ide> super(props);
<ide> this.state = { | 1 |
Java | Java | add backpressure support for defaultifempty() | 60924afdc5c05f51470141877a455e887a7b3415 | <ide><path>src/main/java/rx/Observable.java
<ide> import rx.exceptions.*;
<ide> import rx.functions.*;
<ide> import rx.internal.operators.*;
<add>import rx.internal.producers.SingleProducer;
<ide> import rx.internal.util.*;
<ide> import rx.observables.*;
<ide> import rx.observers.SafeSubscriber;
<ide> public final Observable<T> debounce(long timeout, TimeUnit unit, Scheduler sched
<ide> * items, or the items emitted by the source Observable
<ide> * @see <a href="http://reactivex.io/documentation/operators/defaultifempty.html">ReactiveX operators documentation: DefaultIfEmpty</a>
<ide> */
<del> public final Observable<T> defaultIfEmpty(T defaultValue) {
<del> return lift(new OperatorDefaultIfEmpty<T>(defaultValue));
<add> public final Observable<T> defaultIfEmpty(final T defaultValue) {
<add> //if empty switch to an observable that emits defaultValue and supports backpressure
<add> return switchIfEmpty(Observable.create(new OnSubscribe<T>() {
<add>
<add> @Override
<add> public void call(Subscriber<? super T> subscriber) {
<add> subscriber.setProducer(new SingleProducer<T>(subscriber, defaultValue));
<add> }}));
<ide> }
<ide>
<ide> /**
<ide><path>src/main/java/rx/internal/operators/OperatorDefaultIfEmpty.java
<del>/**
<del> * Copyright 2014 Netflix, Inc.
<del> *
<del> * Licensed under the Apache License, Version 2.0 (the "License"); you may not
<del> * use this file except in compliance with the License. You may obtain a copy of
<del> * the License at
<del> *
<del> * http://www.apache.org/licenses/LICENSE-2.0
<del> *
<del> * Unless required by applicable law or agreed to in writing, software
<del> * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
<del> * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
<del> * License for the specific language governing permissions and limitations under
<del> * the License.
<del> */
<del>package rx.internal.operators;
<del>
<del>import rx.Observable.Operator;
<del>import rx.Subscriber;
<del>
<del>/**
<del> * Returns the elements of the specified sequence or the specified default value
<del> * in a singleton sequence if the sequence is empty.
<del> * @param <T> the value type
<del> */
<del>public class OperatorDefaultIfEmpty<T> implements Operator<T, T> {
<del> final T defaultValue;
<del>
<del> public OperatorDefaultIfEmpty(T defaultValue) {
<del> this.defaultValue = defaultValue;
<del> }
<del>
<del> @Override
<del> public Subscriber<? super T> call(final Subscriber<? super T> child) {
<del> return new Subscriber<T>(child) {
<del> boolean hasValue;
<del> @Override
<del> public void onNext(T t) {
<del> hasValue = true;
<del> child.onNext(t);
<del> }
<del>
<del> @Override
<del> public void onError(Throwable e) {
<del> child.onError(e);
<del> }
<del>
<del> @Override
<del> public void onCompleted() {
<del> if (!hasValue) {
<del> try {
<del> child.onNext(defaultValue);
<del> } catch (Throwable e) {
<del> child.onError(e);
<del> return;
<del> }
<del> }
<del> child.onCompleted();
<del> }
<del>
<del> };
<del> }
<del>
<del>}
<ide><path>src/test/java/rx/internal/operators/OperatorDefaultIfEmptyTest.java
<ide> import rx.Observer;
<ide> import rx.Subscriber;
<ide> import rx.exceptions.TestException;
<add>import rx.observers.TestSubscriber;
<ide>
<ide> public class OperatorDefaultIfEmptyTest {
<ide>
<ide> public void onCompleted() {
<ide> verify(o, never()).onNext(any(Integer.class));
<ide> verify(o, never()).onCompleted();
<ide> }
<add>
<add> @Test
<add> public void testBackpressureEmpty() {
<add> TestSubscriber<Integer> ts = TestSubscriber.create(0);
<add> Observable.<Integer>empty().defaultIfEmpty(1).subscribe(ts);
<add> ts.assertNoValues();
<add> ts.assertNoTerminalEvent();
<add> ts.requestMore(1);
<add> ts.assertValue(1);
<add> ts.assertCompleted();
<add> }
<add>
<add> @Test
<add> public void testBackpressureNonEmpty() {
<add> TestSubscriber<Integer> ts = TestSubscriber.create(0);
<add> Observable.just(1,2,3).defaultIfEmpty(1).subscribe(ts);
<add> ts.assertNoValues();
<add> ts.assertNoTerminalEvent();
<add> ts.requestMore(2);
<add> ts.assertValues(1, 2);
<add> ts.requestMore(1);
<add> ts.assertValues(1, 2, 3);
<add> ts.assertCompleted();
<add> }
<ide> } | 3 |
Text | Text | move boron releases to lts column | 6d31bdb8722d609b38b74d04f95da07c62d29b8d | <ide><path>doc/changelogs/CHANGELOG_V6.md
<ide> </tr>
<ide> <tr>
<ide> <td valign="top">
<del> <a href="#6.9.4">6.9.4</a><br/>
<del></td>
<del><td valign="top">
<add><a href="#6.9.4">6.9.4</a><br/>
<ide> <a href="#6.9.3">6.9.3</a><br/>
<ide> <a href="#6.9.2">6.9.2</a><br/>
<ide> <a href="#6.9.1">6.9.1</a><br/>
<ide> <a href="#6.9.0">6.9.0</a><br/>
<add></td>
<add><td valign="top">
<ide> <a href="#6.8.1">6.8.1</a><br/>
<ide> <a href="#6.8.0">6.8.0</a><br/>
<ide> <a href="#6.7.0">6.7.0</a><br/> | 1 |
Python | Python | use sysconfig to determine if x86_64 linux | d669e7a716afd8eb5628cae2210d079c76f5b45c | <ide><path>numpy/core/setup.py
<ide> import os
<ide> import sys
<add>import sysconfig
<ide> import pickle
<ide> import copy
<ide> import warnings
<del>import platform
<ide> import textwrap
<ide> import glob
<ide> from os.path import join
<ide> def can_link_svml():
<ide> """
<ide> if NPY_DISABLE_SVML:
<ide> return False
<del> machine = platform.machine()
<del> system = platform.system()
<del> return "x86_64" in machine and system == "Linux"
<add> platform = sysconfig.get_platform()
<add> return "x86_64" in platform and "linux" in platform
<ide>
<ide> def check_svml_submodule(svmlpath):
<ide> if not os.path.exists(svmlpath + "/README.md"): | 1 |
PHP | PHP | add checks to fail early | f21f218d93d317636f16d4d8a4d6133938155f75 | <ide><path>src/Validation/Validation.php
<ide> public static function lengthBetween($check, int $min, int $max): bool
<ide> */
<ide> public static function cc($check, $type = 'fast', $deep = false, $regex = null): bool
<ide> {
<del> if (!is_scalar($check)) {
<add> if (!is_numeric($check)) {
<ide> return false;
<ide> }
<ide>
<del> $check = str_replace(['-', ' '], '', $check);
<add> $check = str_replace(['-', ' '], '', (string)$check);
<ide> if (mb_strlen($check) < 13) {
<ide> return false;
<ide> }
<ide> public static function falsey($check, array $falseyValues = []): bool
<ide> * - true => Any number of decimal places greater than 0, or a float|double. The '.' is required.
<ide> * - 1..N => Exactly that many number of decimal places. The '.' is required.
<ide> *
<del> * @param float $check The value the test for decimal.
<add> * @param mixed $check The value the test for decimal.
<ide> * @param int|bool|null $places Decimal places.
<ide> * @param string|null $regex If a custom regular expression is used, this is the only validation that will occur.
<ide> * @return bool Success
<ide> */
<ide> public static function decimal($check, $places = null, ?string $regex = null): bool
<ide> {
<add> if (!is_scalar($check)) {
<add> return false;
<add> }
<add>
<ide> if ($regex === null) {
<ide> $lnum = '[0-9]+';
<ide> $dnum = "[0-9]*[\.]{$lnum}";
<ide> public static function decimal($check, $places = null, ?string $regex = null): b
<ide> $decimalPoint = $formatter->getSymbol(NumberFormatter::DECIMAL_SEPARATOR_SYMBOL);
<ide> $groupingSep = $formatter->getSymbol(NumberFormatter::GROUPING_SEPARATOR_SYMBOL);
<ide>
<del> $check = str_replace([$groupingSep, $decimalPoint], ['', '.'], $check);
<add> $check = str_replace([$groupingSep, $decimalPoint], ['', '.'], (string)$check);
<ide>
<ide> return static::_check($check, $regex);
<ide> }
<ide> public static function isScalar($value): bool
<ide> /**
<ide> * Check that the input value is a 6 digits hex color.
<ide> *
<del> * @param string|array $check The value to check
<add> * @param mixed $check The value to check
<ide> * @return bool Success
<ide> */
<ide> public static function hexColor($check): bool | 1 |
Python | Python | add working graph container | 2ab9f0ef616c6d05124bdf9c81eb542d73f8e5b6 | <ide><path>keras/layers/containers.py
<ide> # -*- coding: utf-8 -*-
<ide> from __future__ import absolute_import
<add>from __future__ import print_function
<ide>
<ide> import theano.tensor as T
<del>from ..layers.core import Layer
<add>from ..layers.core import Layer, Merge
<ide> from six.moves import range
<ide>
<ide> def ndim_tensor(ndim):
<ide> def __init__(self, layers=[]):
<ide> for layer in layers:
<ide> self.add(layer)
<ide>
<del> def connect(self, layer):
<add> def set_previous(self, layer):
<ide> self.layers[0].previous = layer
<ide>
<ide> def add(self, layer):
<ide> self.layers.append(layer)
<ide> if len(self.layers) > 1:
<del> self.layers[-1].connect(self.layers[-2])
<add> self.layers[-1].set_previous(self.layers[-2])
<ide>
<ide> params, regularizers, constraints = layer.get_params()
<ide> self.params += params
<ide> def get_config(self):
<ide>
<ide> class Graph(Layer):
<ide> '''
<del> Implement a NN graph with arbitrary layer connections.
<add> Implement a NN graph with arbitrary layer connections,
<add> arbitrary number of inputs and arbitrary number of outputs.
<ide>
<del> Small difference with 'classical' layer API:
<del> input, get_input, get_output are lists of tensors instead of single tensors.
<add> Note: Graph can only be used as a layer
<add> (connect, input, get_input, get_output)
<add> when it has exactly one input and one output.
<ide>
<ide> inherited from Layer:
<ide> - get_params
<ide> - get_output_mask
<ide> - supports_masked_input
<del>
<del> not implemented:
<del> - connect
<add> - get_weights
<add> - set_weights
<ide> '''
<ide> def __init__(self):
<ide> self.namespace = set() # strings
<ide> def __init__(self):
<ide> self.input_order = [] # strings
<ide> self.outputs = {} # layer-like
<ide> self.output_order = [] # strings
<add>
<ide> self.params = []
<ide> self.regularizers = []
<ide> self.constraints = []
<ide>
<del> def connect(self):
<del> raise Exception('The Graph container does not implement the connect method.')
<add> def set_previous(self, layer):
<add> if len(self.inputs) != 1 or len(self.outputs) != 1:
<add> raise Exception('The Graph container can only be used as a layer \
<add> when it has exactly one input and one output.')
<add> self.inputs[self.input_order[0]].set_previous(layer)
<add>
<add> def get_input(self, train=False):
<add> if len(self.inputs) != 1 or len(self.outputs) != 1:
<add> raise Exception('The Graph container can only be used as a layer \
<add> when it has exactly one input and one output.')
<add> return self.inputs[self.input_order[0]].get_input(train)
<add>
<add> @property
<add> def input(self):
<add> return self.get_input()
<add>
<add> def get_output(self, train=False):
<add> if len(self.inputs) != 1 or len(self.outputs) != 1:
<add> raise Exception('The Graph container can only be used as a layer \
<add> when it has exactly one input and one output.')
<add> return self.outputs[self.output_order[0]].get_output(train)
<ide>
<ide> def add_input(self, name, ndim=2):
<ide> if name in self.namespace:
<ide> raise Exception('Duplicate node identifier: ' + name)
<ide> self.namespace.add(name)
<add> self.input_order.append(name)
<ide> layer = Layer() # empty layer
<ide> layer.input = ndim_tensor(ndim)
<add> layer.input.name = name
<ide> self.inputs[name] = layer
<ide>
<del> def get_input(self, train=False):
<del> # return list of tensors
<del> inputs = []
<del> for name in self.input_order:
<del> inputs.append(self.inputs[name].get_output(train))
<del> return inputs
<del>
<del> @property
<del> def input(self):
<del> return self.get_input()
<del>
<ide> def add_node(self, layer, name, input=None, inputs=[], merge_mode='concat'):
<add> layer.set_name(name)
<ide> if name in self.namespace:
<ide> raise Exception('Duplicate node identifier: ' + name)
<ide> if input:
<ide> if input not in self.namespace:
<ide> raise Exception('Unknown identifier: ' + input)
<ide> if input in self.nodes:
<del> layer.connect(self.nodes(input))
<add> layer.set_previous(self.nodes[input])
<ide> elif input in self.inputs:
<del> layer.input = self.inputs[input]
<add> layer.set_previous(self.inputs[input])
<ide> if inputs:
<ide> to_merge = []
<ide> for n in inputs:
<ide> if n not in self.nodes:
<ide> raise Exception('Unknown identifier: ' + n)
<ide> to_merge.append(self.nodes[n])
<del> merge = Merge(to_merge, merge_mode=merge_mode)
<del> layer.connect(merge)
<add> merge = Merge(to_merge, mode=merge_mode)
<add> layer.set_previous(merge)
<ide>
<add> self.namespace.add(name)
<ide> self.nodes[name] = layer
<ide> params, regularizers, constraints = layer.get_params()
<ide> self.params += params
<ide> def add_output(self, name, input=None, inputs=[], merge_mode='concat'):
<ide> if input not in self.namespace:
<ide> raise Exception('Unknown identifier: ' + input)
<ide> if input in self.nodes:
<del> self.outputs[name] = self.nodes[inputs]
<add> self.outputs[name] = self.nodes[input]
<ide> elif input in self.inputs:
<del> layer = Layer()
<del> layer.input = self.inputs[input]
<del> self.ouputs[name] = layer
<del>
<add> self.ouputs[name] = self.inputs[input]
<ide> if inputs:
<ide> to_merge = []
<ide> for n in inputs:
<ide> if n not in self.nodes:
<ide> raise Exception('Unknown identifier: ' + n)
<ide> to_merge.append(self.nodes[n])
<del> merge = Merge(to_merge, merge_mode=merge_mode)
<del> self.outputs[name] = merge.get_output()
<del>
<del> def get_ouput(self, train=False):
<del> # return list of tensors
<del> outputs = []
<del> for name in self.output_order:
<del> outputs.append(self.outputs[name].get_output(train))
<del> return ouputs
<del>
<del> def get_weights(self):
<del> pass
<add> merge = Merge(to_merge, mode=merge_mode)
<add> self.outputs[name] = merge
<add> self.namespace.add(name)
<add> self.output_order.append(name)
<ide>
<del> def set_weights(self):
<add> def get_config(self):
<ide> pass
<ide>
<ide>
<ide><path>keras/layers/core.py
<ide> class Layer(object):
<ide> def __init__(self):
<ide> self.params = []
<ide>
<del> def connect(self, layer):
<add> def set_previous(self, layer):
<ide> if not hasattr(self, "get_output_mask") and layer.get_output_mask() is not None:
<ide> raise Exception("Attached non-masking layer to layer with masked output")
<ide> self.previous = layer
<ide>
<del> def get_output(self, train):
<del> return self.input
<add> def get_output(self, train=False):
<add> return self.get_input(train)
<ide>
<del> def get_input(self, train):
<add> def get_input(self, train=False):
<ide> if hasattr(self, 'previous'):
<ide> return self.previous.get_output(train=train)
<ide> else:
<ide> class MaskedLayer(Layer):
<ide> def supports_masked_input(self):
<ide> return True
<ide>
<del> def get_input_mask(self, train=None):
<add> def get_input_mask(self, train=False):
<ide> if hasattr(self, 'previous'):
<ide> return self.previous.get_output_mask(train)
<ide> else:
<ide> return None
<ide>
<del> def get_output_mask(self, train=None):
<add> def get_output_mask(self, train=False):
<ide> ''' The default output mask is just the input mask unchanged. Override this in your own
<ide> implementations if, for instance, you are reshaping the input'''
<ide> return self.get_input_mask(train)
<ide> def __init__(self, p):
<ide> super(Dropout, self).__init__()
<ide> self.p = p
<ide>
<del> def get_output(self, train):
<add> def get_output(self, train=False):
<ide> X = self.get_input(train)
<ide> if self.p > 0.:
<ide> retain_prob = 1. - self.p
<ide> def __init__(self, activation, target=0, beta=0.1):
<ide> self.target = target
<ide> self.beta = beta
<ide>
<del> def get_output(self, train):
<add> def get_output(self, train=False):
<ide> X = self.get_input(train)
<ide> return self.activation(X)
<ide>
<ide> def __init__(self, *dims):
<ide> super(Reshape, self).__init__()
<ide> self.dims = dims
<ide>
<del> def get_output(self, train):
<add> def get_output(self, train=False):
<ide> X = self.get_input(train)
<ide> nshape = make_tuple(X.shape[0], *self.dims)
<ide> return theano.tensor.reshape(X, nshape)
<ide> class Flatten(Layer):
<ide> def __init__(self):
<ide> super(Flatten, self).__init__()
<ide>
<del> def get_output(self, train):
<add> def get_output(self, train=False):
<ide> X = self.get_input(train)
<ide> size = theano.tensor.prod(X.shape) // X.shape[0]
<ide> nshape = (X.shape[0], size)
<ide> def __init__(self, n):
<ide> super(RepeatVector, self).__init__()
<ide> self.n = n
<ide>
<del> def get_output(self, train):
<add> def get_output(self, train=False):
<ide> X = self.get_input(train)
<ide> tensors = [X]*self.n
<ide> stacked = theano.tensor.stack(*tensors)
<ide> class Dense(Layer):
<ide> '''
<ide> Just your regular fully connected NN layer.
<ide> '''
<del> def __init__(self, input_dim, output_dim, init='glorot_uniform', activation='linear', weights=None,
<add> def __init__(self, input_dim, output_dim, init='glorot_uniform', activation='linear', weights=None, name=None,
<ide> W_regularizer=None, b_regularizer=None, activity_regularizer=None, W_constraint=None, b_constraint=None):
<ide>
<ide> super(Dense, self).__init__()
<ide> def __init__(self, input_dim, output_dim, init='glorot_uniform', activation='lin
<ide> if weights is not None:
<ide> self.set_weights(weights)
<ide>
<del> def get_output(self, train):
<add> if name is not None:
<add> self.set_name(name)
<add>
<add> def set_name(self, name):
<add> self.W.name = '%s_W' % name
<add> self.b.name = '%s_b' % name
<add>
<add> def get_output(self, train=False):
<ide> X = self.get_input(train)
<ide> output = self.activation(T.dot(X, self.W) + self.b)
<ide> return output
<ide> def __init__(self, l1=0., l2=0.):
<ide> activity_regularizer.set_layer(self)
<ide> self.regularizers = [activity_regularizer]
<ide>
<del> def get_output(self, train):
<add> def get_output(self, train=False):
<ide> return self.get_input(train)
<ide>
<ide> def get_config(self):
<ide> def __init__(self, input_dim, output_dim, init='glorot_uniform', activation='lin
<ide> if weights is not None:
<ide> self.set_weights(weights)
<ide>
<del> def get_output(self, train):
<add> def get_output(self, train=False):
<ide> X = self.get_input(train)
<ide> output = self.activation(T.dot(X.dimshuffle(1, 0, 2), self.W) + self.b)
<ide> return output.dimshuffle(1, 0, 2)
<ide> def __init__(self, encoder, decoder, output_reconstruction=True, tie_weights=Fal
<ide> self.encoder = encoder
<ide> self.decoder = decoder
<ide>
<del> self.decoder.connect(self.encoder)
<add> self.decoder.set_previous(self.encoder)
<ide>
<ide> self.params = []
<ide> self.regularizers = []
<ide> def __init__(self, encoder, decoder, output_reconstruction=True, tie_weights=Fal
<ide> if weights is not None:
<ide> self.set_weights(weights)
<ide>
<del> def connect(self, node):
<del> self.encoder.connect(node)
<add> def set_previous(self, node):
<add> self.encoder.set_previous(node)
<ide>
<ide> def get_weights(self):
<ide> weights = []
<ide> def get_input(self, train=False):
<ide> def input(self):
<ide> return self.encoder.input
<ide>
<del> def _get_hidden(self, train):
<add> def _get_hidden(self, train=False):
<ide> return self.encoder.get_output(train)
<ide>
<del> def get_output(self, train):
<add> def get_output(self, train=False):
<ide> if not train and not self.output_reconstruction:
<ide> return self.encoder.get_output(train)
<ide>
<ide> def __init__(self, input_dim, output_dim, nb_feature=4, init='glorot_uniform', w
<ide> if weights is not None:
<ide> self.set_weights(weights)
<ide>
<del> def get_output(self, train):
<add> def get_output(self, train=False):
<ide> X = self.get_input(train)
<ide> # -- don't need activation since it's just linear.
<ide> output = T.max(T.dot(X, self.W) + self.b, axis=1)
<ide><path>keras/models.py
<ide> def standardize_weights(y, sample_weight=None, class_weight=None):
<ide>
<ide>
<ide> class Model(object):
<add> def _fit(self, f, ins, out_labels=[], batch_size=128, nb_epoch=100, verbose=1, callbacks=[], \
<add> validation_split=0., val_f=None, val_ins=None, shuffle=True):
<add> '''
<add> Abstract fit function for f(*ins). Assume that f returns a list, labelled by out_labels.
<add> '''
<add>
<add> do_validation = False
<add> if val_f and val_ins:
<add> do_validation = True
<add> if verbose:
<add> print("Train on %d samples, validate on %d samples" % (len(ins[0]), len(val_ins[0])))
<add> else:
<add> if 0 < validation_split < 1:
<add> do_validation = True
<add> split_at = int(len(ins[0]) * (1 - validation_split))
<add> (ins, ins_val) = (slice_X(ins, 0, split_at), slice_X(ins, split_at))
<add> if verbose:
<add> print("Train on %d samples, validate on %d samples" % (len(ins[0]), len(val_ins[0])))
<add>
<add> nb_train_sample = len(ins[0])
<add> index_array = np.arange(nb_train_sample)
<add>
<add> history = cbks.History()
<add> if verbose:
<add> callbacks = [history, cbks.BaseLogger()] + callbacks
<add> else:
<add> callbacks = [history] + callbacks
<add> callbacks = cbks.CallbackList(callbacks)
<add>
<add> callbacks._set_model(self)
<add> callbacks._set_params({
<add> 'batch_size': batch_size,
<add> 'nb_epoch': nb_epoch,
<add> 'nb_sample': nb_train_sample,
<add> 'verbose': verbose,
<add> 'do_validation': do_validation,
<add> })
<add> callbacks.on_train_begin()
<add>
<add> self.stop_training = False
<add> for epoch in range(nb_epoch):
<add> callbacks.on_epoch_begin(epoch)
<add> if shuffle:
<add> np.random.shuffle(index_array)
<add>
<add> batches = make_batches(nb_train_sample, batch_size)
<add> for batch_index, (batch_start, batch_end) in enumerate(batches):
<add> batch_ids = index_array[batch_start:batch_end]
<add> ins_batch = slice_X(ins, batch_ids)
<add>
<add> batch_logs = {}
<add> batch_logs['batch'] = batch_index
<add> batch_logs['size'] = len(batch_ids)
<add> callbacks.on_batch_begin(batch_index, batch_logs)
<add> outs = f(*ins_batch)
<add> if type(outs) != list:
<add> outs = [outs]
<add> for l, o in zip(out_labels, outs):
<add> batch_logs[l] = o
<add>
<add> callbacks.on_batch_end(batch_index, batch_logs)
<add>
<add> if batch_index == len(batches) - 1: # last batch
<add> # validation
<add> epoch_logs = {}
<add> if do_validation:
<add> # replace with self._evaluate
<add> val_outs = val_f(*val_ins)
<add> if type(val_outs) != list:
<add> val_outs = [val_outs]
<add> # same labels assumed
<add> for l, o in zip(out_labels, val_outs):
<add> epoch_logs['val_' + l] = o
<add>
<add> callbacks.on_epoch_end(epoch, epoch_logs)
<add> if self.stop_training:
<add> break
<add>
<add> callbacks.on_train_end()
<add> return history
<add>
<add>
<add>class Sequential(Model, containers.Sequential):
<add> '''
<add> Inherits from Model the following methods:
<add> - _fit
<add> - _predict
<add> - _evaluate
<add> Inherits from containers.Sequential the following methods:
<add> - __init__
<add> - add
<add> - get_output
<add> - get_input
<add> - get_weights
<add> - set_weights
<add> '''
<add>
<ide> def compile(self, optimizer, loss, class_mode="categorical", theano_mode=None):
<ide> self.optimizer = optimizers.get(optimizer)
<ide> self.loss = weighted_objective(objectives.get(loss))
<ide> def compile(self, optimizer, loss, class_mode="categorical", theano_mode=None):
<ide> self.weights = T.ones_like(self.y_train)
<ide>
<ide> train_loss = self.loss(self.y, self.y_train, self.weights)
<del> test_score = self.loss(self.y, self.y_test, self.weights)
<add> test_loss = self.loss(self.y, self.y_test, self.weights)
<add>
<add> train_loss.name = 'train_loss'
<add> test_loss.name = 'test_loss'
<add> self.y.name = 'y'
<ide>
<ide> if class_mode == "categorical":
<ide> train_accuracy = T.mean(T.eq(T.argmax(self.y, axis=-1), T.argmax(self.y_train, axis=-1)))
<ide> def compile(self, optimizer, loss, class_mode="categorical", theano_mode=None):
<ide> updates=updates, allow_input_downcast=True, mode=theano_mode)
<ide> self._predict = theano.function(predict_ins, self.y_test,
<ide> allow_input_downcast=True, mode=theano_mode)
<del> self._test = theano.function(test_ins, test_score,
<add> self._test = theano.function(test_ins, test_loss,
<ide> allow_input_downcast=True, mode=theano_mode)
<del> self._test_with_acc = theano.function(test_ins, [test_score, test_accuracy],
<add> self._test_with_acc = theano.function(test_ins, [test_loss, test_accuracy],
<ide> allow_input_downcast=True, mode=theano_mode)
<ide>
<ide>
<ide> def test(self, X, y, accuracy=False):
<ide> else:
<ide> return self._test(*ins)
<ide>
<del>
<ide> def fit(self, X, y, batch_size=128, nb_epoch=100, verbose=1, callbacks=[],
<ide> validation_split=0., validation_data=None, shuffle=True, show_accuracy=False,
<ide> class_weight=None, sample_weight=None):
<ide> def fit(self, X, y, batch_size=128, nb_epoch=100, verbose=1, callbacks=[],
<ide> y = standardize_y(y)
<ide> sample_weight = standardize_weights(y, class_weight=class_weight, sample_weight=sample_weight)
<ide>
<del> do_validation = False
<add> val_f = None
<add> val_ins = None
<add> if validation_data or validation_split:
<add> if show_accuracy:
<add> val_f = self._test_with_acc
<add> else:
<add> val_f = self._test
<ide> if validation_data:
<ide> try:
<ide> X_val, y_val = validation_data
<ide> except:
<ide> raise Exception("Invalid format for validation data; provide a tuple (X_val, y_val). \
<ide> X_val may be a numpy array or a list of numpy arrays depending on your model input.")
<del> do_validation = True
<ide> X_val = standardize_X(X_val)
<ide> y_val = standardize_y(y_val)
<del>
<del> if verbose:
<del> print("Train on %d samples, validate on %d samples" % (len(y), len(y_val)))
<del> else:
<del> if 0 < validation_split < 1:
<del> # If a validation split size is given (e.g. validation_split=0.2)
<del> # then split X into smaller X and X_val,
<del> # and split y into smaller y and y_val.
<del> do_validation = True
<del> split_at = int(len(y) * (1 - validation_split))
<del> (X, X_val) = (slice_X(X, 0, split_at), slice_X(X, split_at))
<del> (y, y_val) = (y[:split_at], y[split_at:])
<del> sample_weight = sample_weight[:split_at]
<del> if verbose:
<del> print("Train on %d samples, validate on %d samples" % (len(y), len(y_val)))
<add> val_ins = X_val + [y_val, np.ones(y_val.shape[:-1] + (1,))]
<ide>
<del> index_array = np.arange(len(y))
<del>
<del> if verbose:
<del> callbacks = [cbks.BaseLogger()] + callbacks
<del> callbacks = cbks.CallbackList([cbks.History()] + callbacks)
<del>
<del> callbacks._set_model(self)
<del> callbacks._set_params({
<del> 'batch_size': batch_size,
<del> 'nb_epoch': nb_epoch,
<del> 'nb_sample': len(y),
<del> 'verbose': verbose,
<del> 'do_validation': do_validation,
<del> 'show_accuracy': show_accuracy
<del> })
<del> callbacks.on_train_begin()
<del>
<del> self.stop_training = False
<del> for epoch in range(nb_epoch):
<del> callbacks.on_epoch_begin(epoch)
<del> if shuffle:
<del> np.random.shuffle(index_array)
<del>
<del> batches = make_batches(len(y), batch_size)
<del> for batch_index, (batch_start, batch_end) in enumerate(batches):
<del> batch_ids = index_array[batch_start:batch_end]
<del> X_batch = slice_X(X, batch_ids)
<del> y_batch = y[batch_ids]
<del> weight_batch = sample_weight[batch_ids]
<del>
<del> batch_logs = {}
<del> batch_logs['batch'] = batch_index
<del> batch_logs['size'] = len(batch_ids)
<del> callbacks.on_batch_begin(batch_index, batch_logs)
<del>
<del> ins = X_batch + [y_batch, weight_batch]
<del> if show_accuracy:
<del> loss, acc = self._train_with_acc(*ins)
<del> batch_logs['accuracy'] = acc
<del> else:
<del> loss = self._train(*ins)
<del> batch_logs['loss'] = loss
<del>
<del> callbacks.on_batch_end(batch_index, batch_logs)
<del>
<del> if batch_index == len(batches) - 1: # last batch
<del> # validation
<del> epoch_logs = {}
<del> if do_validation:
<del> if show_accuracy:
<del> val_loss, val_acc = self.evaluate(X_val, y_val, batch_size=batch_size, \
<del> verbose=0, show_accuracy=True)
<del> epoch_logs['val_accuracy'] = val_acc
<del> else:
<del> val_loss = self.evaluate(X_val, y_val, batch_size=batch_size, verbose=0)
<del> epoch_logs['val_loss'] = val_loss
<add> if show_accuracy:
<add> f = self._train_with_acc
<add> out_labels = ['loss', 'acc']
<add> else:
<add> f = self._train
<add> out_labels = ['loss']
<ide>
<del> callbacks.on_epoch_end(epoch, epoch_logs)
<del> if self.stop_training:
<del> break
<add> ins = X + [y, sample_weight]
<ide>
<del> callbacks.on_train_end()
<del> return callbacks.callbacks[0] # return history
<add> return self._fit(f, ins, out_labels=out_labels, batch_size=batch_size, nb_epoch=nb_epoch, verbose=verbose, callbacks=callbacks, \
<add> validation_split=validation_split, val_f=val_f, val_ins=val_ins, shuffle=shuffle)
<ide>
<ide>
<ide> def predict(self, X, batch_size=128, verbose=1):
<ide> def evaluate(self, X, y, batch_size=128, show_accuracy=False, verbose=1, sample_
<ide> return tot_score / seen
<ide>
<ide>
<del>class Sequential(Model, containers.Sequential):
<del> '''
<del> Inherits from Model the following methods:
<del> - compile
<del> - train
<del> - test
<del> - evaluate
<del> - fit
<del> - predict
<del> - predict_proba
<del> - predict_classes
<del> Inherits from containers.Sequential the following methods:
<del> - add
<del> - get_output
<del> - get_input
<del> - get_weights
<del> - set_weights
<del> '''
<del> def __init__(self):
<del> self.layers = []
<del> self.params = [] # learnable
<del> self.regularizers = [] # same size as params
<del> self.constraints = [] # same size as params
<del>
<del>
<ide> def get_config(self, verbose=0):
<ide> layers = []
<ide> for i, l in enumerate(self.layers):
<ide> def load_weights(self, filepath):
<ide> weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
<ide> self.layers[k].set_weights(weights)
<ide> f.close()
<add>
<add>
<add>class Graph(containers.Graph):
<add> def compile(self, optimizer, loss, theano_mode='DebugMode'):
<add> # loss is a dictionary mapping output name to loss functions
<add> ys = []
<add> ys_train = []
<add> ys_test = []
<add> train_loss = 0.
<add> test_loss = 0.
<add> for output_name in self.output_order:
<add> loss_fn = loss[output_name]
<add> output = self.outputs[output_name]
<add> y_train = output.get_output(True)
<add> y_test = output.get_output(False)
<add> y = T.zeros_like(y_test)
<add> ys.append(y)
<add> ys_train.append(y_train)
<add> ys_test.append(y_test)
<add>
<add> train_loss += objectives.get(loss_fn)(y, y_train).mean()
<add> test_loss += objectives.get(loss_fn)(y, y_test).mean()
<add>
<add> train_loss.name = 'train_loss'
<add> test_loss.name = 'test_loss'
<add>
<add> ins = [self.inputs[name].input for name in self.input_order]
<add> train_ins = ins + ys
<add> test_ins = ins + ys
<add>
<add> for r in self.regularizers:
<add> train_loss = r(train_loss)
<add> self.optimizer = optimizers.get(optimizer)
<add> updates = self.optimizer.get_updates(self.params, self.constraints, train_loss)
<add>
<add> self._train = theano.function(train_ins, train_loss,
<add> updates=updates, allow_input_downcast=True, mode=theano_mode)
<add> self._test = theano.function(test_ins, test_loss,
<add> allow_input_downcast=True, mode=theano_mode)
<add> self._predict = theano.function(inputs=ins, outputs=ys_test,
<add> allow_input_downcast=True, mode=theano_mode)
<add>
<add> def train(self, data):
<add> # data is a dictionary mapping output and input names to arrays
<add> ins = [data[name] for name in self.input_order] + [data[name] for name in self.output_order]
<add> return self._train(*ins)
<add>
<add> def test(self, data):
<add> # data is a dictionary mapping input names to arrays
<add> ins = [data[name] for name in self.input_order] + [data[name] for name in self.output_order]
<add> return self._test(*ins)
<add>
<add> def predict(self, data):
<add> # data is a dictionary mapping input names to arrays
<add> ins = [data[name] for name in self.input_order]
<add> return self._predict(*ins)
<add>
<add>
<ide><path>tests/manual/check_graph.py
<add>from keras.models import Graph, Sequential
<add>from keras.layers import containers
<add>from keras.layers.core import Dense
<add>import numpy as np
<add>
<add>X = np.random.random((100, 32))
<add>X2 = np.random.random((100, 32))
<add>y = np.random.random((100, 4))
<add>y2 = np.random.random((100, 4))
<add>
<add>print 'test a non-sequential graph with 1 input and 1 output'
<add>graph = Graph()
<add>graph.add_input(name='input1', ndim=2)
<add>
<add>graph.add_node(Dense(32, 16), name='dense1', input='input1')
<add>graph.add_node(Dense(32, 4), name='dense2', input='input1')
<add>graph.add_node(Dense(16, 4), name='dense3', input='dense1')
<add>
<add>graph.add_output(name='output1', inputs=['dense2', 'dense3'], merge_mode='sum')
<add>graph.compile('rmsprop', {'output1':'mse'})
<add>
<add>out = graph.predict({'input1':X})
<add>loss = graph.test({'input1':X, 'output1':y})
<add>loss = graph.train({'input1':X, 'output1':y})
<add>print loss
<add>
<add>
<add>print 'test a more complex non-sequential graph with 1 input and 1 output'
<add>graph = Graph()
<add>graph.add_input(name='input1', ndim=2)
<add>
<add>graph.add_node(Dense(32, 16), name='dense1', input='input1')
<add>graph.add_node(Dense(32, 4), name='dense2', input='input1')
<add>
<add>graph.add_node(Dense(4, 16), name='dense3', input='dense2')
<add>graph.add_node(Dense(16, 4), name='dense4', inputs=['dense1', 'dense3'], merge_mode='sum')
<add>
<add>graph.add_output(name='output1', inputs=['dense2', 'dense4'], merge_mode='sum')
<add>graph.compile('rmsprop', {'output1':'mse'})
<add>
<add>out = graph.predict({'input1':X})
<add>loss = graph.test({'input1':X, 'output1':y})
<add>loss = graph.train({'input1':X, 'output1':y})
<add>print loss
<add>
<add>
<add>print 'test a non-sequential graph with 2 inputs and 1 output'
<add>graph = Graph()
<add>graph.add_input(name='input1', ndim=2)
<add>graph.add_input(name='input2', ndim=2)
<add>
<add>graph.add_node(Dense(32, 16), name='dense1', input='input1')
<add>graph.add_node(Dense(32, 4), name='dense2', input='input2')
<add>graph.add_node(Dense(16, 4), name='dense3', input='dense1')
<add>
<add>graph.add_output(name='output1', inputs=['dense2', 'dense3'], merge_mode='sum')
<add>graph.compile('rmsprop', {'output1':'mse'})
<add>
<add>out = graph.predict({'input1':X, 'input2':X2})
<add>loss = graph.test({'input1':X, 'input2':X2, 'output1':y})
<add>loss = graph.train({'input1':X, 'input2':X2, 'output1':y})
<add>print loss
<add>
<add>
<add>print 'test a non-sequential graph with 1 input and 2 outputs'
<add>graph = Graph()
<add>graph.add_input(name='input1', ndim=2)
<add>
<add>graph.add_node(Dense(32, 16), name='dense1', input='input1')
<add>graph.add_node(Dense(32, 4), name='dense2', input='input1')
<add>graph.add_node(Dense(16, 4), name='dense3', input='dense1')
<add>
<add>graph.add_output(name='output1', input='dense2')
<add>graph.add_output(name='output2', input='dense3')
<add>graph.compile('rmsprop', {'output1':'mse', 'output2':'mse'})
<add>
<add>out = graph.predict({'input1':X})
<add>loss = graph.test({'input1':X, 'output1':y, 'output2':y2})
<add>loss = graph.train({'input1':X, 'output1':y, 'output2':y2})
<add>print loss
<add>
<add>
<add>print 'test layer-like API'
<add>
<add>graph = containers.Graph()
<add>graph.add_input(name='input1', ndim=2)
<add>graph.add_node(Dense(32, 16), name='dense1', input='input1')
<add>graph.add_node(Dense(32, 4), name='dense2', input='input1')
<add>graph.add_node(Dense(16, 4), name='dense3', input='dense1')
<add>graph.add_output(name='output1', inputs=['dense2', 'dense3'], merge_mode='sum')
<add>
<add>seq = Sequential()
<add>seq.add(Dense(32, 32, name='first_seq_dense'))
<add>seq.add(graph)
<add>seq.add(Dense(4, 4, name='last_seq_dense'))
<add>
<add>print seq.params
<add>print seq.layers
<add>print 'input:'
<add>print seq.get_input()
<add>print 'output:'
<add>print seq.get_output()
<add>seq.compile('rmsprop', 'mse')
<add>
<add>loss = seq.fit(X, y, batch_size=10, nb_epoch=1)
<add>print loss
<ide>\ No newline at end of file | 4 |
Ruby | Ruby | add documentation on app_generators | adfd43a4daf08cc9a801a0b6a039dd109ce4e81f | <ide><path>railties/lib/rails/engine.rb
<ide> module Rails
<ide> # end
<ide> # end
<ide> #
<add> # == Generators
<add> #
<add> # You can set up generators for engine with config.generators method:
<add> #
<add> # class MyEngine < Rails::Engine
<add> # config.generators do |g|
<add> # g.orm :active_record
<add> # g.template_engine :erb
<add> # g.test_framework :test_unit
<add> # end
<add> # end
<add> #
<add> # You can also set generators for application by using config.app_generators:
<add> #
<add> # class MyEngine < Rails::Engine
<add> # # note that you can also pass block to app_generators in the same way you
<add> # # can pass it to generators method
<add> # config.app_generators.orm :datamapper
<add> # end
<add> #
<ide> # == Paths
<ide> #
<ide> # Since Rails 3.0, both your Application and Engines do not have hardcoded paths. | 1 |
Javascript | Javascript | add a whole ton of missing documentation | 352f4064e9720b91b77ad477321492074899dc5b | <ide><path>src/git-repository-async.js
<ide> export default class GitRepositoryAsync {
<ide> }
<ide> }
<ide>
<add> // Public: Destroy this {GitRepositoryAsync} object.
<add> //
<add> // This destroys any tasks and subscriptions and releases the underlying
<add> // libgit2 repository handle. This method is idempotent.
<ide> destroy () {
<ide> if (this.emitter) {
<ide> this.emitter.emit('did-destroy')
<ide> export default class GitRepositoryAsync {
<ide> // Event subscription
<ide> // ==================
<ide>
<add> // Public: Invoke the given callback when this GitRepositoryAsync's destroy()
<add> // method is invoked.
<add> //
<add> // * `callback` {Function}
<add> //
<add> // Returns a {Disposable} on which `.dispose()` can be called to unsubscribe.
<ide> onDidDestroy (callback) {
<ide> return this.emitter.on('did-destroy', callback)
<ide> }
<ide>
<add> // Public: Invoke the given callback when a specific file's status has
<add> // changed. When a file is updated, reloaded, etc, and the status changes, this
<add> // will be fired.
<add> //
<add> // * `callback` {Function}
<add> // * `event` {Object}
<add> // * `path` {String} the old parameters the decoration used to have
<add> // * `pathStatus` {Number} representing the status. This value can be passed to
<add> // {::isStatusModified} or {::isStatusNew} to get more information.
<add> //
<add> // Returns a {Disposable} on which `.dispose()` can be called to unsubscribe.
<ide> onDidChangeStatus (callback) {
<ide> return this.emitter.on('did-change-status', callback)
<ide> }
<ide>
<add> // Public: Invoke the given callback when a multiple files' statuses have
<add> // changed. For example, on window focus, the status of all the paths in the
<add> // repo is checked. If any of them have changed, this will be fired. Call
<add> // {::getPathStatus(path)} to get the status for your path of choice.
<add> //
<add> // * `callback` {Function}
<add> //
<add> // Returns a {Disposable} on which `.dispose()` can be called to unsubscribe.
<ide> onDidChangeStatuses (callback) {
<ide> return this.emitter.on('did-change-statuses', callback)
<ide> }
<ide> export default class GitRepositoryAsync {
<ide> //
<ide> // Returns a {Promise} which resolves to a {String}.
<ide> getShortHead (_path) {
<del> return this._getRepo(_path)
<add> return this.getRepo(_path)
<ide> .then(repo => repo.getCurrentBranch())
<ide> .then(branch => branch.shorthand())
<ide> }
<ide> export default class GitRepositoryAsync {
<ide> // * `ahead` The {Number} of commits ahead.
<ide> // * `behind` The {Number} of commits behind.
<ide> getAheadBehindCount (reference, _path) {
<del> return this._getRepo(_path)
<add> return this.getRepo(_path)
<ide> .then(repo => Promise.all([repo, repo.getBranch(reference)]))
<ide> .then(([repo, local]) => {
<ide> const upstream = Git.Branch.upstream(local)
<ide> export default class GitRepositoryAsync {
<ide> // Returns a {Promise} which resolves to the {String} git configuration value
<ide> // specified by the key.
<ide> getConfigValue (key, _path) {
<del> return this._getRepo(_path)
<add> return this.getRepo(_path)
<ide> .then(repo => repo.configSnapshot())
<ide> .then(config => config.getStringBuf(key))
<ide> .catch(_ => null)
<ide> export default class GitRepositoryAsync {
<ide> // Returns a {Promise} which resolves to a {String} branch name such as
<ide> // `refs/remotes/origin/master`.
<ide> getUpstreamBranch (_path) {
<del> return this._getRepo(_path)
<add> return this.getRepo(_path)
<ide> .then(repo => repo.getCurrentBranch())
<ide> .then(branch => Git.Branch.upstream(branch))
<ide> }
<ide> export default class GitRepositoryAsync {
<ide> // * `remotes` An {Array} of remote reference names.
<ide> // * `tags` An {Array} of tag reference names.
<ide> getReferences (_path) {
<del> return this._getRepo(_path)
<add> return this.getRepo(_path)
<ide> .then(repo => repo.getReferences(Git.Reference.TYPE.LISTALL))
<ide> .then(refs => {
<ide> const heads = []
<ide> export default class GitRepositoryAsync {
<ide> // Returns a {Promise} which resolves to the current {String} SHA for the
<ide> // given reference.
<ide> getReferenceTarget (reference, _path) {
<del> return this._getRepo(_path)
<add> return this.getRepo(_path)
<ide> .then(repo => Git.Reference.nameToId(repo, reference))
<ide> .then(oid => oid.tostrS())
<ide> }
<ide>
<ide> // Reading Status
<ide> // ==============
<ide>
<add> // Public: Resolves true if the given path is modified.
<add> //
<add> // * `path` The {String} path to check.
<add> //
<add> // Returns a {Promise} which resolves to a {Boolean} that's true if the `path`
<add> // is modified.
<ide> isPathModified (_path) {
<ide> return this._filterStatusesByPath(_path).then(statuses => {
<ide> return statuses.filter(status => status.isModified()).length > 0
<ide> })
<ide> }
<ide>
<add> // Public: Resolves true if the given path is new.
<add> //
<add> // * `path` The {String} path to check.
<add> //
<add> // Returns a {Promise} which resolves to a {Boolean} that's true if the `path`
<add> // is new.
<ide> isPathNew (_path) {
<ide> return this._filterStatusesByPath(_path).then(statuses => {
<ide> return statuses.filter(status => status.isNew()).length > 0
<ide> })
<ide> }
<ide>
<add> // Public: Is the given path ignored?
<add> //
<add> // * `path` The {String} path to check.
<add> //
<add> // Returns a {Promise} which resolves to a {Boolean} that's true if the `path`
<add> // is ignored.
<ide> isPathIgnored (_path) {
<ide> return this.repoPromise.then(repo => Git.Ignore.pathIsIgnored(repo, _path))
<ide> }
<ide> export default class GitRepositoryAsync {
<ide> //
<ide> // Returns a promise resolving to a {Number} representing the status. This value can be passed to
<ide> // {::isStatusModified} or {::isStatusNew} to get more information.
<del>
<ide> getDirectoryStatus (directoryPath) {
<ide> let relativePath
<ide> // XXX _filterSBD already gets repoPromise
<ide> export default class GitRepositoryAsync {
<ide> .then(relativePath => this.pathStatusCache[relativePath])
<ide> }
<ide>
<del> isStatusNew (statusBit) {
<del> return (statusBit & newStatusFlags) > 0
<del> }
<del>
<add> // Public: Returns true if the given status indicates modification.
<add> //
<add> // * `statusBit` A {Number} representing the status.
<add> //
<add> // Returns a {Boolean} that's true if the `statusBit` indicates modification.
<ide> isStatusModified (statusBit) {
<ide> return (statusBit & modifiedStatusFlags) > 0
<ide> }
<ide>
<add> // Public: Returns true if the given status indicates a new path.
<add> //
<add> // * `statusBit` A {Number} representing the status.
<add> //
<add> // Returns a {Boolean} that's true if the `statusBit` indicates a new path.
<add> isStatusNew (statusBit) {
<add> return (statusBit & newStatusFlags) > 0
<add> }
<add>
<add> // Public: Returns true if the given status indicates the path is staged.
<add> //
<add> // * `statusBit` A {Number} representing the status.
<add> //
<add> // Returns a {Boolean} that's true if the `statusBit` indicates the path is
<add> // staged.
<ide> isStatusStaged (statusBit) {
<ide> return (statusBit & indexStatusFlags) > 0
<ide> }
<ide>
<add> // Public: Returns true if the given status indicates the path is ignored.
<add> //
<add> // * `statusBit` A {Number} representing the status.
<add> //
<add> // Returns a {Boolean} that's true if the `statusBit` indicates the path is
<add> // ignored.
<ide> isStatusIgnored (statusBit) {
<ide> return (statusBit & (1 << 14)) > 0
<ide> }
<ide>
<add> // Public: Returns true if the given status indicates the path is deleted.
<add> //
<add> // * `statusBit` A {Number} representing the status.
<add> //
<add> // Returns a {Boolean} that's true if the `statusBit` indicates the path is
<add> // deleted.
<ide> isStatusDeleted (statusBit) {
<ide> return (statusBit & deletedStatusFlags) > 0
<ide> }
<ide>
<del> _getDiffHunks (diff) {
<del> return diff.patches()
<del> .then(patches => Promise.all(patches.map(p => p.hunks()))) // patches :: Array<Patch>
<del> .then(hunks => _.flatten(hunks)) // hunks :: Array<Array<Hunk>>
<del> }
<del>
<del> _getDiffLines (diff) {
<del> return this._getDiffHunks(diff)
<del> .then(hunks => Promise.all(hunks.map(h => h.lines())))
<del> .then(lines => _.flatten(lines)) // lines :: Array<Array<Line>>
<del> }
<del>
<ide> // Retrieving Diffs
<ide> // ================
<ide> // Public: Retrieves the number of lines added and removed to a path.
<ide> export default class GitRepositoryAsync {
<ide> })
<ide> }
<ide>
<del> _diffBlobToBuffer (blob, buffer, options) {
<del> const hunks = []
<del> const hunkCallback = (delta, hunk, payload) => {
<del> hunks.push(hunk)
<del> }
<del> return Git.Diff.blobToBuffer(blob, null, buffer, null, null, options, null, null, hunkCallback, null, null).then(_ => hunks)
<del> }
<del>
<ide> // Public: Retrieves the line diffs comparing the `HEAD` version of the given
<ide> // path and the given text.
<ide> //
<ide> export default class GitRepositoryAsync {
<ide> .then(() => this.refreshStatusForPath(_path))
<ide> }
<ide>
<del> _createBranch (name) {
<del> return this.repoPromise
<del> .then(repo => Promise.all([repo, repo.getHeadCommit()]))
<del> .then(([repo, commit]) => repo.createBranch(name, commit))
<del> }
<del>
<ide> // Public: Checks out a branch in your repository.
<ide> //
<ide> // * `reference` The {String} reference to checkout.
<ide> export default class GitRepositoryAsync {
<ide> }).then(filePath => this.checkoutHead(filePath))
<ide> }
<ide>
<add> // Create a new branch with the given name.
<add> //
<add> // name :: String
<add> // The name of the new branch.
<add> //
<add> // Returns :: Promise<NodeGit.Ref>
<add> // A reference to the created branch.
<add> _createBranch (name) {
<add> return this.repoPromise
<add> .then(repo => Promise.all([repo, repo.getHeadCommit()]))
<add> .then(([repo, commit]) => repo.createBranch(name, commit))
<add> }
<add>
<add> // Get all the hunks in the diff.
<add> //
<add> // diff :: NodeGit.Diff
<add> //
<add> // Returns :: Promise<Array<NodeGit.Hunk>>
<add> _getDiffHunks (diff) {
<add> return diff.patches()
<add> .then(patches => Promise.all(patches.map(p => p.hunks()))) // patches :: Array<Patch>
<add> .then(hunks => _.flatten(hunks)) // hunks :: Array<Array<Hunk>>
<add> }
<add>
<add> // Get all the lines contained in the diff.
<add> //
<add> // diff :: NodeGit.Diff
<add> //
<add> // Returns :: Promise<Array<NodeGit.Line>>
<add> _getDiffLines (diff) {
<add> return this._getDiffHunks(diff)
<add> .then(hunks => Promise.all(hunks.map(h => h.lines())))
<add> .then(lines => _.flatten(lines)) // lines :: Array<Array<Line>>
<add> }
<add>
<add> // Diff the given blob and buffer with the provided options.
<add> //
<add> // blob :: NodeGit.Blob
<add> // buffer :: String
<add> // options :: NodeGit.DiffOptions
<add> //
<add> // Returns :: Promise<Array<NodeGit.Hunk>>
<add> _diffBlobToBuffer (blob, buffer, options) {
<add> const hunks = []
<add> const hunkCallback = (delta, hunk, payload) => {
<add> hunks.push(hunk)
<add> }
<add> return Git.Diff.blobToBuffer(blob, null, buffer, null, null, options, null, null, hunkCallback, null, null)
<add> .then(_ => hunks)
<add> }
<add>
<ide> // Get the current branch and update this.branch.
<ide> //
<ide> // Returns :: Promise<String>
<ide> export default class GitRepositoryAsync {
<ide> .then(branchName => this.branch = branchName)
<ide> }
<ide>
<add> // Refresh the cached ahead/behind count with the given branch.
<add> //
<add> // branchName :: String
<add> // The name of the branch whose ahead/behind should be used for
<add> // the refresh.
<add> //
<add> // Returns :: Promise<null>
<ide> _refreshAheadBehindCount (branchName) {
<ide> return this.getAheadBehindCount(branchName)
<ide> .then(counts => this.upstreamByPath['.'] = counts)
<ide> }
<ide>
<add> // Refresh the cached status.
<add> //
<add> // Returns :: Promise<null>
<ide> _refreshStatus () {
<ide> this._refreshingCount++
<ide>
<ide> export default class GitRepositoryAsync {
<ide> return Promise.all([status, branch, aheadBehind]).then(_ => null)
<ide> }
<ide>
<del> // Section: Private
<del> // ================
<del>
<del> _isRefreshing () {
<del> return this._refreshingCount === 0
<del> }
<del>
<del> _destroyed() {
<del> return this.repoPromise == null
<del> }
<del>
<del> _getRepo (_path) {
<add> // Get the NodeGit repository for the given path.
<add> //
<add> // path :: Optional<String>
<add> // The path within the repository. This is only needed if you want
<add> // to get the repository for that path if it is a submodule.
<add> //
<add> // Returns :: Promise<NodeGit.Repository>
<add> getRepo (_path) {
<ide> if (this._destroyed()) {
<ide> return Promise.reject(new Error('Repository has been destroyed'))
<ide> }
<ide> export default class GitRepositoryAsync {
<ide> })
<ide> }
<ide>
<add> // Section: Private
<add> // ================
<add>
<add> // Is the repository currently refreshing its status?
<add> //
<add> // Returns :: Bool
<add> _isRefreshing () {
<add> return this._refreshingCount === 0
<add> }
<add>
<add> // Has the repository been destroyed?
<add> //
<add> // Returns :: Bool
<add> _destroyed() {
<add> return this.repoPromise == null
<add> }
<add>
<add> // Subscribe to events on the given buffer.
<ide> subscribeToBuffer (buffer) {
<ide> const bufferSubscriptions = new CompositeDisposable()
<ide>
<ide> export default class GitRepositoryAsync {
<ide> return
<ide> }
<ide>
<add> // Get the status for the given path.
<add> //
<add> // path :: String
<add> // The path whose status is wanted.
<add> //
<add> // Returns :: Promise<NodeGit.StatusFile>
<add> // The status for the path.
<ide> _filterStatusesByPath (_path) {
<del> // Surely I'm missing a built-in way to do this
<add> // TODO: Is there a more efficient way to do this?
<ide> let basePath = null
<ide> return this.repoPromise
<ide> .then(repo => {
<ide> export default class GitRepositoryAsync {
<ide> })
<ide> }
<ide>
<add> // Get the status for everything in the given directory.
<add> //
<add> // directoryPath :: String
<add> // The directory whose status is wanted.
<add> //
<add> // Returns :: Promise<Array<NodeGit.StatusFile>>
<add> // The status for every file in the directory.
<ide> _filterStatusesByDirectory (directoryPath) {
<ide> return this.repoPromise
<ide> .then(repo => repo.getStatus()) | 1 |
Javascript | Javascript | increase runinasyncscope() coverage | 73b8909501e5f68b7b4131ea1bf13dd454b18749 | <ide><path>test/parallel/test-async-hooks-run-in-async-scope-this-arg.js
<add>'use strict';
<add>
<add>// Test that passing thisArg to runInAsyncScope() works.
<add>
<add>const common = require('../common');
<add>const assert = require('assert');
<add>const { AsyncResource } = require('async_hooks');
<add>
<add>const thisArg = {};
<add>
<add>const res = new AsyncResource('fhqwhgads');
<add>
<add>function callback() {
<add> assert.strictEqual(this, thisArg);
<add>}
<add>
<add>res.runInAsyncScope(common.mustCall(callback), thisArg); | 1 |
Text | Text | fix typo for object oriented programming article | a7ad6dff118ccec62971f8b793c04da06517e366 | <ide><path>guide/english/design-patterns/object-oriented-programming/index.md
<ide> In procedural programming, we simply create variables and change them when requi
<ide>
<ide> Another extremely useful concept is that of inheritance. The idea is that a class can inherit attributes and behaviour from a base class. For example, while creating a game, we have a player and enemy. We can create a base class called person, and give it attributes like name, age, gender, etc. Person's behaviour can be walk and jump. A player and enemy can then inherit these "qualities" from person, and can have added qualities like kill, score, eat, etc.
<ide>
<del>This helps in reusing code and making you rcode structure much more clean. Data hiding is another cool feature. In OO, we have the notion of private and public attributes. Private attributes can be accessed and modified only by methods of that particular class, while public data can be modified from anywhere in the program (within scope obviously).
<add>This helps in reusing code and making your code structure much more clean. Data hiding is another cool feature. In OO, we have the notion of private and public attributes. Private attributes can be accessed and modified only by methods of that particular class, while public data can be modified from anywhere in the program (within scope obviously).
<ide>
<ide> OO programming represents problem as a real-life statement and thus helps us solve those problems in an effective way. An object in OO programming represents a real-life entity. A class represents a blueprint of a number of objects. An object can be considered as a instance of a class.
<ide> | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.