| content_type (string, 8 classes) | main_lang (string, 7 classes) | message (string, 1–50 chars) | sha (string, 40 chars) | patch (string, 52–962k chars) | file_count (int64, 1–300) |
|---|---|---|---|---|---|
Go
|
Go
|
add more overlay tests and benchmarks
|
246e99303195b6ce4c357ceb5925990aa1890288
|
<ide><path>daemon/graphdriver/overlay/overlay.go
<ide> import (
<ide>
<ide> "github.com/docker/docker/daemon/graphdriver"
<ide> "github.com/docker/docker/pkg/archive"
<del> "github.com/docker/docker/pkg/chrootarchive"
<ide> "github.com/docker/docker/pkg/idtools"
<ide>
<ide> "github.com/docker/docker/pkg/mount"
<ide> func (d *Driver) ApplyDiff(id string, parent string, diff archive.Reader) (size
<ide> }
<ide>
<ide> options := &archive.TarOptions{UIDMaps: d.uidMaps, GIDMaps: d.gidMaps}
<del> if size, err = chrootarchive.ApplyUncompressedLayer(tmpRootDir, diff, options); err != nil {
<add> if size, err = graphdriver.ApplyUncompressedLayer(tmpRootDir, diff, options); err != nil {
<ide> return 0, err
<ide> }
<ide>
<ide><path>daemon/graphdriver/overlay/overlay_test.go
<ide> package overlay
<ide> import (
<ide> "testing"
<ide>
<add> "github.com/docker/docker/daemon/graphdriver"
<ide> "github.com/docker/docker/daemon/graphdriver/graphtest"
<add> "github.com/docker/docker/pkg/archive"
<ide> )
<ide>
<add>func init() {
<add>	// Do not use chroot, to speed up run time and allow archive
<add>	// errors or hangs to be debugged directly from the test process.
<add> graphdriver.ApplyUncompressedLayer = archive.ApplyUncompressedLayer
<add>}
<add>
<ide> // This avoids creating a new driver for each test if all tests are run
<ide> // Make sure to put new tests between TestOverlaySetup and TestOverlayTeardown
<ide> func TestOverlaySetup(t *testing.T) {
<ide> func TestOverlayCreateSnap(t *testing.T) {
<ide> graphtest.DriverTestCreateSnap(t, "overlay")
<ide> }
<ide>
<add>func TestOverlay50LayerRead(t *testing.T) {
<add> graphtest.DriverTestDeepLayerRead(t, 50, "overlay")
<add>}
<add>
<add>func TestOverlayDiffApply10Files(t *testing.T) {
<add> graphtest.DriverTestDiffApply(t, 10, "overlay")
<add>}
<add>
<add>func TestOverlayChanges(t *testing.T) {
<add> graphtest.DriverTestChanges(t, "overlay")
<add>}
<add>
<ide> func TestOverlayTeardown(t *testing.T) {
<ide> graphtest.PutDriver(t)
<ide> }
<add>
<add>// Benchmarks should always set up a new driver
<add>
<add>func BenchmarkExists(b *testing.B) {
<add> graphtest.DriverBenchExists(b, "overlay")
<add>}
<add>
<add>func BenchmarkGetEmpty(b *testing.B) {
<add> graphtest.DriverBenchGetEmpty(b, "overlay")
<add>}
<add>
<add>func BenchmarkDiffBase(b *testing.B) {
<add> graphtest.DriverBenchDiffBase(b, "overlay")
<add>}
<add>
<add>func BenchmarkDiffSmallUpper(b *testing.B) {
<add> graphtest.DriverBenchDiffN(b, 10, 10, "overlay")
<add>}
<add>
<add>func BenchmarkDiff10KFileUpper(b *testing.B) {
<add> graphtest.DriverBenchDiffN(b, 10, 10000, "overlay")
<add>}
<add>
<add>func BenchmarkDiff10KFilesBottom(b *testing.B) {
<add> graphtest.DriverBenchDiffN(b, 10000, 10, "overlay")
<add>}
<add>
<add>func BenchmarkDiffApply100(b *testing.B) {
<add> graphtest.DriverBenchDiffApplyN(b, 100, "overlay")
<add>}
<add>
<add>func BenchmarkDiff20Layers(b *testing.B) {
<add> graphtest.DriverBenchDeepLayerDiff(b, 20, "overlay")
<add>}
<add>
<add>func BenchmarkRead20Layers(b *testing.B) {
<add> graphtest.DriverBenchDeepLayerRead(b, 20, "overlay")
<add>}
| 2
|
Javascript
|
Javascript
|
fix socket reuse with agent
|
3e02636bcbf59517e25ad5dd2ebb954d5de95b81
|
<ide><path>lib/_http_agent.js
<ide> Agent.prototype.addRequest = function addRequest(req, options, port/*legacy*/,
<ide> var socket = this.freeSockets[name].shift();
<ide> // Assign the handle a new asyncId and run any init() hooks.
<ide> socket._handle.asyncReset();
<add> socket[async_id_symbol] = socket._handle.getAsyncId();
<ide> debug('have free socket');
<ide>
<ide> // don't leak
<ide><path>test/parallel/test-async-hooks-http-agent.js
<add>'use strict';
<add>const common = require('../common');
<add>const assert = require('assert');
<add>const async_id_symbol = process.binding('async_wrap').async_id_symbol;
<add>const http = require('http');
<add>
<add>// Regression test for https://github.com/nodejs/node/issues/13325
<add>// Checks that an http.Agent properly asyncReset()s a reused socket handle, and
<add>// re-assigns the fresh async id to the reused `net.Socket` instance.
<add>
<add>// Make sure a single socket is transparently reused for 2 requests.
<add>const agent = new http.Agent({
<add> keepAlive: true,
<add> keepAliveMsecs: Infinity,
<add> maxSockets: 1
<add>});
<add>
<add>const server = http.createServer(common.mustCall((req, res) => {
<add> req.once('data', common.mustCallAtLeast(() => {
<add> res.writeHead(200, {'Content-Type': 'text/plain'});
<add> res.write('foo');
<add> }));
<add> req.on('end', common.mustCall(() => {
<add> res.end('bar');
<add> }));
<add>}, 2)).listen(0, common.mustCall(() => {
<add> const port = server.address().port;
<add> const payload = 'hello world';
<add>
<add> // First request. This is useless except for adding a socket to the
<add> // agent’s pool for reuse.
<add> const r1 = http.request({
<add> agent, port, method: 'POST'
<add> }, common.mustCall((res) => {
<add> // Remember which socket we used.
<add> const socket = res.socket;
<add> const asyncIdAtFirstRequest = socket[async_id_symbol];
<add> assert.ok(asyncIdAtFirstRequest > 0, `${asyncIdAtFirstRequest} > 0`);
<add> // Check that request and response share their socket.
<add> assert.strictEqual(r1.socket, socket);
<add>
<add> res.on('data', common.mustCallAtLeast(() => {}));
<add> res.on('end', common.mustCall(() => {
<add> // setImmediate() to give the agent time to register the freed socket.
<add> setImmediate(common.mustCall(() => {
<add> // The socket is free for reuse now.
<add> assert.strictEqual(socket[async_id_symbol], -1);
<add>
<add> // Second request. To re-create the exact conditions from the
<add> // referenced issue, we use a POST request without chunked encoding
<add> // (hence the Content-Length header) and call .end() after the
<add> // response header has already been received.
<add> const r2 = http.request({
<add> agent, port, method: 'POST', headers: {
<add> 'Content-Length': payload.length
<add> }
<add> }, common.mustCall((res) => {
<add> const asyncId = res.socket[async_id_symbol];
<add> assert.ok(asyncId > 0, `${asyncId} > 0`);
<add> assert.strictEqual(r2.socket, socket);
<add> // Empty payload, to hit the “right” code path.
<add> r2.end('');
<add>
<add> res.on('data', common.mustCallAtLeast(() => {}));
<add> res.on('end', common.mustCall(() => {
<add> // Clean up to let the event loop stop.
<add> server.close();
<add> agent.destroy();
<add> }));
<add> }));
<add>
<add> // Schedule a payload to be written immediately, but do not end the
<add> // request just yet.
<add> r2.write(payload);
<add> }));
<add> }));
<add> }));
<add> r1.end(payload);
<add>}));
| 2
|
Javascript
|
Javascript
|
remove redundant code in _defertoconnect
|
79d2c4e1bf13f81af62d6eb2228bb988f709b1f9
|
<ide><path>lib/_http_client.js
<ide> ClientRequest.prototype._deferToConnect = function(method, arguments_, cb) {
<ide> // in the future (when a socket gets assigned out of the pool and is
<ide> // eventually writable).
<ide> var self = this;
<add>
<add> function callSocketMethod() {
<add> if (method)
<add> self.socket[method].apply(self.socket, arguments_);
<add>
<add> if (typeof cb === 'function')
<add> cb();
<add> }
<add>
<ide> var onSocket = function() {
<ide> if (self.socket.writable) {
<del> if (method) {
<del> self.socket[method].apply(self.socket, arguments_);
<del> }
<del> if (cb) { cb(); }
<add> callSocketMethod();
<ide> } else {
<del> self.socket.once('connect', function() {
<del> if (method) {
<del> self.socket[method].apply(self.socket, arguments_);
<del> }
<del> if (cb) { cb(); }
<del> });
<add> self.socket.once('connect', callSocketMethod);
<ide> }
<ide> };
<add>
<ide> if (!self.socket) {
<ide> self.once('socket', onSocket);
<ide> } else {
| 1
|
PHP
|
PHP
|
apply fixes from styleci
|
c2d60b5ac186af29219549daf0806b4c9cdc4a21
|
<ide><path>src/Illuminate/Support/Stringable.php
<ide> public function basename($suffix = '')
<ide> {
<ide> return new static(basename($this->value, $suffix));
<ide> }
<del>
<add>
<ide> /**
<ide> * Get the basename of the class path.
<ide> *
<ide><path>tests/Support/SupportStringableTest.php
<ide> protected function stringable($string = '')
<ide> {
<ide> return new Stringable($string);
<ide> }
<del>
<add>
<ide> public function testClassBasename()
<ide> {
<ide> $this->assertEquals(
| 2
|
Javascript
|
Javascript
|
fix ie8 bug during unitless css props creation
|
123ed1f4425230aa336a7ea616064e8cdbd9dfd9
|
<ide><path>src/browser/dom/CSSProperty.js
<ide> function prefixKey(prefix, key) {
<ide> * of vendor prefixes.
<ide> */
<ide> var prefixes = ['Webkit', 'ms', 'Moz', 'O'];
<del>for (var k in isUnitlessNumber) {
<add>
<add>// Using Object.keys here, or else the vanilla for-in loop makes IE8 go into an
<add>// infinite loop, because it iterates over the newly added props too.
<add>Object.keys(isUnitlessNumber).forEach(function(prop) {
<ide> prefixes.forEach(function(prefix) {
<del> isUnitlessNumber[prefixKey(prefix, k)] = isUnitlessNumber[k];
<add> isUnitlessNumber[prefixKey(prefix, prop)] = isUnitlessNumber[prop];
<ide> });
<del>}
<add>});
<ide>
<ide> /**
<ide> * Most style properties can be unset by doing .style[prop] = '' but IE8
| 1
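The reasoning behind this fix also applies outside JavaScript: mutating a collection while iterating over it directly is unsafe, so the loop should iterate over a snapshot of the keys. A hypothetical Python rendering of the same pitfall and fix (not part of the patch):

```python
# Adding keys to a dict while iterating over it raises RuntimeError in Python,
# much like the for-in loop sends IE8 into an infinite loop in the patch above.
is_unitless_number = {"lineHeight": True, "opacity": True, "zoom": True}
prefixes = ["Webkit", "ms", "Moz", "O"]

# for prop in is_unitless_number:                      # RuntimeError:
#     is_unitless_number["Webkit" + prop] = True       # dict changed size during iteration

for prop in list(is_unitless_number):                  # iterate over a snapshot instead
    for prefix in prefixes:
        # Mirror prefixKey(): capitalize the first letter before prefixing.
        is_unitless_number[prefix + prop[0].upper() + prop[1:]] = is_unitless_number[prop]
```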
|
Python
|
Python
|
kill the demon spawn
|
cca75e788485e8a2a1c44a445c6aba0fb2dfaf56
|
<ide><path>examples/run_squad.py
<ide> def evaluate(args, model, tokenizer, prefix=""):
<ide> eval_feature = features[example_index.item()]
<ide> unique_id = int(eval_feature.unique_id)
<ide>
<del> result = SquadResult([to_list(output[i]) for output in outputs] + [unique_id])
<add> output = [to_list(output[i]) for output in outputs]
<add>
<add> if len(output) >= 5:
<add> start_logits = output[0]
<add> start_top_index = output[1]
<add> end_logits = output[2]
<add> end_top_index = output[3]
<add> cls_logits = output[4]
<add>
<add> result = SquadResult(
<add> unique_id, start_logits, end_logits,
<add> start_top_index=start_top_index,
<add> end_top_index=end_top_index,
<add> cls_logits=cls_logits
<add> )
<add>
<add> else:
<add> start_logits, end_logits = output
<add> result = SquadResult(
<add> unique_id, start_logits, end_logits
<add> )
<add>
<ide> all_results.append(result)
<ide>
<ide> evalTime = timeit.default_timer() - start_time
<ide><path>transformers/data/processors/squad.py
<ide> def __init__(self,
<ide> self.end_position = end_position
<ide>
<ide>
<del>
<ide> class SquadResult(object):
<ide> """
<ide> Constructs a SquadResult which can be used to evaluate a model's output on the SQuAD dataset.
<ide>
<ide> Args:
<del> result: The result output by a model on a SQuAD inference. These results may be complex (5 values) as the ones output by
<del> XLNet or XLM or may be simple like the other models (2 values). They may be passed as a list or as a dict, with the
<del> following accepted formats:
<del>
<del> `dict` output by a simple model:
<del> {
<del> "start_logits": int,
<del> "end_logits": int,
<del> "unique_id": string
<del> }
<del> `list` output by a simple model:
<del> [start_logits, end_logits, unique_id]
<del>
<del> `dict` output by a complex model:
<del> {
<del> "start_top_log_probs": float,
<del> "start_top_index": int,
<del> "end_top_log_probs": float,
<del> "end_top_index": int,
<del> "cls_logits": int,
<del> "unique_id": string
<del> }
<del> `list` output by a complex model:
<del> [start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits, unique_id]
<del>
<del> See `run_squad.py` for an example.
<add> unique_id: The unique identifier corresponding to that example.
<add> start_logits: The logits corresponding to the start of the answer
<add> end_logits: The logits corresponding to the end of the answer
<ide> """
<del> def __init__(self, result):
<del> if isinstance(result, dict):
<del> if "start_logits" in result and "end_logits" in result:
<del> self.start_logits = result["start_logits"]
<del> self.end_logits = result["end_logits"]
<del>
<del> elif "start_top_log_probs" in result and "start_top_index" in result:
<del> self.start_top_log_probs = result["start_top_log_probs"]
<del> self.start_top_index = result["start_top_index"]
<del> self.end_top_log_probs = result["end_top_log_probs"]
<del> self.end_top_index = result["end_top_index"]
<del> self.cls_logits = result["cls_logits"]
<del>
<del> else:
<del> raise ValueError("SquadResult instantiated with wrong values.")
<del>
<del> self.unique_id = result["unique_id"]
<del> elif isinstance(result, list):
<del> if len(result) == 3:
<del> self.start_logits = result[0]
<del> self.end_logits = result[1]
<del>
<del> elif len(result) == 6:
<del> self.start_top_log_probs = result[0]
<del> self.start_top_index = result[1]
<del> self.end_top_log_probs = result[2]
<del> self.end_top_index = result[3]
<del> self.cls_logits = result[4]
<del>
<del> else:
<del> raise ValueError("SquadResult instantiated with wrong values.")
<del>
<del> self.unique_id = result[-1]
<del>
<del> else:
<del> raise ValueError("SquadResult instantiated with wrong values. Should be a dictionary or a list.")
<add> def __init__(self, unique_id, start_logits, end_logits, start_top_index=None, end_top_index=None, cls_logits=None):
<add> self.start_logits = start_logits
<add> self.end_logits = end_logits
<add> self.unique_id = unique_id
<add>
<add> if start_top_index:
<add> self.start_top_index = start_top_index
<add> self.end_top_index = end_top_index
<add> self.cls_logits = cls_logits
<ide>\ No newline at end of file
| 2
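For context, here is a compact sketch of how the refactored `SquadResult` above is meant to be constructed, mirroring the branch in `evaluate()`. The import path is assumed from the file shown in the patch, and `model_output` is a hypothetical stand-in for the per-example tensors after `to_list()`:

```python
from transformers.data.processors.squad import SquadResult  # path assumed from the patch

def build_squad_result(unique_id, model_output):
    if len(model_output) >= 5:
        # XLNet/XLM-style heads: start logits, start indices, end logits,
        # end indices and the answerability (cls) logits.
        start_logits, start_top_index, end_logits, end_top_index, cls_logits = model_output[:5]
        return SquadResult(
            unique_id, start_logits, end_logits,
            start_top_index=start_top_index,
            end_top_index=end_top_index,
            cls_logits=cls_logits,
        )
    # Simple models only return start and end logits.
    start_logits, end_logits = model_output
    return SquadResult(unique_id, start_logits, end_logits)
```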
|
PHP
|
PHP
|
clarify doc block
|
0d518a1d426406dcc926b54e0c526efa54410068
|
<ide><path>src/View/Helper/FormHelper.php
<ide> public function allInputs(array $fields = [], array $options = []) {
<ide> }
<ide>
<ide> /**
<del> * Generate a set of inputs for `$fields`
<add> * Generate a set of inputs for `$fields` wrapped in a fieldset element.
<ide> *
<ide> * You can customize individual inputs through `$fields`.
<ide> * {{{
| 1
|
Text
|
Text
|
remove coverage badge from readme
|
0d5e4c7e096c640419ea0aadb0d09bb9e9a7ddc8
|
<ide><path>README.md
<del># [React](https://reactjs.org/) · [](https://github.com/facebook/react/blob/master/LICENSE) [](https://www.npmjs.com/package/react) [](https://coveralls.io/github/facebook/react?branch=master) [](https://circleci.com/gh/facebook/react) [](https://reactjs.org/docs/how-to-contribute.html#your-first-pull-request)
<add># [React](https://reactjs.org/) · [](https://github.com/facebook/react/blob/master/LICENSE) [](https://www.npmjs.com/package/react) [](https://circleci.com/gh/facebook/react) [](https://reactjs.org/docs/how-to-contribute.html#your-first-pull-request)
<ide>
<ide> React is a JavaScript library for building user interfaces.
<ide>
| 1
|
Javascript
|
Javascript
|
fix typo in prefer-primordials.js
|
0dce87e2790cd9a1c3d6874fdb85c2b09d2d2dfb
|
<ide><path>tools/eslint-rules/prefer-primordials.js
<ide> /**
<ide> * @fileoverview We shouldn't use global built-in object for security and
<del> * performance reason. This linter rule reports replacable codes
<add> * performance reason. This linter rule reports replaceable codes
<ide> * that can be replaced with primordials.
<ide> * @author Leko <leko.noor@gmail.com>
<ide> */
| 1
|
Python
|
Python
|
fix test_f2py so it runs correctly in runtests.py
|
6a871df50947c4ebda79a966fba09b5336e1e061
|
<ide><path>numpy/tests/test_scripts.py
<ide> def test_f2py():
<ide> assert_equal(stdout.strip(), asbytes('2'))
<ide> success = True
<ide> break
<del> except OSError:
<add> except:
<ide> pass
<ide> assert_(success, "Warning: neither %s nor %s found in path" % f2py_cmds)
| 1
|
Python
|
Python
|
ignore typeerror at interpreter shutdown
|
b78b84a90e77c1a19da8ab0d950aa3ca27752b41
|
<ide><path>celery/utils/threads.py
<ide> def run(self):
<ide> self.on_crash("%r crashed: %r", self.name, exc, exc_info=True)
<ide> # exiting by normal means does not work here, so force exit.
<ide> os._exit(1)
<add> try:
<add> self._is_stopped.set()
<add> except TypeError: # pragma: no cover
<add> # we lost the race at interpreter shutdown,
<add> # so gc collected built-in modules.
<add> pass
<ide> self._is_stopped.set()
<ide>
<ide> def stop(self):
| 1
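The pattern here is worth calling out: during interpreter shutdown, module globals referenced from a daemon thread's final cleanup may already have been garbage-collected, which surfaces as a TypeError. A minimal standalone sketch of the same guard, with invented names (not celery's API):

```python
import threading

class StoppableWorker(threading.Thread):
    """Illustrative worker whose cleanup tolerates interpreter shutdown."""

    def __init__(self):
        super().__init__(daemon=True)
        self._is_stopped = threading.Event()

    def run(self):
        try:
            pass  # ... worker body ...
        finally:
            try:
                self._is_stopped.set()
            except TypeError:  # pragma: no cover
                # Lost the race at interpreter shutdown: built-ins are gone.
                pass
```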
|
Java
|
Java
|
expose reactinstancemanager from reactrootview
|
dc16150bae19683a5e8338faa7e004b83a9cdb3c
|
<ide><path>ReactAndroid/src/main/java/com/facebook/react/ReactRootView.java
<ide> public void setRootViewTag(int rootViewTag) {
<ide> mRootViewTag = rootViewTag;
<ide> }
<ide>
<add> @Nullable
<add> public ReactInstanceManager getReactInstanceManager() {
<add> return mReactInstanceManager;
<add> }
<add>
<ide> private class CustomGlobalLayoutListener implements ViewTreeObserver.OnGlobalLayoutListener {
<ide> private final Rect mVisibleViewArea;
<ide> private final int mMinKeyboardHeightDetected;
| 1
|
Ruby
|
Ruby
|
clear the correct query cache
|
fa7efca553e325b2aabb087a4eddf4560c356094
|
<ide><path>activerecord/lib/active_record/query_cache.rb
<ide> def self.run
<ide> enabled = connection.query_cache_enabled
<ide> connection.enable_query_cache!
<ide>
<del> enabled
<add> [connection, enabled]
<ide> end
<ide>
<del> def self.complete(enabled)
<del> ActiveRecord::Base.connection.clear_query_cache
<del> ActiveRecord::Base.connection.disable_query_cache! unless enabled
<add> def self.complete((connection, enabled))
<add> connection.clear_query_cache
<add> connection.disable_query_cache! unless enabled
<ide>
<ide> unless ActiveRecord::Base.connected? && ActiveRecord::Base.connection.transaction_open?
<ide> ActiveRecord::Base.clear_active_connections!
<ide><path>activerecord/test/cases/query_cache_test.rb
<ide> def test_exceptional_middleware_clears_and_disables_cache_on_error
<ide> assert !ActiveRecord::Base.connection.query_cache_enabled, "cache off"
<ide> end
<ide>
<add> def test_exceptional_middleware_cleans_up_correct_cache
<add> connection = ActiveRecord::Base.connection
<add> called = false
<add>
<add> mw = middleware { |env|
<add> Task.find 1
<add> Task.find 1
<add> assert_equal 1, connection.query_cache.length
<add>
<add> # Checkin connection early
<add> ActiveRecord::Base.clear_active_connections!
<add> # Make sure ActiveRecord::Base.connection doesn't checkout the same connection
<add> ActiveRecord::Base.connection_pool.remove(connection)
<add>
<add> called = true
<add> }
<add> mw.call({})
<add>
<add> assert called
<add> assert_equal 0, connection.query_cache.length
<add> assert !connection.query_cache_enabled, "cache off"
<add> end
<add>
<ide> def test_exceptional_middleware_leaves_enabled_cache_alone
<ide> ActiveRecord::Base.connection.enable_query_cache!
<ide>
| 2
|
Text
|
Text
|
add 2.13.3 to changelog.md
|
5291c74d5ec0125161e8b7c72d7c6b0688efb4b2
|
<ide><path>CHANGELOG.md
<ide> - [#15178](https://github.com/emberjs/ember.js/pull/15178) Refactor route to lookup controller for QPs.
<ide> - [#15129](https://github.com/emberjs/ember.js/pull/15129) Fix access to service:-document in ember-engines
<ide>
<add>### 2.13.3 (May 31, 2017)
<add>
<add>- [#15284](https://github.com/emberjs/ember.js/pull/15284) [BUGFIX] remove nested transaction assertion from glimmer.
<add>- [glimmerjs/glimmer-vm#529](https://github.com/glimmerjs/glimmer-vm/pull/529) [BUGFIX] Fix issues identified with custom element support.
<add>
<ide> ### 2.13.2 (May 18, 2017)
<ide>
<ide> - Revert over eager dependency upgrades in 2.13.1.
| 1
|
Go
|
Go
|
fix a race condition in bufreader
|
d52451bcf887790df5a6a7cbbc7305747535eb11
|
<ide><path>utils.go
<ide> func (r *bufReader) drain() {
<ide> buf := make([]byte, 1024)
<ide> for {
<ide> n, err := r.reader.Read(buf)
<add> r.l.Lock()
<ide> if err != nil {
<ide> r.err = err
<ide> } else {
<ide> r.buf.Write(buf[0:n])
<ide> }
<del> r.l.Lock()
<ide> r.wait.Signal()
<ide> r.l.Unlock()
<ide> if err != nil {
<ide> func (r *bufReader) drain() {
<ide> }
<ide>
<ide> func (r *bufReader) Read(p []byte) (n int, err error) {
<add> r.l.Lock()
<add> defer r.l.Unlock()
<ide> for {
<ide> n, err = r.buf.Read(p)
<ide> if n > 0 {
<ide> func (r *bufReader) Read(p []byte) (n int, err error) {
<ide> if r.err != nil {
<ide> return 0, r.err
<ide> }
<del> r.l.Lock()
<ide> r.wait.Wait()
<del> r.l.Unlock()
<ide> }
<ide> return
<ide> }
| 1
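The race being fixed is that `drain()` mutated the shared buffer and error field before acquiring the lock that `Read()` synchronizes on, so a reader could observe partially updated state. A rough Python sketch of the corrected locking discipline, with invented names (illustrative only, not Docker's code):

```python
import threading

class BufReader:
    """Producer/consumer buffer: all shared state is touched under one lock."""

    def __init__(self, source):
        self.source = source            # any object with .read(n) -> bytes
        self.buf = bytearray()
        self.err = None
        self.cond = threading.Condition()

    def drain(self):
        while True:
            chunk = self.source.read(1024)
            with self.cond:             # take the lock BEFORE touching shared state
                if chunk:
                    self.buf.extend(chunk)
                else:
                    self.err = EOFError("source exhausted")
                self.cond.notify()
            if not chunk:
                return

    def read(self, n):
        with self.cond:                 # hold the lock across the whole wait loop
            while not self.buf:
                if self.err is not None:
                    raise self.err
                self.cond.wait()
            data = bytes(self.buf[:n])
            del self.buf[:n]
            return data
```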
|
Ruby
|
Ruby
|
enhance docs for update_attribute [ci-skip]
|
40847a7831a2bccaeb04b796c5534418d8b3c334
|
<ide><path>activerecord/lib/active_record/persistence.rb
<ide> def becomes!(klass)
<ide> #
<ide> # This method raises an +ActiveRecord::ActiveRecordError+ if the
<ide> # attribute is marked as readonly.
<add> #
<add> # See also +update_column+.
<ide> def update_attribute(name, value)
<ide> name = name.to_s
<ide> verify_readonly_attribute(name)
| 1
|
PHP
|
PHP
|
update welcome view
|
5396be6ef26aebe99c1c5ac6ec944c349d13f371
|
<ide><path>resources/views/welcome.blade.php
<ide> </div>
<ide>
<ide> <div class="ml-4 text-center text-sm text-gray-500 sm:text-right sm:ml-0">
<del> Build v{{ Illuminate\Foundation\Application::VERSION }}
<add> Laravel v{{ Illuminate\Foundation\Application::VERSION }} (PHP v{{ PHP_VERSION }})
<ide> </div>
<ide> </div>
<ide> </div>
| 1
|
PHP
|
PHP
|
add flag for select type of create controller
|
7971ecc1dd607f4b1ca16c2a4eb08ba05e64c0c6
|
<ide><path>src/Illuminate/Foundation/Console/ModelMakeCommand.php
<ide> public function fire()
<ide>
<ide> if ($this->option('controller')) {
<ide> $controller = Str::studly(class_basename($this->argument('name')));
<del>
<del> $this->call('make:controller', ['name' => "{$controller}Controller", '--resource' => true]);
<add>
<add> if ($this->option('controller_resource')) {
<add> $resource = true;
<add> } else {
<add> $resource = false;
<add> }
<add>
<add> $this->call('make:controller', ['name' => "{$controller}Controller", '--resource' => $resource]);
<ide> }
<ide> }
<ide> }
<ide> protected function getOptions()
<ide> return [
<ide> ['migration', 'm', InputOption::VALUE_NONE, 'Create a new migration file for the model.'],
<ide>
<del> ['controller', 'c', InputOption::VALUE_NONE, 'Create a new resource controller for the model.'],
<add> ['controller', 'c', InputOption::VALUE_NONE, 'Create a new controller for the model.'],
<add>
<add> ['controller_resource', 'c_r', InputOption::VALUE_NONE,
<add> 'Determines whether the generated controller should be a resource controller (only applies when --controller is used).']
<ide> ];
<ide> }
<ide> }
| 1
|
Go
|
Go
|
remove commonunixconfig type
|
9d9679975fad96a28b7c296df3975432f32701ba
|
<ide><path>daemon/config/config_common_unix.go
<del>// +build linux freebsd
<del>
<del>package config // import "github.com/docker/docker/daemon/config"
<del>
<del>import (
<del> "github.com/docker/docker/api/types"
<del>)
<del>
<del>// CommonUnixConfig defines configuration of a docker daemon that is
<del>// common across Unix platforms.
<del>type CommonUnixConfig struct {
<del> Runtimes map[string]types.Runtime `json:"runtimes,omitempty"`
<del> DefaultRuntime string `json:"default-runtime,omitempty"`
<del> DefaultInitBinary string `json:"default-init,omitempty"`
<del>}
<del>
<del>// GetRuntime returns the runtime path and arguments for a given
<del>// runtime name
<del>func (conf *Config) GetRuntime(name string) *types.Runtime {
<del> conf.Lock()
<del> defer conf.Unlock()
<del> if rt, ok := conf.Runtimes[name]; ok {
<del> return &rt
<del> }
<del> return nil
<del>}
<del>
<del>// GetDefaultRuntimeName returns the current default runtime
<del>func (conf *Config) GetDefaultRuntimeName() string {
<del> conf.Lock()
<del> rt := conf.DefaultRuntime
<del> conf.Unlock()
<del>
<del> return rt
<del>}
<del>
<del>// GetAllRuntimes returns a copy of the runtimes map
<del>func (conf *Config) GetAllRuntimes() map[string]types.Runtime {
<del> conf.Lock()
<del> rts := conf.Runtimes
<del> conf.Unlock()
<del> return rts
<del>}
<del>
<del>// GetExecRoot returns the user configured Exec-root
<del>func (conf *Config) GetExecRoot() string {
<del> return conf.ExecRoot
<del>}
<del>
<del>// GetInitPath returns the configured docker-init path
<del>func (conf *Config) GetInitPath() string {
<del> conf.Lock()
<del> defer conf.Unlock()
<del> if conf.InitPath != "" {
<del> return conf.InitPath
<del> }
<del> if conf.DefaultInitBinary != "" {
<del> return conf.DefaultInitBinary
<del> }
<del> return DefaultInitBinary
<del>}
<del>
<del>// GetResolvConf returns the appropriate resolv.conf
<del>// Check setupResolvConf on how this is selected
<del>func (conf *Config) GetResolvConf() string {
<del> return conf.ResolvConf
<del>}
<ide><path>daemon/config/config_common_unix_test.go
<ide> import (
<ide> "github.com/docker/docker/api/types"
<ide> )
<ide>
<del>func TestCommonUnixValidateConfigurationErrors(t *testing.T) {
<add>func TestUnixValidateConfigurationErrors(t *testing.T) {
<ide> testCases := []struct {
<ide> config *Config
<ide> }{
<ide> // Can't override the stock runtime
<ide> {
<ide> config: &Config{
<del> CommonUnixConfig: CommonUnixConfig{
<del> Runtimes: map[string]types.Runtime{
<del> StockRuntimeName: {},
<del> },
<add> Runtimes: map[string]types.Runtime{
<add> StockRuntimeName: {},
<ide> },
<ide> },
<ide> },
<ide> // Default runtime should be present in runtimes
<ide> {
<ide> config: &Config{
<del> CommonUnixConfig: CommonUnixConfig{
<del> Runtimes: map[string]types.Runtime{
<del> "foo": {},
<del> },
<del> DefaultRuntime: "bar",
<add> Runtimes: map[string]types.Runtime{
<add> "foo": {},
<ide> },
<add> DefaultRuntime: "bar",
<ide> },
<ide> },
<ide> }
<ide> func TestCommonUnixValidateConfigurationErrors(t *testing.T) {
<ide> }
<ide> }
<ide>
<del>func TestCommonUnixGetInitPath(t *testing.T) {
<add>func TestUnixGetInitPath(t *testing.T) {
<ide> testCases := []struct {
<ide> config *Config
<ide> expectedInitPath string
<ide> func TestCommonUnixGetInitPath(t *testing.T) {
<ide> },
<ide> {
<ide> config: &Config{
<del> CommonUnixConfig: CommonUnixConfig{
<del> DefaultInitBinary: "foo-init-bin",
<del> },
<add> DefaultInitBinary: "foo-init-bin",
<ide> },
<ide> expectedInitPath: "foo-init-bin",
<ide> },
<ide> {
<ide> config: &Config{
<del> InitPath: "init-path-A",
<del> CommonUnixConfig: CommonUnixConfig{
<del> DefaultInitBinary: "init-path-B",
<del> },
<add> InitPath: "init-path-A",
<add> DefaultInitBinary: "init-path-B",
<ide> },
<ide> expectedInitPath: "init-path-A",
<ide> },
<ide><path>daemon/config/config_unix.go
<ide> import (
<ide> "fmt"
<ide> "net"
<ide>
<add> "github.com/docker/docker/api/types"
<ide> containertypes "github.com/docker/docker/api/types/container"
<ide> "github.com/docker/docker/opts"
<ide> units "github.com/docker/go-units"
<ide> type BridgeConfig struct {
<ide> type Config struct {
<ide> CommonConfig
<ide>
<del> // These fields are common to all unix platforms.
<del> CommonUnixConfig
<ide> // Fields below here are platform specific.
<add> Runtimes map[string]types.Runtime `json:"runtimes,omitempty"`
<add> DefaultRuntime string `json:"default-runtime,omitempty"`
<add> DefaultInitBinary string `json:"default-init,omitempty"`
<ide> CgroupParent string `json:"cgroup-parent,omitempty"`
<ide> EnableSelinuxSupport bool `json:"selinux-enabled,omitempty"`
<ide> RemappedRoot string `json:"userns-remap,omitempty"`
<ide> type Config struct {
<ide> Rootless bool `json:"rootless,omitempty"`
<ide> }
<ide>
<add>// GetRuntime returns the runtime path and arguments for a given
<add>// runtime name
<add>func (conf *Config) GetRuntime(name string) *types.Runtime {
<add> conf.Lock()
<add> defer conf.Unlock()
<add> if rt, ok := conf.Runtimes[name]; ok {
<add> return &rt
<add> }
<add> return nil
<add>}
<add>
<add>// GetDefaultRuntimeName returns the current default runtime
<add>func (conf *Config) GetDefaultRuntimeName() string {
<add> conf.Lock()
<add> rt := conf.DefaultRuntime
<add> conf.Unlock()
<add>
<add> return rt
<add>}
<add>
<add>// GetAllRuntimes returns a copy of the runtimes map
<add>func (conf *Config) GetAllRuntimes() map[string]types.Runtime {
<add> conf.Lock()
<add> rts := conf.Runtimes
<add> conf.Unlock()
<add> return rts
<add>}
<add>
<add>// GetExecRoot returns the user configured Exec-root
<add>func (conf *Config) GetExecRoot() string {
<add> return conf.ExecRoot
<add>}
<add>
<add>// GetInitPath returns the configured docker-init path
<add>func (conf *Config) GetInitPath() string {
<add> conf.Lock()
<add> defer conf.Unlock()
<add> if conf.InitPath != "" {
<add> return conf.InitPath
<add> }
<add> if conf.DefaultInitBinary != "" {
<add> return conf.DefaultInitBinary
<add> }
<add> return DefaultInitBinary
<add>}
<add>
<add>// GetResolvConf returns the appropriate resolv.conf
<add>// Check setupResolvConf on how this is selected
<add>func (conf *Config) GetResolvConf() string {
<add> return conf.ResolvConf
<add>}
<add>
<ide> // IsSwarmCompatible defines if swarm mode can be enabled in this config
<ide> func (conf *Config) IsSwarmCompatible() error {
<ide> if conf.ClusterStore != "" || conf.ClusterAdvertise != "" {
<ide><path>daemon/container_unix_test.go
<ide> func TestContainerWarningHostAndPublishPorts(t *testing.T) {
<ide> PortBindings: tc.ports,
<ide> }
<ide> cs := &config.Config{
<del> CommonUnixConfig: config.CommonUnixConfig{
<del> Runtimes: map[string]types.Runtime{"runc": {}},
<del> },
<add> Runtimes: map[string]types.Runtime{"runc": {}},
<ide> }
<ide> d := &Daemon{configStore: cs}
<ide> wrns, err := d.verifyContainerSettings("", hostConfig, &containertypes.Config{}, false)
| 4
|
Ruby
|
Ruby
|
move habtm translation to a builder class
|
0091808a081e85d4f44631984fa8a54ddf345868
|
<ide><path>activerecord/lib/active_record/associations.rb
<ide> module Builder #:nodoc:
<ide> autoload :HasOne, 'active_record/associations/builder/has_one'
<ide> autoload :HasMany, 'active_record/associations/builder/has_many'
<ide> autoload :HasAndBelongsToMany, 'active_record/associations/builder/has_and_belongs_to_many'
<add> autoload :HABTM, 'active_record/associations/builder/has_and_belongs_to_many'
<ide> end
<ide>
<ide> eager_autoload do
<ide> def has_and_belongs_to_many(name, scope = nil, options = {}, &extension)
<ide> has_and_belongs_to_many1(name, scope, options, &extension)
<ide> end
<ide>
<del> class JoinTableResolver
<del> KnownTable = Struct.new :join_table
<del>
<del> class KnownClass
<del> def initialize(rhs_class, lhs_class_name)
<del> @rhs_class = rhs_class
<del> @lhs_class_name = lhs_class_name
<del> @join_table = nil
<del> end
<del>
<del> def join_table
<del> @join_table ||= [@rhs_class.table_name, klass.table_name].sort.join("\0").gsub(/^(.*_)(.+)\0\1(.+)/, '\1\2_\3').gsub("\0", "_")
<del> end
<del>
<del> private
<del> def klass; @lhs_class_name.constantize; end
<del> end
<del>
<del> def self.build(rhs_class, name, options)
<del> if options[:join_table]
<del> KnownTable.new options[:join_table]
<del> else
<del> class_name = options.fetch(:class_name) {
<del> name.to_s.camelize.singularize
<del> }
<del> KnownClass.new rhs_class, class_name
<del> end
<del> end
<del> end
<del>
<del> def belongs_to_options(options)
<del> rhs_options = {}
<del>
<del> if options.key? :class_name
<del> rhs_options[:foreign_key] = options[:class_name].foreign_key
<del> rhs_options[:class_name] = options[:class_name]
<del> end
<del>
<del> if options.key? :association_foreign_key
<del> rhs_options[:foreign_key] = options[:association_foreign_key]
<del> end
<del>
<del> rhs_options
<del> end
<del>
<ide> def has_and_belongs_to_many1(name, scope = nil, options = {}, &extension)
<del> habtm = JoinTableResolver.build self, name, options
<del>
<del> join_model = Class.new(ActiveRecord::Base) {
<del> class << self;
<del> attr_accessor :class_resolver
<del> attr_accessor :name
<del> attr_accessor :table_name_resolver
<del> attr_accessor :left_association_name
<del> attr_accessor :right_association_name
<del> end
<del>
<del> def self.table_name
<del> table_name_resolver.join_table
<del> end
<del>
<del> def self.compute_type(class_name)
<del> class_resolver.compute_type class_name
<del> end
<add> builder = Builder::HABTM.new name, self, options
<ide>
<del> def self.add_left_association(name, options)
<del> self.left_association_name = name
<del> belongs_to name, options
<del> end
<del>
<del> def self.add_right_association(name, options)
<del> rhs_name = name.to_s.singularize.to_sym
<del> self.right_association_name = rhs_name
<del> belongs_to rhs_name, options
<del> end
<del> }
<del>
<del> join_model.name = "HABTM_#{name.to_s.camelize}"
<del> join_model.table_name_resolver = habtm
<del> join_model.class_resolver = self
<del>
<del> join_model.add_left_association :left_side, class: self
<del> join_model.add_right_association name, belongs_to_options(options)
<add> join_model = builder.through_model
<ide>
<ide> middle_name = [self.name.downcase.pluralize, name].join('_').gsub(/::/, '_').to_sym
<ide>
<del> middle_options = {}
<del> middle_options[:class] = join_model
<del> middle_options[:source] = join_model.left_association_name
<del> if options.key? :foreign_key
<del> middle_options[:foreign_key] = options[:foreign_key]
<del> end
<add> middle_options = builder.middle_options join_model
<ide>
<ide> hm_builder = Builder::HasMany.create_builder(self,
<ide> middle_name,
<ide> nil,
<ide> middle_options)
<ide> middle_reflection = hm_builder.build self
<add>
<ide> hm_builder.define_callbacks self, middle_reflection
<ide>
<del> Reflection.add_reflection self, middle_name, middle_reflection
<add> Reflection.add_reflection self, middle_reflection.name, middle_reflection
<ide>
<ide> include Module.new {
<ide> class_eval <<-RUBY, __FILE__, __LINE__ + 1
<ide> def destroy_associations
<del> association(:#{middle_name}).delete_all(:delete_all)
<add> association(:#{middle_reflection.name}).delete_all(:delete_all)
<ide> association(:#{name}).reset
<ide> super
<ide> end
<ide><path>activerecord/lib/active_record/associations/builder/has_and_belongs_to_many.rb
<ide> module ActiveRecord::Associations::Builder
<add> class HABTM
<add> class JoinTableResolver
<add> KnownTable = Struct.new :join_table
<add>
<add> class KnownClass
<add> def initialize(rhs_class, lhs_class_name)
<add> @rhs_class = rhs_class
<add> @lhs_class_name = lhs_class_name
<add> @join_table = nil
<add> end
<add>
<add> def join_table
<add> @join_table ||= [@rhs_class.table_name, klass.table_name].sort.join("\0").gsub(/^(.*_)(.+)\0\1(.+)/, '\1\2_\3').gsub("\0", "_")
<add> end
<add>
<add> private
<add> def klass; @lhs_class_name.constantize; end
<add> end
<add>
<add> def self.build(rhs_class, name, options)
<add> if options[:join_table]
<add> KnownTable.new options[:join_table]
<add> else
<add> class_name = options.fetch(:class_name) {
<add> name.to_s.camelize.singularize
<add> }
<add> KnownClass.new rhs_class, class_name
<add> end
<add> end
<add> end
<add>
<add> attr_reader :lhs_model, :association_name, :options
<add>
<add> def initialize(association_name, lhs_model, options)
<add> @association_name = association_name
<add> @lhs_model = lhs_model
<add> @options = options
<add> end
<add>
<add> def through_model
<add> habtm = JoinTableResolver.build lhs_model, association_name, options
<add>
<add> join_model = Class.new(ActiveRecord::Base) {
<add> class << self;
<add> attr_accessor :class_resolver
<add> attr_accessor :name
<add> attr_accessor :table_name_resolver
<add> attr_accessor :left_association_name
<add> attr_accessor :right_association_name
<add> end
<add>
<add> def self.table_name
<add> table_name_resolver.join_table
<add> end
<add>
<add> def self.compute_type(class_name)
<add> class_resolver.compute_type class_name
<add> end
<add>
<add> def self.add_left_association(name, options)
<add> self.left_association_name = name
<add> belongs_to name, options
<add> end
<add>
<add> def self.add_right_association(name, options)
<add> rhs_name = name.to_s.singularize.to_sym
<add> self.right_association_name = rhs_name
<add> belongs_to rhs_name, options
<add> end
<add>
<add> }
<add>
<add> join_model.name = "HABTM_#{association_name.to_s.camelize}"
<add> join_model.table_name_resolver = habtm
<add> join_model.class_resolver = lhs_model
<add>
<add> join_model.add_left_association :left_side, class: lhs_model
<add> join_model.add_right_association association_name, belongs_to_options(options)
<add> join_model
<add> end
<add>
<add> def middle_options(join_model)
<add> middle_options = {}
<add> middle_options[:class] = join_model
<add> middle_options[:source] = join_model.left_association_name
<add> if options.key? :foreign_key
<add> middle_options[:foreign_key] = options[:foreign_key]
<add> end
<add> middle_options
<add> end
<add>
<add> private
<add>
<add> def belongs_to_options(options)
<add> rhs_options = {}
<add>
<add> if options.key? :class_name
<add> rhs_options[:foreign_key] = options[:class_name].foreign_key
<add> rhs_options[:class_name] = options[:class_name]
<add> end
<add>
<add> if options.key? :association_foreign_key
<add> rhs_options[:foreign_key] = options[:association_foreign_key]
<add> end
<add>
<add> rhs_options
<add> end
<add> end
<add>
<ide> class HasAndBelongsToMany < CollectionAssociation #:nodoc:
<ide> def macro
<ide> :has_and_belongs_to_many
| 2
|
PHP
|
PHP
|
remove useless $app argument
|
babce912cb57699094f35c42cb133dffedb95675
|
<ide><path>src/Illuminate/Foundation/Providers/ArtisanServiceProvider.php
<ide> protected function registerAppNameCommand()
<ide> */
<ide> protected function registerChangesCommand()
<ide> {
<del> $this->app->bindShared('command.changes', function($app)
<add> $this->app->bindShared('command.changes', function()
<ide> {
<ide> return new ChangesCommand;
<ide> });
<ide> protected function registerConsoleMakeCommand()
<ide> */
<ide> protected function registerDownCommand()
<ide> {
<del> $this->app->bindShared('command.down', function($app)
<add> $this->app->bindShared('command.down', function()
<ide> {
<ide> return new DownCommand;
<ide> });
<ide> protected function registerTinkerCommand()
<ide> */
<ide> protected function registerUpCommand()
<ide> {
<del> $this->app->bindShared('command.up', function($app)
<add> $this->app->bindShared('command.up', function()
<ide> {
<ide> return new UpCommand;
<ide> });
| 1
|
Python
|
Python
|
fix gpu usage in language
|
2713041571f9a594ad27ba428283f69e8c2c55dd
|
<ide><path>spacy/language.py
<ide> from contextlib import contextmanager
<ide> import dill
<ide>
<add>import numpy
<add>from thinc.neural import Model
<add>from thinc.neural.ops import NumpyOps, CupyOps
<add>
<ide> from .tokenizer import Tokenizer
<ide> from .vocab import Vocab
<ide> from .tagger import Tagger
<ide> def get_grads(W, dW, key=None):
<ide> state = process.update(docs, golds,
<ide> state=state,
<ide> drop=drop,
<del> sgd=sgd)
<add> sgd=get_grads)
<ide> else:
<ide> process(docs, state=state)
<ide> if sgd is not None:
<ide> for key, (W, dW) in grads.items():
<add> # TODO: Unhack this when thinc improves
<add> if isinstance(W, numpy.ndarray):
<add> sgd.ops = NumpyOps()
<add> else:
<add> sgd.ops = CupyOps()
<ide> sgd(W, dW, key=key)
<ide> return state
<ide>
<ide> def begin_training(self, gold_tuples, **cfg):
<ide> # Handle crossing dependencies
<ide> gold_tuples = PseudoProjectivity.preprocess_training_data(gold_tuples)
<ide> contexts = []
<add> if cfg.get('use_gpu'):
<add> Model.ops = CupyOps()
<add> Model.Ops = CupyOps
<add> print("Use GPU")
<ide> for proc in self.pipeline:
<ide> if hasattr(proc, 'begin_training'):
<ide> context = proc.begin_training(gold_tuples,
<ide> def begin_training(self, gold_tuples, **cfg):
<ide> trainer = Trainer(self, gold_tuples, **cfg)
<ide> yield trainer, trainer.optimizer
<ide>
<add> @contextmanager
<add> def use_params(self, params, **cfg):
<add> contexts = [pipe.model.use_params(params) for pipe
<add> in self.pipeline if hasattr(pipe, 'model')
<add> and hasattr(pipe.model, 'use_params')]
<add> yield
<add> for context in contexts:
<add> try:
<add> next(context.gen)
<add> except StopIteration:
<add> pass
<add>
<ide> def pipe(self, texts, n_threads=2, batch_size=1000, **disabled):
<ide> """
<ide> Process texts as a stream, and yield Doc objects in order.
| 1
|
Ruby
|
Ruby
|
fix spelling in hasoneassociationstest
|
a0b8ac3eb240cf0ad5a72ac9da1f11c8990ff646
|
<ide><path>activerecord/test/cases/associations/has_one_associations_test.rb
<ide> def test_has_one_with_touch_option_on_empty_update
<ide> class SpecialBook < ActiveRecord::Base
<ide> self.table_name = "books"
<ide> belongs_to :author, class_name: "SpecialAuthor"
<del> has_one :subscription, class_name: "SpecialSupscription", foreign_key: "subscriber_id"
<add> has_one :subscription, class_name: "SpecialSubscription", foreign_key: "subscriber_id"
<ide>
<ide> enum status: [:proposed, :written, :published]
<ide> end
<ide> class SpecialAuthor < ActiveRecord::Base
<ide> has_one :book, class_name: "SpecialBook", foreign_key: "author_id"
<ide> end
<ide>
<del> class SpecialSupscription < ActiveRecord::Base
<add> class SpecialSubscription < ActiveRecord::Base
<ide> self.table_name = "subscriptions"
<ide> belongs_to :book, class_name: "SpecialBook"
<ide> end
| 1
|
Python
|
Python
|
add a flag to specify distribution strategies.
|
79b57a3f6ffbfba181b904f3c571d7acc45ed494
|
<ide><path>official/keras_application_models/benchmark_main.py
<ide> def run_keras_model_benchmark(_):
<ide> # Use distribution strategy
<ide> if FLAGS.dist_strat:
<ide> distribution = distribution_utils.get_distribution_strategy(
<add> distribution_strategy=FLAGS.distribution_strategy,
<ide> num_gpus=num_gpus)
<ide> elif num_gpus > 1:
<ide> # Run with multi_gpu_model
<ide><path>official/mnist/mnist.py
<ide> def run_mnist(flags_obj):
<ide> allow_soft_placement=True)
<ide>
<ide> distribution_strategy = distribution_utils.get_distribution_strategy(
<del> flags_core.get_num_gpus(flags_obj), flags_obj.all_reduce_alg)
<add> distribution_strategy=flags_obj.distribution_strategy,
<add> num_gpus=flags_core.get_num_gpus(flags_obj),
<add> all_reduce_alg=flags_obj.all_reduce_alg)
<ide>
<ide> run_config = tf.estimator.RunConfig(
<ide> train_distribute=distribution_strategy, session_config=session_config)
<ide><path>official/resnet/keras/keras_cifar_benchmark.py
<ide> def benchmark_graph_2_gpu(self):
<ide> def benchmark_graph_1_gpu_no_dist_strat(self):
<ide> """Test keras based model with Keras fit but not distribution strategies."""
<ide> self._setup()
<del> FLAGS.turn_off_distribution_strategy = True
<add> FLAGS.distribution_strategy = 'off'
<ide> FLAGS.num_gpus = 1
<ide> FLAGS.data_dir = DATA_DIR
<ide> FLAGS.batch_size = 128
<ide> def benchmark_1_gpu_no_dist_strat(self):
<ide> self._setup()
<ide> FLAGS.num_gpus = 1
<ide> FLAGS.enable_eager = True
<del> FLAGS.turn_off_distribution_strategy = True
<add> FLAGS.distribution_strategy = 'off'
<ide> FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_no_dist_strat')
<ide> FLAGS.batch_size = 128
<ide> self._run_and_report_benchmark()
<ide> def benchmark_graph_1_gpu_no_dist_strat(self):
<ide> self._setup()
<ide> FLAGS.num_gpus = 1
<ide> FLAGS.enable_eager = False
<del> FLAGS.turn_off_distribution_strategy = True
<add> FLAGS.distribution_strategy = 'off'
<ide> FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu_no_dist_strat')
<ide> FLAGS.batch_size = 128
<ide> self._run_and_report_benchmark()
<ide> def benchmark_1_gpu(self):
<ide> self._setup()
<ide> FLAGS.num_gpus = 1
<ide> FLAGS.enable_eager = True
<del> FLAGS.turn_off_distribution_strategy = False
<add> FLAGS.distribution_strategy = 'default'
<ide> FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu')
<ide> FLAGS.batch_size = 128
<ide> self._run_and_report_benchmark()
<ide> def benchmark_graph_1_gpu(self):
<ide> self._setup()
<ide> FLAGS.num_gpus = 1
<ide> FLAGS.enable_eager = False
<del> FLAGS.turn_off_distribution_strategy = False
<add> FLAGS.distribution_strategy = 'default'
<ide> FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu')
<ide> FLAGS.batch_size = 128
<ide> self._run_and_report_benchmark()
<ide> def benchmark_2_gpu(self):
<ide> self._setup()
<ide> FLAGS.num_gpus = 2
<ide> FLAGS.enable_eager = True
<del> FLAGS.turn_off_distribution_strategy = False
<add> FLAGS.distribution_strategy = 'default'
<ide> FLAGS.model_dir = self._get_model_dir('benchmark_2_gpu')
<ide> FLAGS.batch_size = 128 * 2 # 2 GPUs
<ide> self._run_and_report_benchmark()
<ide> def benchmark_graph_2_gpu(self):
<ide> self._setup()
<ide> FLAGS.num_gpus = 2
<ide> FLAGS.enable_eager = False
<del> FLAGS.turn_off_distribution_strategy = False
<add> FLAGS.distribution_strategy = 'default'
<ide> FLAGS.model_dir = self._get_model_dir('benchmark_graph_2_gpu')
<ide> FLAGS.batch_size = 128 * 2 # 2 GPUs
<ide> self._run_and_report_benchmark()
<ide><path>official/resnet/keras/keras_cifar_main.py
<ide> def run(flags_obj):
<ide> parse_record_fn=parse_record_keras)
<ide>
<ide> strategy = distribution_utils.get_distribution_strategy(
<del> num_gpus=flags_obj.num_gpus,
<del> turn_off_distribution_strategy=flags_obj.turn_off_distribution_strategy)
<add> distribution_strategy=flags_obj.distribution_strategy,
<add> num_gpus=flags_obj.num_gpus)
<ide>
<ide> strategy_scope = keras_common.get_strategy_scope(strategy)
<ide>
<ide><path>official/resnet/keras/keras_imagenet_benchmark.py
<ide> def benchmark_1_gpu_no_dist_strat(self):
<ide>
<ide> FLAGS.num_gpus = 1
<ide> FLAGS.enable_eager = True
<del> FLAGS.turn_off_distribution_strategy = True
<add> FLAGS.distribution_strategy = 'off'
<ide> FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_no_dist_strat')
<ide> FLAGS.batch_size = 128
<ide> self._run_and_report_benchmark()
<ide> def benchmark_graph_1_gpu_no_dist_strat(self):
<ide>
<ide> FLAGS.num_gpus = 1
<ide> FLAGS.enable_eager = False
<del> FLAGS.turn_off_distribution_strategy = True
<add> FLAGS.distribution_strategy = 'off'
<ide> FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu_no_dist_strat')
<ide> FLAGS.batch_size = 128
<ide> self._run_and_report_benchmark()
<ide> def benchmark_1_gpu(self):
<ide>
<ide> FLAGS.num_gpus = 1
<ide> FLAGS.enable_eager = True
<del> FLAGS.turn_off_distribution_strategy = False
<add> FLAGS.distribution_strategy = 'default'
<ide> FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu')
<ide> FLAGS.batch_size = 128
<ide> self._run_and_report_benchmark()
<ide> def benchmark_graph_1_gpu(self):
<ide>
<ide> FLAGS.num_gpus = 1
<ide> FLAGS.enable_eager = False
<del> FLAGS.turn_off_distribution_strategy = False
<add> FLAGS.distribution_strategy = 'default'
<ide> FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu')
<ide> FLAGS.batch_size = 128
<ide> self._run_and_report_benchmark()
<ide> def benchmark_8_gpu(self):
<ide>
<ide> FLAGS.num_gpus = 8
<ide> FLAGS.enable_eager = True
<del> FLAGS.turn_off_distribution_strategy = False
<add> FLAGS.distribution_strategy = 'default'
<ide> FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu')
<ide> FLAGS.batch_size = 128 * 8 # 8 GPUs
<ide> self._run_and_report_benchmark()
<ide> def benchmark_graph_8_gpu(self):
<ide>
<ide> FLAGS.num_gpus = 8
<ide> FLAGS.enable_eager = False
<del> FLAGS.turn_off_distribution_strategy = False
<add> FLAGS.distribution_strategy = 'default'
<ide> FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu')
<ide> FLAGS.batch_size = 128 * 8 # 8 GPUs
<ide> self._run_and_report_benchmark()
<ide><path>official/resnet/keras/keras_imagenet_main.py
<ide> def run(flags_obj):
<ide> parse_record_fn=parse_record_keras)
<ide>
<ide> strategy = distribution_utils.get_distribution_strategy(
<del> num_gpus=flags_obj.num_gpus,
<del> turn_off_distribution_strategy=flags_obj.turn_off_distribution_strategy)
<add> distribution_strategy=flags_obj.distribution_strategy,
<add> num_gpus=flags_obj.num_gpus)
<ide>
<ide> strategy_scope = keras_common.get_strategy_scope(strategy)
<ide>
<ide><path>official/resnet/resnet_run_loop.py
<ide> def resnet_main(
<ide> allow_soft_placement=True)
<ide>
<ide> distribution_strategy = distribution_utils.get_distribution_strategy(
<del> flags_core.get_num_gpus(flags_obj), flags_obj.all_reduce_alg)
<add> distribution_strategy=flags_obj.distribution_strategy,
<add> num_gpus=flags_core.get_num_gpus(flags_obj),
<add> all_reduce_alg=flags_obj.all_reduce_alg)
<ide>
<ide> # Creates a `RunConfig` that checkpoints every 24 hours which essentially
<ide> # results in checkpoints determined only by `epochs_between_evals`.
<ide> def define_resnet_flags(resnet_size_choices=None):
<ide> 'the expense of image resize/cropping being done as part of model '
<ide> 'inference. Note, this flag only applies to ImageNet and cannot '
<ide> 'be used for CIFAR.'))
<del> flags.DEFINE_boolean(
<del> name='turn_off_distribution_strategy', default=False,
<del> help=flags_core.help_wrap('Set to True to not use distribution '
<del> 'strategies.'))
<ide> choice_kwargs = dict(
<ide> name='resnet_size', short_name='rs', default='50',
<ide> help=flags_core.help_wrap('The size of the ResNet model to use.'))
<ide><path>official/transformer/transformer_main.py
<ide> def construct_estimator(flags_obj, params, schedule_manager):
<ide> """
<ide> if not params["use_tpu"]:
<ide> distribution_strategy = distribution_utils.get_distribution_strategy(
<del> flags_core.get_num_gpus(flags_obj), flags_obj.all_reduce_alg)
<add> distribution_strategy=flags_obj.distribution_strategy,
<add> num_gpus=flags_core.get_num_gpus(flags_obj),
<add> all_reduce_alg=flags_obj.all_reduce_alg)
<ide> return tf.estimator.Estimator(
<ide> model_fn=model_fn, model_dir=flags_obj.model_dir, params=params,
<ide> config=tf.estimator.RunConfig(train_distribute=distribution_strategy))
<ide><path>official/utils/flags/_base.py
<ide>
<ide> def define_base(data_dir=True, model_dir=True, clean=True, train_epochs=True,
<ide> epochs_between_evals=True, stop_threshold=True, batch_size=True,
<del> num_gpu=True, hooks=True, export_dir=True):
<add> num_gpu=True, hooks=True, export_dir=True,
<add> distribution_strategy=True):
<ide> """Register base flags.
<ide>
<ide> Args:
<ide> def define_base(data_dir=True, model_dir=True, clean=True, train_epochs=True,
<ide> num_gpu: Create a flag to specify the number of GPUs used.
<ide> hooks: Create a flag to specify hooks for logging.
<ide> export_dir: Create a flag to specify where a SavedModel should be exported.
<del>
<add> distribution_strategy: Create a flag to specify which Distribution Strategy
<add> to use.
<ide> Returns:
<ide> A list of flags for core.py to marks as key flags.
<ide> """
<ide> def define_base(data_dir=True, model_dir=True, clean=True, train_epochs=True,
<ide> )
<ide> key_flags.append("export_dir")
<ide>
<add> if distribution_strategy:
<add> flags.DEFINE_string(
<add> name="distribution_strategy", short_name="ds", default="default",
<add> help=help_wrap("The Distribution Strategy to use for training. "
<add> "Accepted values are 'off', 'default', 'one_device', "
<add> "'mirrored', 'parameter_server', 'collective', "
<add> "case insensitive. 'off' means not to use "
<add> "Distribution Strategy; 'default' means to choose "
<add> "from `MirroredStrategy` or `OneDeviceStrategy` "
<add> "according to the number of GPUs.")
<add> )
<add>
<ide> return key_flags
<ide>
<ide>
<ide><path>official/utils/misc/distribution_utils.py
<ide> import tensorflow as tf
<ide>
<ide>
<del>def get_distribution_strategy(num_gpus,
<del> all_reduce_alg=None,
<del> turn_off_distribution_strategy=False):
<add>def get_distribution_strategy(distribution_strategy="default",
<add> num_gpus=0,
<add> all_reduce_alg=None):
<ide> """Return a DistributionStrategy for running the model.
<ide>
<ide> Args:
<add> distribution_strategy: a string specifying which distribution strategy to use.
<add> Accepted values are 'off', 'default', 'one_device', 'mirrored',
<add> 'parameter_server', 'collective', case insensitive. 'off' means not to use
<add> Distribution Strategy; 'default' means to choose from `MirroredStrategy`
<add> or `OneDeviceStrategy` according to the number of GPUs.
<ide> num_gpus: Number of GPUs to run this model.
<del> all_reduce_alg: Specify which algorithm to use when performing all-reduce.
<del> See tf.contrib.distribute.AllReduceCrossDeviceOps for available
<del> algorithms. If None, DistributionStrategy will choose based on device
<del> topology.
<del> turn_off_distribution_strategy: when set to True, do not use any
<del> distribution strategy. Note that when it is True, and num_gpus is
<del> larger than 1, it will raise a ValueError.
<add> all_reduce_alg: Optional. Specify which algorithm to use when performing
<add> all-reduce. See tf.contrib.distribute.AllReduceCrossDeviceOps for
<add> available algorithms. If None, DistributionStrategy will choose based on
<add> device topology.
<ide>
<ide> Returns:
<del> tf.contrib.distribute.DistibutionStrategy object.
<add> tf.distribute.DistributionStrategy object.
<ide> Raises:
<del> ValueError: if turn_off_distribution_strategy is True and num_gpus is
<del> larger than 1
<add> ValueError: if `distribution_strategy` is 'off' or 'one_device' and
<add> `num_gpus` is larger than 1; or `num_gpus` is negative.
<ide> """
<del> if num_gpus == 0:
<del> if turn_off_distribution_strategy:
<del> return None
<add> if num_gpus < 0:
<add> raise ValueError("`num_gpus` can not be negative.")
<add>
<add> distribution_strategy = distribution_strategy.lower()
<add> if distribution_strategy == "off":
<add> if num_gpus > 1:
<add> raise ValueError("When {} GPUs are specified, distribution_strategy flag "
<add> "cannot be set to 'off'.".format(num_gpus))
<add> return None
<add>
<add> if (distribution_strategy == "one_device" or
<add> (distribution_strategy == "default" and num_gpus <= 1)):
<add> if num_gpus == 0:
<add> return tf.contrib.distribute.OneDeviceStrategy("device:CPU:0")
<ide> else:
<del> return tf.contrib.distribute.OneDeviceStrategy('device:CPU:0')
<del> elif num_gpus == 1:
<del> if turn_off_distribution_strategy:
<del> return None
<add> if num_gpus > 1:
<add> raise ValueError("`OneDeviceStrategy` can not be used for more than "
<add> "one device.")
<add> return tf.contrib.distribute.OneDeviceStrategy("device:GPU:0")
<add>
<add> if distribution_strategy in ("mirrored", "default"):
<add> if num_gpus == 0:
<add> assert distribution_strategy == "mirrored"
<add> devices = ["device:CPU:0"]
<ide> else:
<del> return tf.contrib.distribute.OneDeviceStrategy('device:GPU:0')
<del> elif turn_off_distribution_strategy:
<del> raise ValueError('When {} GPUs are specified, '
<del> 'turn_off_distribution_strategy flag cannot be set to'
<del> 'True.'.format(num_gpus))
<del> else: # num_gpus > 1 and not turn_off_distribution_strategy
<del> devices = ['device:GPU:%d' % i for i in range(num_gpus)]
<add> devices = ["device:GPU:%d" % i for i in range(num_gpus)]
<ide> if all_reduce_alg:
<ide> return tf.distribute.MirroredStrategy(
<ide> devices=devices,
<ide> def get_distribution_strategy(num_gpus,
<ide> else:
<ide> return tf.distribute.MirroredStrategy(devices=devices)
<ide>
<add> if distribution_strategy == "collective":
<add> return tf.contrib.distribute.CollectiveAllReduceStrategy(
<add> num_gpus_per_worker=num_gpus)
<add>
<add> if distribution_strategy == "parameter_server":
<add> return tf.contrib.distribute.ParameterServerStrategy(
<add> num_gpus_per_worker=num_gpus)
<add>
<add> raise ValueError(
<add> "Unrecognized Distribution Strategy: %r" % distribution_strategy)
<add>
<ide>
<ide> def per_device_batch_size(batch_size, num_gpus):
<ide> """For multi-gpu, batch-size must be a multiple of the number of GPUs.
<ide><path>official/utils/misc/distribution_utils_test.py
<ide> class GetDistributionStrategyTest(tf.test.TestCase):
<ide> """Tests for get_distribution_strategy."""
<ide> def test_one_device_strategy_cpu(self):
<del> ds = distribution_utils.get_distribution_strategy(0)
<add> ds = distribution_utils.get_distribution_strategy(num_gpus=0)
<ide> self.assertEquals(ds.num_replicas_in_sync, 1)
<ide> self.assertEquals(len(ds.extended.worker_devices), 1)
<ide> self.assertIn('CPU', ds.extended.worker_devices[0])
<ide>
<ide> def test_one_device_strategy_gpu(self):
<del> ds = distribution_utils.get_distribution_strategy(1)
<add> ds = distribution_utils.get_distribution_strategy(num_gpus=1)
<ide> self.assertEquals(ds.num_replicas_in_sync, 1)
<ide> self.assertEquals(len(ds.extended.worker_devices), 1)
<ide> self.assertIn('GPU', ds.extended.worker_devices[0])
<ide>
<ide> def test_mirrored_strategy(self):
<del> ds = distribution_utils.get_distribution_strategy(5)
<add> ds = distribution_utils.get_distribution_strategy(num_gpus=5)
<ide> self.assertEquals(ds.num_replicas_in_sync, 5)
<ide> self.assertEquals(len(ds.extended.worker_devices), 5)
<ide> for device in ds.extended.worker_devices:
<ide> self.assertIn('GPU', device)
<ide>
<add> def test_override_strategy(self):
<add> ds = distribution_utils.get_distribution_strategy(
<add> distribution_strategy='collective', num_gpus=2)
<add> self.assertTrue(
<add> isinstance(ds, tf.contrib.distribute.CollectiveAllReduceStrategy))
<add>
<ide>
<ide> class PerDeviceBatchSizeTest(tf.test.TestCase):
<ide> """Tests for per_device_batch_size."""
| 11
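A short usage sketch of the refactored helper above, assuming the module path shown in the patch and a TF 1.x environment where `tf.contrib.distribute` is available; the flag values mirror the strings documented in the new docstring:

```python
from official.utils.misc import distribution_utils  # path taken from the patch

# Default behaviour with 2 GPUs: a MirroredStrategy across both devices.
strategy = distribution_utils.get_distribution_strategy(
    distribution_strategy="default", num_gpus=2)

# Explicitly disable Distribution Strategy (only valid for 0 or 1 GPU).
no_strategy = distribution_utils.get_distribution_strategy(
    distribution_strategy="off", num_gpus=1)
assert no_strategy is None
```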
|
PHP
|
PHP
|
fix inconsistent storage of salt value
|
7ebde9d83be4336fd87321f560e71f07802379f6
|
<ide><path>tests/TestCase/Auth/DigestAuthenticateTest.php
<ide>
<ide> use Cake\Auth\DigestAuthenticate;
<ide> use Cake\Controller\ComponentRegistry;
<del>use Cake\Core\Configure;
<ide> use Cake\Http\Exception\UnauthorizedException;
<ide> use Cake\Http\Response;
<ide> use Cake\Http\ServerRequest;
<ide> use Cake\I18n\Time;
<ide> use Cake\ORM\Entity;
<ide> use Cake\TestSuite\TestCase;
<add>use Cake\Utility\Security;
<ide>
<ide> /**
<ide> * Entity for testing with hidden fields.
<ide> public function setUp(): void
<ide> {
<ide> parent::setUp();
<ide>
<add> $salt = 'foo.bar';
<add> Security::setSalt($salt);
<ide> $this->Collection = $this->getMockBuilder(ComponentRegistry::class)->getMock();
<ide> $this->auth = new DigestAuthenticate($this->Collection, [
<ide> 'realm' => 'localhost',
<ide> 'nonce' => 123,
<ide> 'opaque' => '123abc',
<del> 'secret' => 'foo.bar',
<add> 'secret' => $salt,
<ide> ]);
<del> Configure::write('Security.salt', 'foo.bar');
<ide>
<ide> $password = DigestAuthenticate::password('mariano', 'cake', 'localhost');
<ide> $User = $this->getTableLocator()->get('Users');
<ide> public function testPassword(): void
<ide> */
<ide> protected function generateNonce(?string $secret = null, ?int $expires = 300, ?int $time = null): string
<ide> {
<del> $secret = $secret ?: Configure::read('Security.salt');
<add> $secret = $secret ?: Security::getSalt();
<ide> $time = $time ?: microtime(true);
<ide> $expiryTime = $time + $expires;
<ide> $signatureValue = hash_hmac('sha256', $expiryTime . ':' . $secret, $secret);
| 1
|
Javascript
|
Javascript
|
remove use of this._readablestate
|
90ddb46d522c37d2bc2eb68a6e0c9d52f9fbba42
|
<ide><path>lib/crypto.js
<ide> Hash.prototype._transform = function(chunk, encoding, callback) {
<ide> };
<ide>
<ide> Hash.prototype._flush = function(callback) {
<del> var encoding = this._readableState.encoding || 'buffer';
<del> this.push(this._handle.digest(encoding), encoding);
<add> this.push(this._handle.digest());
<ide> callback();
<ide> };
<ide>
| 1
|
Javascript
|
Javascript
|
use standard naming convention for location
|
35af16591ee00ac74568a9a1ebc859b04bf02658
|
<ide><path>packages/ember-routing/lib/location/api.js
<ide> export default {
<ide> @since 1.4.0
<ide> */
<ide> _getHash: function () {
<del> return getHash(this._location || this.location);
<add> return getHash(this.location);
<ide> }
<ide> };
| 1
|
PHP
|
PHP
|
remove outdated information
|
2937ee514a54b1f5ff3f1f7d9f6dcf183b5b1954
|
<ide><path>src/Error/ErrorHandler.php
<ide> public function __construct($options = [])
<ide> *
<ide> * Template method of BaseErrorHandler.
<ide> *
<del> * Only when debug > 2 will a formatted error be displayed.
<del> *
<ide> * @param array $error An array of error data.
<ide> * @param bool $debug Whether or not the app is in debug mode.
<ide> * @return void
| 1
|
Python
|
Python
|
add repeat to datasets.
|
ae86bfd9ed457514662c04c4f10f7aaf536d85ea
|
<ide><path>official/keras_application_models/dataset.py
<ide> def __init__(self, batch_size):
<ide> y_train = tf.keras.utils.to_categorical(y_train, self.num_classes)
<ide> y_test = tf.keras.utils.to_categorical(y_test, self.num_classes)
<ide> self.train_dataset = tf.data.Dataset.from_tensor_slices(
<del> (x_train, y_train)).shuffle(2000).batch(batch_size)
<add> (x_train, y_train)).shuffle(2000).batch(batch_size).repeat()
<ide> self.test_dataset = tf.data.Dataset.from_tensor_slices(
<del> (x_test, y_test)).shuffle(2000).batch(batch_size)
<add> (x_test, y_test)).shuffle(2000).batch(batch_size).repeat()
| 1
|
Text
|
Text
|
add note to upgrade guide
|
509b43ba80e467128ef120a6a2b9548a48bd6adf
|
<ide><path>upgrade.md
<ide> - Remove call to `redirectIfTrailingSlash` in `bootstrap/start.php` file.
<ide> - Edit `app/config/app.php`; in `aliases` change `'Controller' => 'Illuminate\Routing\Controllers\Controller',`
<ide> to use `Illuminate\Routing\Controller`
<add>- If you are overriding missingMethod in your controllers, add $method as the first parameter.
<ide>\ No newline at end of file
| 1
|
Javascript
|
Javascript
|
fix merge issue
|
dde6bda9516b95649f00ad6b406fcd6cfd911090
|
<ide><path>test/Stats.test.js
<ide> Object {
<ide> ],
<ide> "emitted": true,
<ide> "name": "entryB.js",
<del> "size": 2035,
<add> "size": 2170,
<ide> },
<ide> ],
<ide> "assetsByChunkName": Object {
| 1
|
Python
|
Python
|
add an extra evaluate check
|
b791677cfe67997158be74b1196380f4e115a89b
|
<ide><path>keras/engine/training_test.py
<ide> def test_metric_state_reset_between_test_on_batch_and_evaluate(self):
<ide> x_test = np.random.random((10, 4))
<ide> y_test = np.random.random((10, 1))
<ide> loss, acc = model.test_on_batch(x_test[:2],y_test[:2])
<del> model.evaluate(x_test, y_test)
<add> loss_eval, acc_eval = model.evaluate(x_test, y_test)
<ide> loss_1, acc_1 = model.test_on_batch(x_test[:2],y_test[:2])
<add> loss_eval_1, acc_eval_1 = model.evaluate(x_test, y_test)
<ide> self.assertEqual(loss, loss_1)
<ide> self.assertEqual(acc, acc_1)
<add> self.assertEqual(loss_eval, loss_eval_1)
<add> self.assertEqual(acc_eval, acc_eval_1)
<ide>
<ide> @keras_parameterized.run_with_all_model_types(exclude_models=['sequential'])
<ide> @keras_parameterized.run_all_keras_modes
| 1
|
Text
|
Text
|
remove old comments about suppressing warnings
|
9cc4dadb0b38ae1f686e16aebee97fc633fc6347
|
<ide><path>guides/source/contributing_to_ruby_on_rails.md
<ide> You can invoke `test_jdbcmysql`, `test_jdbcsqlite3` or `test_jdbcpostgresql` als
<ide>
<ide> The test suite runs with warnings enabled. Ideally, Ruby on Rails should issue no warnings, but there may be a few, as well as some from third-party libraries. Please ignore (or fix!) them, if any, and submit patches that do not issue new warnings.
<ide>
<del>As of this writing (December, 2010) they are especially noisy with Ruby 1.9. If you are sure about what you are doing and would like to have a more clear output, there's a way to override the flag:
<add>If you are sure about what you are doing and would like to have a more clear output, there's a way to override the flag:
<ide>
<ide> ```bash
<ide> $ RUBYOPT=-W0 bundle exec rake test
| 1
|
Text
|
Text
|
move changelog entry to the top
|
2dc0918718c49e9e59bef47025f9bb465e44ddb7
|
<ide><path>actionview/CHANGELOG.md
<del>* Changed partial rendering with a collection to allow collections which
<del> implement `to_a`.
<del>
<del> Extracting the collection option had an optimization to avoid unnecessary
<del> queries of ActiveRecord Relations by calling `#to_ary` on the given
<del> collection. Instances of `Enumerator` or `Enumerable` are valid
<del> collections, but they do not implement `#to_ary`. By changing this to
<del> `#to_a`, they will now be extracted and rendered as expected.
<del>
<del> *Steven Harman*
<del>
<ide> * Show cache hits and misses when rendering partials.
<ide>
<ide> Partials using the `cache` helper will show whether a render hit or missed
<ide>
<ide> *Stan Lo*
<ide>
<add>* Changed partial rendering with a collection to allow collections which
<add> implement `to_a`.
<add>
<add> Extracting the collection option had an optimization to avoid unnecessary
<add> queries of ActiveRecord Relations by calling `#to_ary` on the given
<add> collection. Instances of `Enumerator` or `Enumerable` are valid
<add> collections, but they do not implement `#to_ary`. By changing this to
<add> `#to_a`, they will now be extracted and rendered as expected.
<add>
<add> *Steven Harman*
<add>
<ide> * New syntax for tag helpers. Avoid positional parameters and support HTML5 by default.
<ide> Example usage of tag helpers before:
<ide>
| 1
|
Python
|
Python
|
add a comment about the _normalize_two_args trick
|
c5580134c71b68c0bd24104a6393e3cea9cb25ce
|
<ide><path>numpy/_array_api/_array_object.py
<ide> def _normalize_two_args(x1, x2):
<ide> broadcasting, so the resulting shape is the same, but this prevents NumPy
<ide> from not promoting the dtype.
<ide> """
<add> # Another option would be to use signature=(x1.dtype, x2.dtype, None),
<add> # but that only works for ufuncs, so we would have to call the ufuncs
<add> # directly in the operator methods. One should also note that this
<add> # sort of trick wouldn't work for functions like searchsorted, which
<add> # don't do normal broadcasting, but there aren't any functions like
<add> # that in the array API namespace.
<ide> if x1.ndim == 0 and x2.ndim != 0:
<ide> # The _array[None] workaround was chosen because it is relatively
<ide> # performant. broadcast_to(x1._array, x2.shape) is much slower. We
| 1
|
Python
|
Python
|
fix warning in _obtain_input_shape
|
3f9c0a30ab6a024c9b6fbd83f4ae9119c93718b9
|
<ide><path>keras/applications/imagenet_utils.py
<ide> def _obtain_input_shape(input_shape,
<ide> """
<ide> if weights != 'imagenet' and input_shape is not None and len(input_shape) == 3:
<ide> if data_format == 'channels_first':
<del> if input_shape[0] != 3 or input_shape[0] != 1:
<add> if input_shape[0] != 3 and input_shape[0] != 1:
<ide> warnings.warn(
<ide> 'This model usually expects 1 or 3 input channels. '
<ide> 'However, it was passed ' + str(input_shape[0]) + ' input channels.')
<ide> default_shape = (input_shape[0], default_size, default_size)
<ide> else:
<del> if input_shape[-1] != 3 or input_shape[-1] != 1:
<add> if input_shape[-1] != 3 and input_shape[-1] != 1:
<ide> warnings.warn(
<ide> 'This model usually expects 1 or 3 input channels. '
<ide> 'However, it was passed ' + str(input_shape[-1]) + ' input channels.')
| 1
|
Python
|
Python
|
fix invalid type annotation
|
ef16c53859b578c6fb3c817d141fea7a9c9aca8e
|
<ide><path>libcloud/common/types.py
<ide> def tostring(cls, value):
<ide>
<ide> @classmethod
<ide> def fromstring(cls, value):
<del> # type: (str) -> str
<add> # type: (str) -> Optional[str]
<ide> """Return the state object attribute that matches the string
<ide> :param str value: the string to look up
<ide> :return: the state object attribute that matches the string
| 1
|
Javascript
|
Javascript
|
convert vertex colors to linear from srgb
|
98f64b0030da65117057190fb871c6be9baab811
|
<ide><path>examples/jsm/loaders/OBJLoader.js
<ide> import {
<ide> MeshPhongMaterial,
<ide> Points,
<ide> PointsMaterial,
<del> Vector3
<add> Vector3,
<add> Color
<ide> } from 'three';
<ide>
<ide> // o object_name | g group_name
<ide> const _vC = new Vector3();
<ide> const _ab = new Vector3();
<ide> const _cb = new Vector3();
<ide>
<add>const _color = new Color();
<add>
<ide> function ParserState() {
<ide>
<ide> const state = {
<ide> class OBJLoader extends Loader {
<ide> );
<ide> if ( data.length >= 7 ) {
<ide>
<del> state.colors.push(
<add> _color.setRGB(
<ide> parseFloat( data[ 4 ] ),
<ide> parseFloat( data[ 5 ] ),
<ide> parseFloat( data[ 6 ] )
<add> ).convertSRGBToLinear();
<ide>
<del> );
<add> state.colors.push( _color.r, _color.g, _color.b );
<ide>
<ide> } else {
<ide>
| 1
|
Javascript
|
Javascript
|
remove unnecessary `typeof` checks
|
6d3e2628809b94b334551344b4f1fe1c79c16e5b
|
<ide><path>packages/react-reconciler/src/ReactFiberCommitWork.js
<ide> function commitDetachRef(current: Fiber) {
<ide> // deletion, so don't let them throw. Host-originating errors should
<ide> // interrupt deletion, so it's okay
<ide> function commitUnmount(current: Fiber): void {
<del> if (typeof onCommitUnmount === 'function') {
<del> onCommitUnmount(current);
<del> }
<add> onCommitUnmount(current);
<ide>
<ide> switch (current.tag) {
<ide> case ClassComponent: {
<ide><path>packages/react-reconciler/src/ReactFiberScheduler.js
<ide> function commitRoot(root: FiberRoot, finishedWork: Fiber): void {
<ide> isWorking = false;
<ide> stopCommitLifeCyclesTimer();
<ide> stopCommitTimer();
<del> if (typeof onCommitRoot === 'function') {
<del> onCommitRoot(finishedWork.stateNode);
<del> }
<add> onCommitRoot(finishedWork.stateNode);
<ide> if (__DEV__ && ReactFiberInstrumentation.debugTool) {
<ide> ReactFiberInstrumentation.debugTool.onCommitWork(finishedWork);
<ide> }
<ide><path>packages/react/src/__tests__/ReactProfiler-test.internal.js
<ide> function loadModules({
<ide> const mockDevToolsForTest = () => {
<ide> jest.mock('react-reconciler/src/ReactFiberDevToolsHook', () => ({
<ide> injectInternals: () => {},
<add> onCommitRoot: () => {},
<add> onCommitUnmount: () => {},
<ide> isDevToolsPresent: true,
<ide> }));
<ide> };
| 3
|
Javascript
|
Javascript
|
change logic for `platformsupportsupdates`
|
8307fb84265509bcaaed99f9c6b8b452f9a56bfb
|
<ide><path>src/auto-update-manager.js
<ide> export default class AutoUpdateManager {
<ide> }
<ide>
<ide> platformSupportsUpdates () {
<del> return this.getReleaseChannel() == 'stable' && (this.getPlatform() === 'darwin' || this.getPlatform() === 'win32')
<add> return this.getReleaseChannel() !== 'dev' && this.getState() !== 'unsupported'
<ide> }
<ide>
<ide> onDidBeginCheckingForUpdate (callback) {
| 1
|
Java
|
Java
|
ensure mutable headers for jetty webflux request
|
b732ff349509d2b174978c71fe522ee6aa6b57a8
|
<ide><path>spring-web/src/main/java/org/springframework/http/server/reactive/JettyHeadersAdapter.java
<ide> */
<ide> class JettyHeadersAdapter implements MultiValueMap<String, String> {
<ide>
<del> private final HttpFields headers;
<add> private final HttpFields.Mutable headers;
<ide>
<ide>
<del> JettyHeadersAdapter(HttpFields headers) {
<add> JettyHeadersAdapter(HttpFields.Mutable headers) {
<ide> this.headers = headers;
<ide> }
<ide>
<ide> public String getFirst(String key) {
<ide>
<ide> @Override
<ide> public void add(String key, @Nullable String value) {
<del> if (!(this.headers instanceof HttpFields.Mutable)) {
<del> throw new IllegalStateException("Immutable headers");
<del> }
<del> ((HttpFields.Mutable) this.headers).add(key, value);
<add> this.headers.add(key, value);
<ide> }
<ide>
<ide> @Override
<ide> public void addAll(MultiValueMap<String, String> values) {
<ide>
<ide> @Override
<ide> public void set(String key, @Nullable String value) {
<del> if (!(this.headers instanceof HttpFields.Mutable)) {
<del> throw new IllegalStateException("Immutable headers");
<del> }
<del> ((HttpFields.Mutable) this.headers).put(key, value);
<add> this.headers.put(key, value);
<ide> }
<ide>
<ide> @Override
<ide> public List<String> get(Object key) {
<ide> @Nullable
<ide> @Override
<ide> public List<String> put(String key, List<String> value) {
<del> if (!(this.headers instanceof HttpFields.Mutable)) {
<del> throw new IllegalStateException("Immutable headers");
<del> }
<ide> List<String> oldValues = get(key);
<del> ((HttpFields.Mutable) this.headers).put(key, value);
<add> this.headers.put(key, value);
<ide> return oldValues;
<ide> }
<ide>
<ide> @Nullable
<ide> @Override
<ide> public List<String> remove(Object key) {
<del> if (!(this.headers instanceof HttpFields.Mutable)) {
<del> throw new IllegalStateException("Immutable headers");
<del> }
<ide> if (key instanceof String) {
<ide> List<String> oldValues = get(key);
<del> ((HttpFields.Mutable) this.headers).remove((String) key);
<add> this.headers.remove((String) key);
<ide> return oldValues;
<ide> }
<ide> return null;
<ide> public void putAll(Map<? extends String, ? extends List<String>> map) {
<ide>
<ide> @Override
<ide> public void clear() {
<del> if (!(this.headers instanceof HttpFields.Mutable)) {
<del> throw new IllegalStateException("Immutable headers");
<del> }
<del> ((HttpFields.Mutable) this.headers).clear();
<add> this.headers.clear();
<ide> }
<ide>
<ide> @Override
<ide> public Collection<List<String>> values() {
<ide>
<ide> @Override
<ide> public Set<Entry<String, List<String>>> entrySet() {
<del> return new AbstractSet<Entry<String, List<String>>>() {
<add> return new AbstractSet<>() {
<ide> @Override
<ide> public Iterator<Entry<String, List<String>>> iterator() {
<ide> return new EntryIterator();
<ide> public List<String> getValue() {
<ide>
<ide> @Override
<ide> public List<String> setValue(List<String> value) {
<del> if (!(headers instanceof HttpFields.Mutable)) {
<del> throw new IllegalStateException("Immutable headers");
<del> }
<ide> List<String> previousValues = headers.getValuesList(this.key);
<del> ((HttpFields.Mutable) headers).put(this.key, value);
<add> headers.put(this.key, value);
<ide> return previousValues;
<ide> }
<ide> }
<ide> public String next() {
<ide>
<ide> @Override
<ide> public void remove() {
<del> if (!(headers instanceof HttpFields.Mutable)) {
<del> throw new IllegalStateException("Immutable headers");
<del> }
<ide> if (this.currentName == null) {
<ide> throw new IllegalStateException("No current Header in iterator");
<ide> }
<ide> if (!headers.contains(this.currentName)) {
<ide> throw new IllegalStateException("Header not present: " + this.currentName);
<ide> }
<del> ((HttpFields.Mutable) headers).remove(this.currentName);
<add> headers.remove(this.currentName);
<ide> }
<ide> }
<ide>
<ide><path>spring-web/src/main/java/org/springframework/http/server/reactive/JettyHttpHandlerAdapter.java
<ide> private static final class JettyServerHttpRequest extends ServletServerHttpReque
<ide>
<ide> private static MultiValueMap<String, String> createHeaders(HttpServletRequest servletRequest) {
<ide> Request request = getRequest(servletRequest);
<del> HttpFields fields = request.getMetaData().getFields();
<add> HttpFields.Mutable fields = HttpFields.build(request.getHttpFields());
<ide> return new JettyHeadersAdapter(fields);
<ide> }
<ide>
<ide> private static final class JettyServerHttpResponse extends ServletServerHttpResp
<ide>
<ide> private static HttpHeaders createHeaders(HttpServletResponse servletResponse) {
<ide> Response response = getResponse(servletResponse);
<del> HttpFields fields = response.getHttpFields();
<add> HttpFields.Mutable fields = response.getHttpFields();
<ide> return new HttpHeaders(new JettyHeadersAdapter(fields));
<ide> }
<ide>
| 2
|
Javascript
|
Javascript
|
make tls test more rigorous
|
3c29adb84fac00dfc16c51725e4832789610c6a7
|
<ide><path>test/parallel/test-tls-connect-no-host.js
<ide> if (!common.hasCrypto)
<ide> common.skip('missing crypto');
<ide>
<ide> const tls = require('tls');
<del>
<ide> const assert = require('assert');
<ide>
<ide> const cert = fixtures.readSync('test_cert.pem');
<ide> const key = fixtures.readSync('test_key.pem');
<ide> // https://github.com/nodejs/node/issues/1489
<ide> // tls.connect(options) with no options.host should accept a cert with
<ide> // CN:'localhost'
<del>tls.createServer({
<add>const server = tls.createServer({
<ide> key,
<ide> cert
<del>}).listen(0, function() {
<add>}).listen(0, common.mustCall(function() {
<ide> const socket = tls.connect({
<ide> port: this.address().port,
<ide> ca: cert,
<ide> // No host set here. 'localhost' is the default,
<ide> // but tls.checkServerIdentity() breaks before the fix with:
<ide> // Error: Hostname/IP doesn't match certificate's altnames:
<ide> // "Host: undefined. is not cert's CN: localhost"
<del> }, function() {
<add> }, common.mustCall(function() {
<ide> assert(socket.authorized);
<del> process.exit();
<del> });
<del>});
<add> socket.destroy();
<add> server.close();
<add> }));
<add>}));
| 1
|
PHP
|
PHP
|
update typehints for log/
|
ff7aeceb7a52e77232412317e67e0091fe5b7e13
|
<ide><path>src/Log/Engine/FileLog.php
<ide> public function __construct(array $config = [])
<ide> * @param array $context Additional information about the logged message
<ide> * @return void
<ide> */
<del> public function log($level, $message, array $context = [])
<add> public function log($level, $message, array $context = []): void
<ide> {
<ide> $message = $this->_format($message, $context);
<ide> $output = date('Y-m-d H:i:s') . ' ' . ucfirst($level) . ': ' . $message . "\n";
<ide> protected function _getFilename(string $level): string
<ide> * @return bool|null True if rotated successfully or false in case of error.
<ide> * Null if file doesn't need to be rotated.
<ide> */
<del> protected function _rotateFile($filename): ?bool
<add> protected function _rotateFile(string $filename): ?bool
<ide> {
<ide> $filePath = $this->_path . $filename;
<ide> clearstatcache(true, $filePath);
<ide><path>src/Log/Engine/SyslogLog.php
<ide> class SyslogLog extends BaseLog
<ide> * @param array $context Additional information about the logged message
<ide> * @return void
<ide> */
<del> public function log($level, $message, array $context = [])
<add> public function log($level, $message, array $context = []): void
<ide> {
<ide> if (!$this->_open) {
<ide> $config = $this->_config;
<ide> public function log($level, $message, array $context = [])
<ide> * @param int $facility the stream or facility to log to
<ide> * @return void
<ide> */
<del> protected function _open($ident, $options, $facility): void
<add> protected function _open(string $ident, int $options, int $facility): void
<ide> {
<ide> openlog($ident, $options, $facility);
<ide> }
<ide> protected function _open($ident, $options, $facility): void
<ide> * @param string $message Message to log.
<ide> * @return bool
<ide> */
<del> protected function _write($priority, $message): bool
<add> protected function _write(int $priority, string $message): bool
<ide> {
<ide> return syslog($priority, $message);
<ide> }
<ide><path>src/Log/Log.php
<ide> public static function setConfig($key, $config = null): void
<ide> * @param string $name Key name of a configured adapter to get.
<ide> * @return \Psr\Log\LoggerInterface|null Instance of LoggerInterface or false if not found
<ide> */
<del> public static function engine($name): ?LoggerInterface
<add> public static function engine(string $name): ?LoggerInterface
<ide> {
<ide> static::_init();
<ide> if (static::$_registry->{$name}) {
<ide> public static function write($level, $message, $context = []): bool
<ide> * See Cake\Log\Log::setConfig() for more information on logging scopes.
<ide> * @return bool Success
<ide> */
<del> public static function emergency($message, $context = []): bool
<add> public static function emergency(string $message, $context = []): bool
<ide> {
<ide> return static::write(__FUNCTION__, $message, $context);
<ide> }
<ide> public static function emergency($message, $context = []): bool
<ide> * See Cake\Log\Log::setConfig() for more information on logging scopes.
<ide> * @return bool Success
<ide> */
<del> public static function alert($message, $context = []): bool
<add> public static function alert(string $message, $context = []): bool
<ide> {
<ide> return static::write(__FUNCTION__, $message, $context);
<ide> }
<ide> public static function alert($message, $context = []): bool
<ide> * See Cake\Log\Log::setConfig() for more information on logging scopes.
<ide> * @return bool Success
<ide> */
<del> public static function critical($message, $context = []): bool
<add> public static function critical(string $message, $context = []): bool
<ide> {
<ide> return static::write(__FUNCTION__, $message, $context);
<ide> }
<ide> public static function critical($message, $context = []): bool
<ide> * See Cake\Log\Log::setConfig() for more information on logging scopes.
<ide> * @return bool Success
<ide> */
<del> public static function error($message, $context = []): bool
<add> public static function error(string $message, $context = []): bool
<ide> {
<ide> return static::write(__FUNCTION__, $message, $context);
<ide> }
<ide> public static function error($message, $context = []): bool
<ide> * See Cake\Log\Log::setConfig() for more information on logging scopes.
<ide> * @return bool Success
<ide> */
<del> public static function warning($message, $context = []): bool
<add> public static function warning(string $message, $context = []): bool
<ide> {
<ide> return static::write(__FUNCTION__, $message, $context);
<ide> }
<ide> public static function warning($message, $context = []): bool
<ide> * See Cake\Log\Log::setConfig() for more information on logging scopes.
<ide> * @return bool Success
<ide> */
<del> public static function notice($message, $context = []): bool
<add> public static function notice(string $message, $context = []): bool
<ide> {
<ide> return static::write(__FUNCTION__, $message, $context);
<ide> }
<ide> public static function notice($message, $context = []): bool
<ide> * See Cake\Log\Log::setConfig() for more information on logging scopes.
<ide> * @return bool Success
<ide> */
<del> public static function debug($message, $context = []): bool
<add> public static function debug(string $message, $context = []): bool
<ide> {
<ide> return static::write(__FUNCTION__, $message, $context);
<ide> }
<ide> public static function debug($message, $context = []): bool
<ide> * See Cake\Log\Log::setConfig() for more information on logging scopes.
<ide> * @return bool Success
<ide> */
<del> public static function info($message, $context = []): bool
<add> public static function info(string $message, $context = []): bool
<ide> {
<ide> return static::write(__FUNCTION__, $message, $context);
<ide> }
| 3
|
Java
|
Java
|
fix textinput crash in non-fabric
|
030d2c1931fb9ff97f682343914503a1c359e1c4
|
<ide><path>ReactAndroid/src/main/java/com/facebook/react/views/textinput/ReactEditText.java
<ide> public FabricViewStateManager getFabricViewStateManager() {
<ide> */
<ide> private void updateCachedSpannable(boolean resetStyles) {
<ide> // Noops in non-Fabric
<del> if (getFabricViewStateManager() == null) {
<add> if (mFabricViewStateManager.hasStateWrapper()) {
<ide> return;
<ide> }
<ide> // If this view doesn't have an ID yet, we don't have a cache key, so bail here
| 1
|
Javascript
|
Javascript
|
fix default `into` for nested routes
|
bf3f851ecbe3a22963975d5f3f2cb4505ed40c6f
|
<ide><path>packages/ember-application/lib/system/application.js
<ide> Ember.Application.reopenClass({
<ide> buildContainer: function(namespace) {
<ide> var container = new Ember.Container();
<ide> Ember.Container.defaultContainer = container;
<del> var ApplicationView = Ember.View.extend();
<ide>
<ide> container.set = Ember.set;
<ide> container.resolve = resolveFor(namespace);
<ide> Ember.Application.reopenClass({
<ide>
<ide> container.typeInjection('route', 'router', 'router:main');
<ide>
<del> // Register a fallback application view. App.ApplicationView will
<del> // take precedence.
<del> container.register('view', 'application', ApplicationView);
<del> if (Ember.Handlebars) {
<del> var template = Ember.Handlebars.compile("{{outlet}}");
<del> container.register('template', 'application', template);
<del> }
<del>
<ide> return container;
<ide> }
<ide> });
<ide><path>packages/ember-routing/lib/system/route.js
<ide> var get = Ember.get, set = Ember.set,
<ide>
<ide>
<ide> Ember.Route = Ember.Object.extend({
<add> exit: function() {
<add> teardownView(this);
<add> },
<add>
<ide> /**
<ide> Transition into another route. Optionally supply a model for the
<ide> route in question. The model will be serialized into the URL
<ide> Ember.Route = Ember.Object.extend({
<ide>
<ide> if (!view && !template) { return; }
<ide>
<add> this.lastRenderedTemplate = name;
<add>
<ide> options = normalizeOptions(this, name, template, options);
<ide> view = setupView(view, container, options);
<ide>
<del> if (name === 'application') {
<del> appendApplicationView(this, view);
<del> } else {
<del> appendView(this, view, options);
<del> }
<add> appendView(this, view, options);
<ide> }
<ide> });
<ide>
<add>function parentRoute(route) {
<add> var handlerInfos = route.router.router.currentHandlerInfos;
<add>
<add> var parent, current;
<add>
<add> for (var i=0, l=handlerInfos.length; i<l; i++) {
<add> current = handlerInfos[i].handler;
<add> if (current === route) { return parent; }
<add> parent = current;
<add> }
<add>}
<add>
<add>function parentTemplate(route) {
<add> var parent = parentRoute(route), template;
<add>
<add> if (!parent) { return; }
<add>
<add> if (template = parent.lastRenderedTemplate) {
<add> return template;
<add> } else {
<add> return parentTemplate(parent);
<add> }
<add>}
<add>
<ide> function normalizeOptions(route, name, template, options) {
<ide> options = options || {};
<del> options.into = options.into || 'application';
<add> options.into = options.into || parentTemplate(route);
<ide> options.outlet = options.outlet || 'main';
<ide> options.name = name;
<ide> options.template = template;
<ide> function normalizeOptions(route, name, template, options) {
<ide> }
<ide>
<ide> function setupView(view, container, options) {
<del> var containerView;
<add> var defaultView = options.into ? 'view:default' : 'view:toplevel';
<ide>
<del> view = view || container.lookup('view:default');
<add> view = view || container.lookup(defaultView);
<ide>
<ide> set(view, 'template', options.template);
<ide> set(view, 'viewName', options.name);
<ide> function setupView(view, container, options) {
<ide> return view;
<ide> }
<ide>
<del>function appendApplicationView(route, view) {
<del> var rootElement = get(route, 'router.namespace.rootElement');
<del> route.router._connectActiveView('application', view);
<del> view.appendTo(rootElement);
<add>function appendView(route, view, options) {
<add> if (options.into) {
<add> var parentView = route.router._lookupActiveView(options.into);
<add> route.teardownView = teardownOutlet(parentView, options.outlet);
<add> parentView.connectOutlet(options.outlet, view);
<add> } else {
<add> var rootElement = get(route, 'router.namespace.rootElement');
<add> route.router._connectActiveView(options.name, view);
<add> route.teardownView = teardownTopLevel(view);
<add> view.appendTo(rootElement);
<add> }
<ide> }
<ide>
<del>function appendView(route, view, options) {
<del> var parentView = route.router._lookupActiveView(options.into);
<del> parentView.connectOutlet(options.outlet, view);
<add>function teardownTopLevel(view) {
<add> return function() { view.remove(); };
<add>}
<add>
<add>function teardownOutlet(parentView, outlet) {
<add> return function() { parentView.disconnectOutlet(outlet); };
<add>}
<add>
<add>function teardownView(route) {
<add> if (route.teardownView) { route.teardownView(); }
<add>
<add> delete route.teardownView;
<add> delete route.lastRenderedTemplate;
<ide> }
<ide><path>packages/ember-routing/lib/system/router.js
<ide> Ember.Router = Ember.Object.extend({
<ide> };
<ide>
<ide> container.register('view', 'default', DefaultView);
<add> container.register('view', 'toplevel', Ember.View.extend());
<ide>
<ide> router.handleURL(location.getURL());
<ide> location.onUpdateURL(function(url) {
<ide><path>packages/ember/tests/routing/basic_test.js
<ide> function bootApplication() {
<ide> });
<ide> }
<ide>
<add>function compile(string) {
<add> return Ember.Handlebars.compile(string);
<add>}
<add>
<ide> module("Basic Routing", {
<ide> setup: function() {
<ide> Ember.run(function() {
<ide> module("Basic Routing", {
<ide> App.LoadingRoute = Ember.Route.extend({
<ide> });
<ide>
<del> Ember.TEMPLATES.home = Ember.Handlebars.compile("<h3>Hours</h3>");
<del> Ember.TEMPLATES.homepage = Ember.Handlebars.compile("<h3>Megatroll</h3><p>{{home}}</p>");
<add> Ember.TEMPLATES.application = compile("{{outlet}}");
<add> Ember.TEMPLATES.home = compile("<h3>Hours</h3>");
<add> Ember.TEMPLATES.homepage = compile("<h3>Megatroll</h3><p>{{home}}</p>");
<ide>
<ide> Router = Ember.Router.extend({
<ide> location: 'none'
<ide> test("A redirection hook is provided", function() {
<ide> equal(Ember.$("h3:contains(Hours)", "#qunit-fixture").length, 1, "The home template was rendered");
<ide> });
<ide>
<add>test("Child routes render into their parent route's template by default", function() {
<add> Ember.TEMPLATES.index = compile("<div>Index</div>");
<add> Ember.TEMPLATES.application = compile("<h1>Home</h1><div class='main'>{{outlet}}</div>");
<add> Ember.TEMPLATES.top = compile("<div class='middle'>{{outlet}}</div>");
<add> Ember.TEMPLATES.middle = compile("<div class='bottom'>{{outlet}}</div>");
<add> Ember.TEMPLATES.bottom = compile("<p>Bottom!</p>");
<add>
<add> Router.map(function(match) {
<add> match("/").to('index');
<add> match("/top").to("top", function(match) {
<add> match("/middle").to("middle", function(match) {
<add> match("/bottom").to("bottom");
<add> });
<add> });
<add> });
<add>
<add> bootApplication();
<add>
<add> Ember.run(function() {
<add> router.handleURL("/top/middle/bottom");
<add> });
<add>
<add> equal(Ember.$('.main .middle .bottom p', '#qunit-fixture').text(), "Bottom!", "The templates were rendered into their appropriate parents");
<add>});
<add>
<ide> // TODO: Parent context change
| 4
|
Text
|
Text
|
remove capistrano from asset pipeline guide
|
730046e66722cc2ad667c60e2d1b948a0c317936
|
<ide><path>guides/source/asset_pipeline.md
<ide> The command is:
<ide> $ RAILS_ENV=production rails assets:precompile
<ide> ```
<ide>
<del>Capistrano (v2.15.1 and above) includes a recipe to handle this in deployment.
<del>Add the following line to `Capfile`:
<del>
<del>```ruby
<del># Capistrano version 2
<del>load 'deploy/assets'
<del>
<del># Capistrano version 3
<del>require "capistrano/rails/assets"
<del>```
<del>
<ide> This links the folder specified in `config.assets.prefix` to `shared/assets`.
<ide> If you already use this shared folder you'll need to write your own deployment
<ide> command.
<ide> duplication of work.
<ide> Local compilation allows you to commit the compiled files into source control,
<ide> and deploy as normal.
<ide>
<del>There are three caveats:
<add>There are two caveats:
<ide>
<del>* You must not run the Capistrano deployment task that precompiles assets.
<ide> * You must ensure any necessary compressors or minifiers are
<ide> available on your development system.
<ide> * You must change the following application configuration setting:
| 1
|
Go
|
Go
|
fix path issues in docker cp tests
|
c5b312dcf5efa4f91dee59f4b701ea7a26a6d41e
|
<ide><path>integration-cli/docker_cli_cp_test.go
<ide> import (
<ide> "io/ioutil"
<ide> "os"
<ide> "os/exec"
<add> "path"
<ide> "path/filepath"
<ide> "testing"
<ide> )
<ide> func TestCpGarbagePath(t *testing.T) {
<ide> tmpname := filepath.Join(tmpdir, cpTestName)
<ide> defer os.RemoveAll(tmpdir)
<ide>
<del> path := filepath.Join("../../../../../../../../../../../../", cpFullPath)
<add> path := path.Join("../../../../../../../../../../../../", cpFullPath)
<ide>
<ide> _, _, err = dockerCmd(t, "cp", cleanedContainerID+":"+path, tmpdir)
<ide> if err != nil {
<ide> func TestCpRelativePath(t *testing.T) {
<ide> tmpname := filepath.Join(tmpdir, cpTestName)
<ide> defer os.RemoveAll(tmpdir)
<ide>
<del> path, _ := filepath.Rel("/", cpFullPath)
<add> var relPath string
<add> if path.IsAbs(cpFullPath) {
<add> // normally this is `filepath.Rel("/", cpFullPath)` but we cannot
<add> // get this unix-path manipulation on windows with filepath.
<add> relPath = cpFullPath[1:]
<add> } else {
<add> t.Fatalf("path %s was assumed to be an absolute path", cpFullPath)
<add> }
<ide>
<del> _, _, err = dockerCmd(t, "cp", cleanedContainerID+":"+path, tmpdir)
<add> _, _, err = dockerCmd(t, "cp", cleanedContainerID+":"+relPath, tmpdir)
<ide> if err != nil {
<del> t.Fatalf("couldn't copy from relative path: %s:%s %s", cleanedContainerID, path, err)
<add> t.Fatalf("couldn't copy from relative path: %s:%s %s", cleanedContainerID, relPath, err)
<ide> }
<ide>
<ide> file, _ := os.Open(tmpname)
<ide> func TestCpAbsoluteSymlink(t *testing.T) {
<ide> tmpname := filepath.Join(tmpdir, cpTestName)
<ide> defer os.RemoveAll(tmpdir)
<ide>
<del> path := filepath.Join("/", "container_path")
<add> path := path.Join("/", "container_path")
<ide>
<ide> _, _, err = dockerCmd(t, "cp", cleanedContainerID+":"+path, tmpdir)
<ide> if err != nil {
<ide> func TestCpSymlinkComponent(t *testing.T) {
<ide> tmpname := filepath.Join(tmpdir, cpTestName)
<ide> defer os.RemoveAll(tmpdir)
<ide>
<del> path := filepath.Join("/", "container_path", cpTestName)
<add> path := path.Join("/", "container_path", cpTestName)
<ide>
<ide> _, _, err = dockerCmd(t, "cp", cleanedContainerID+":"+path, tmpdir)
<ide> if err != nil {
| 1
|
Javascript
|
Javascript
|
remove uuid from bufferattribute etc
|
366450a2e98642a1bb8362eee4dcc9ae9743b27c
|
<ide><path>src/core/BufferAttribute.js
<ide> import { Vector4 } from '../math/Vector4.js';
<ide> import { Vector3 } from '../math/Vector3.js';
<ide> import { Vector2 } from '../math/Vector2.js';
<ide> import { Color } from '../math/Color.js';
<del>import { _Math } from '../math/Math.js';
<ide>
<ide> /**
<ide> * @author mrdoob / http://mrdoob.com/
<ide> function BufferAttribute( array, itemSize, normalized ) {
<ide>
<ide> }
<ide>
<del> this.uuid = _Math.generateUUID();
<ide> this.name = '';
<ide>
<ide> this.array = array;
<ide><path>src/core/InterleavedBuffer.js
<del>import { _Math } from '../math/Math.js';
<ide>
<ide> /**
<ide> * @author benaadams / https://twitter.com/ben_a_adams
<ide> */
<ide>
<ide> function InterleavedBuffer( array, stride ) {
<ide>
<del> this.uuid = _Math.generateUUID();
<del>
<ide> this.array = array;
<ide> this.stride = stride;
<ide> this.count = array !== undefined ? array.length / stride : 0;
<ide><path>src/core/InterleavedBufferAttribute.js
<del>import { _Math } from '../math/Math.js';
<ide>
<ide> /**
<ide> * @author benaadams / https://twitter.com/ben_a_adams
<ide> */
<ide>
<ide> function InterleavedBufferAttribute( interleavedBuffer, itemSize, offset, normalized ) {
<ide>
<del> this.uuid = _Math.generateUUID();
<del>
<ide> this.data = interleavedBuffer;
<ide> this.itemSize = itemSize;
<ide> this.offset = offset;
| 3
|
Go
|
Go
|
fix errors when no body
|
ab0d0a28a8085255558b5ee7be978d667f44def9
|
<ide><path>commands.go
<ide> func (cli *DockerCli) CmdBuild(args ...string) error {
<ide> if err != nil {
<ide> return err
<ide> }
<del> return fmt.Errorf("error: %s", body)
<add> if len(body) == 0 {
<add> return fmt.Errorf("Error: %s", http.StatusText(resp.StatusCode))
<add> }
<add> return fmt.Errorf("Error: %s", body)
<ide> }
<ide>
<ide> // Output the result
<ide> func (cli *DockerCli) CmdPort(args ...string) error {
<ide> if frontend, exists := out.NetworkSettings.PortMapping[cmd.Arg(1)]; exists {
<ide> fmt.Println(frontend)
<ide> } else {
<del> return fmt.Errorf("error: No private port '%s' allocated on %s", cmd.Arg(1), cmd.Arg(0))
<add> return fmt.Errorf("Error: No private port '%s' allocated on %s", cmd.Arg(1), cmd.Arg(0))
<ide> }
<ide> return nil
<ide> }
<ide> func (cli *DockerCli) call(method, path string, data interface{}) ([]byte, int,
<ide> return nil, -1, err
<ide> }
<ide> if resp.StatusCode < 200 || resp.StatusCode >= 400 {
<del> return nil, resp.StatusCode, fmt.Errorf("error: %s", body)
<add> if len(body) == 0 {
<add> return nil, resp.StatusCode, fmt.Errorf("Error: %s", http.StatusText(resp.StatusCode))
<add> }
<add> return nil, resp.StatusCode, fmt.Errorf("Error: %s", body)
<ide> }
<ide> return body, resp.StatusCode, nil
<ide> }
<ide> func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer) e
<ide> if err != nil {
<ide> return err
<ide> }
<del> return fmt.Errorf("error: %s", body)
<add> if len(body) == 0 {
<add> return fmt.Errorf("Error :%s", http.StatusText(resp.StatusCode))
<add> }
<add> return fmt.Errorf("Error: %s", body)
<ide> }
<ide>
<ide> if resp.Header.Get("Content-Type") == "application/json" {
| 1
|
PHP
|
PHP
|
apply fixes from styleci
|
6e0fb37ca7d432e95d1386bdb1dab56b9b36ffd7
|
<ide><path>src/Illuminate/Foundation/Testing/Concerns/InteractsWithExceptionHandling.php
<ide> protected function withoutExceptionHandling(array $except = [])
<ide> $this->originalExceptionHandler = app(ExceptionHandler::class);
<ide> }
<ide>
<del> $this->app->instance(ExceptionHandler::class, new class($this->originalExceptionHandler, $except) implements ExceptionHandler {
<add> $this->app->instance(ExceptionHandler::class, new class($this->originalExceptionHandler, $except) implements ExceptionHandler
<add> {
<ide> protected $except;
<ide> protected $originalHandler;
<ide>
| 1
|
Ruby
|
Ruby
|
match the controller and path names defensively
|
c00e7aa919624e4f26a6bc0a924892a87dbd91c0
|
<ide><path>actionpack/lib/action_dispatch/routing/inspector.rb
<ide> def action
<ide> end
<ide>
<ide> def internal?
<del> controller =~ %r{^rails/info|^rails/welcome} || path =~ %r{^#{Rails.application.config.assets.prefix}}
<add> controller =~ %r{\Arails/(info|welcome)} || path =~ %r{\A#{Rails.application.config.assets.prefix}}
<ide> end
<ide>
<ide> def engine?
| 1
|
Go
|
Go
|
return proper exit code on builder panic
|
385f1174ff6905243c51d97d641d815a520229ff
|
<ide><path>cli/command/image/build.go
<ide> func runBuild(dockerCli *command.DockerCli, options buildOptions) error {
<ide> }
<ide> return cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code}
<ide> }
<add> return err
<ide> }
<ide>
<ide> // Windows: show error message about modified file permissions if the
| 1
|
Ruby
|
Ruby
|
add missing files
|
62ac3279e87c1c9a9bf669d1cc98b660d06b7667
|
<ide><path>activesupport/lib/active_support/core_ext/date_time/conversions.rb
<add>module ActiveSupport #:nodoc:
<add> module CoreExtensions #:nodoc:
<add> module DateTime #:nodoc:
<add> # Getting datetimes in different convenient string representations and other objects
<add> module Conversions
<add> DATE_FORMATS = {
<add> :db => "%Y-%m-%d %H:%M:%S",
<add> :time => "%H:%M",
<add> :short => "%d %b %H:%M",
<add> :long => "%B %d, %Y %H:%M",
<add> :rfc822 => "%a, %d %b %Y %H:%M:%S %z"
<add> }
<add> def self.included(klass)
<add> klass.send(:alias_method, :to_datetime_default_s, :to_s)
<add> klass.send(:alias_method, :to_s, :to_formatted_s)
<add> end
<add>
<add> def to_formatted_s(format = :default)
<add> DATE_FORMATS[format] ? strftime(DATE_FORMATS[format]).strip : to_datetime_default_s
<add> end
<add>
<add> def to_date
<add> ::Date.new(year, month, day)
<add> end
<add> end
<add> end
<add> end
<add>end
<ide>\ No newline at end of file
<ide><path>activesupport/test/core_ext/date_time_ext_test.rb
<add>require File.dirname(__FILE__) + '/../abstract_unit'
<add>
<add>class DateTimeExtCalculationsTest < Test::Unit::TestCase
<add> def test_to_s
<add> assert_equal "2005-02-21 14:30:00", DateTime.new(2005, 2, 21, 14, 30, 0).to_s(:db)
<add> assert_equal "14:30", DateTime.new(2005, 2, 21, 14, 30, 0).to_s(:time)
<add> assert_equal "21 Feb 14:30", DateTime.new(2005, 2, 21, 14, 30, 0).to_s(:short)
<add> assert_equal "February 21, 2005 14:30", DateTime.new(2005, 2, 21, 14, 30, 0).to_s(:long)
<add> assert_equal "Mon, 21 Feb 2005 14:30:00 +0000", DateTime.new(2005, 2, 21, 14, 30, 0).to_s(:rfc822)
<add> end
<add>
<add> def test_to_date
<add> assert_equal Date.new(2005, 2, 21), DateTime.new(2005, 2, 21).to_date
<add> end
<add>end
<ide>\ No newline at end of file
| 2
|
Ruby
|
Ruby
|
fix typo in `allow_failure` argument [ci skip]
|
116e2c67f5ca4518f2db80b761cc3d5f4993b12a
|
<ide><path>railties/test/isolation/abstract_unit.rb
<ide> def engine(name)
<ide>
<ide> # Invoke a bin/rails command inside the app
<ide> #
<del> # allow_failures:: true to return normally if the command exits with
<add> # allow_failure:: true to return normally if the command exits with
<ide> # a non-zero status. By default, this method will raise.
<ide> # stderr:: true to pass STDERR output straight to the "real" STDERR.
<ide> # By default, the STDERR and STDOUT of the process will be
| 1
|
Ruby
|
Ruby
|
remove duplicate part from deprecation warning
|
63316b7b74107345def7bddf719125f309d734eb
|
<ide><path>activerecord/lib/active_record/database_configurations/database_config.rb
<ide> class DatabaseConfigurations
<ide> # as this is the parent class for the types of database configuration objects.
<ide> class DatabaseConfig # :nodoc:
<ide> attr_reader :env_name, :name, :spec_name
<del> deprecate :spec_name, "spec_name accessors are deprecated and will be removed in Rails 6.2, please use name instead."
<add> deprecate spec_name: "please use name instead"
<ide>
<ide> attr_accessor :owner_name
<ide>
<ide><path>activerecord/lib/active_record/tasks/database_tasks.rb
<ide> def env
<ide> def spec
<ide> @spec ||= "primary"
<ide> end
<del> deprecate :spec, "spec_name accessors are deprecated and will be removed in Rails 6.2, please use name instead."
<add> deprecate spec: "please use name instead"
<ide>
<ide> def name
<ide> @name ||= "primary"
| 2
|
Python
|
Python
|
move np.testing.dec tests to test_deprecations.py
|
b05294c169438d4e230374f0e9677682d84c0094
|
<ide><path>numpy/core/tests/test_deprecations.py
<ide>
<ide> import numpy as np
<ide> from numpy.testing import (
<del> assert_raises, assert_warns, assert_, assert_array_equal
<add> assert_raises, assert_warns, assert_, assert_array_equal, SkipTest, KnownFailureException
<ide> )
<ide>
<ide> from numpy.core._multiarray_tests import fromstring_null_term_c_api
<ide> class TestDeprecatedUnpickleObjectScalar(_DeprecationTestCase):
<ide> def test_deprecated(self):
<ide> ctor = np.core.multiarray.scalar
<ide> self.assert_deprecated(lambda: ctor(np.dtype("O"), 1))
<add>
<add>try:
<add> with warnings.catch_warnings():
<add> warnings.simplefilter("always")
<add> import nose # noqa: F401
<add>except ImportError:
<add> HAVE_NOSE = False
<add>else:
<add> HAVE_NOSE = True
<add>
<add>
<add>@pytest.mark.skipif(not HAVE_NOSE, reason="Needs nose")
<add>class TestNoseDecoratorsDeprecated(_DeprecationTestCase):
<add> class DidntSkipException(Exception):
<add> pass
<add>
<add> def test_slow(self):
<add> def _test_slow():
<add> @np.testing.dec.slow
<add> def slow_func(x, y, z):
<add> pass
<add>
<add> assert_(slow_func.slow)
<add> self.assert_deprecated(_test_slow)
<add>
<add> def test_setastest(self):
<add> def _test_setastest():
<add> @np.testing.dec.setastest()
<add> def f_default(a):
<add> pass
<add>
<add> @np.testing.dec.setastest(True)
<add> def f_istest(a):
<add> pass
<add>
<add> @np.testing.dec.setastest(False)
<add> def f_isnottest(a):
<add> pass
<add>
<add> assert_(f_default.__test__)
<add> assert_(f_istest.__test__)
<add> assert_(not f_isnottest.__test__)
<add> self.assert_deprecated(_test_setastest, num=3)
<add>
<add> def test_skip_functions_hardcoded(self):
<add> def _test_skip_functions_hardcoded():
<add> @np.testing.dec.skipif(True)
<add> def f1(x):
<add> raise self.DidntSkipException
<add>
<add> try:
<add> f1('a')
<add> except self.DidntSkipException:
<add> raise Exception('Failed to skip')
<add> except SkipTest().__class__:
<add> pass
<add>
<add> @np.testing.dec.skipif(False)
<add> def f2(x):
<add> raise self.DidntSkipException
<add>
<add> try:
<add> f2('a')
<add> except self.DidntSkipException:
<add> pass
<add> except SkipTest().__class__:
<add> raise Exception('Skipped when not expected to')
<add> self.assert_deprecated(_test_skip_functions_hardcoded, num=2)
<add>
<add> def test_skip_functions_callable(self):
<add> def _test_skip_functions_callable():
<add> def skip_tester():
<add> return skip_flag == 'skip me!'
<add>
<add> @np.testing.dec.skipif(skip_tester)
<add> def f1(x):
<add> raise self.DidntSkipException
<add>
<add> try:
<add> skip_flag = 'skip me!'
<add> f1('a')
<add> except self.DidntSkipException:
<add> raise Exception('Failed to skip')
<add> except SkipTest().__class__:
<add> pass
<add>
<add> @np.testing.dec.skipif(skip_tester)
<add> def f2(x):
<add> raise self.DidntSkipException
<add>
<add> try:
<add> skip_flag = 'five is right out!'
<add> f2('a')
<add> except self.DidntSkipException:
<add> pass
<add> except SkipTest().__class__:
<add> raise Exception('Skipped when not expected to')
<add> self.assert_deprecated(_test_skip_functions_callable, num=2)
<add>
<add> def test_skip_generators_hardcoded(self):
<add> def _test_skip_generators_hardcoded():
<add> @np.testing.dec.knownfailureif(True, "This test is known to fail")
<add> def g1(x):
<add> yield from range(x)
<add>
<add> try:
<add> for j in g1(10):
<add> pass
<add> except KnownFailureException().__class__:
<add> pass
<add> else:
<add> raise Exception('Failed to mark as known failure')
<add>
<add> @np.testing.dec.knownfailureif(False, "This test is NOT known to fail")
<add> def g2(x):
<add> yield from range(x)
<add> raise self.DidntSkipException('FAIL')
<add>
<add> try:
<add> for j in g2(10):
<add> pass
<add> except KnownFailureException().__class__:
<add> raise Exception('Marked incorrectly as known failure')
<add> except self.DidntSkipException:
<add> pass
<add> self.assert_deprecated(_test_skip_generators_hardcoded, num=2)
<add>
<add> def test_skip_generators_callable(self):
<add> def _test_skip_generators_callable():
<add> def skip_tester():
<add> return skip_flag == 'skip me!'
<add>
<add> @np.testing.dec.knownfailureif(skip_tester, "This test is known to fail")
<add> def g1(x):
<add> yield from range(x)
<add>
<add> try:
<add> skip_flag = 'skip me!'
<add> for j in g1(10):
<add> pass
<add> except KnownFailureException().__class__:
<add> pass
<add> else:
<add> raise Exception('Failed to mark as known failure')
<add>
<add> @np.testing.dec.knownfailureif(skip_tester, "This test is NOT known to fail")
<add> def g2(x):
<add> yield from range(x)
<add> raise self.DidntSkipException('FAIL')
<add>
<add> try:
<add> skip_flag = 'do not skip'
<add> for j in g2(10):
<add> pass
<add> except KnownFailureException().__class__:
<add> raise Exception('Marked incorrectly as known failure')
<add> except self.DidntSkipException:
<add> pass
<add> self.assert_deprecated(_test_skip_generators_callable, num=2)
<add>
<add> def test_deprecated(self):
<add> def _test_deprecated():
<add> @np.testing.dec.deprecated(True)
<add> def non_deprecated_func():
<add> pass
<add>
<add> @np.testing.dec.deprecated()
<add> def deprecated_func():
<add> import warnings
<add> warnings.warn("TEST: deprecated func", DeprecationWarning)
<add>
<add> @np.testing.dec.deprecated()
<add> def deprecated_func2():
<add> import warnings
<add> warnings.warn("AHHHH")
<add> raise ValueError
<add>
<add> @np.testing.dec.deprecated()
<add> def deprecated_func3():
<add> import warnings
<add> warnings.warn("AHHHH")
<add>
<add> # marked as deprecated, but does not raise DeprecationWarning
<add> assert_raises(AssertionError, non_deprecated_func)
<add> # should be silent
<add> deprecated_func()
<add> with warnings.catch_warnings(record=True):
<add> warnings.simplefilter("always") # do not propagate unrelated warnings
<add> # fails if deprecated decorator just disables test. See #1453.
<add> assert_raises(ValueError, deprecated_func2)
<add> # warning is not a DeprecationWarning
<add> assert_raises(AssertionError, deprecated_func3)
<add> self.assert_deprecated(_test_deprecated, num=4)
<add>
<add> def test_parametrize(self):
<add> def _test_parametrize():
<add> # dec.parametrize assumes that it is being run by nose. Because
<add> # we are running under pytest, we need to explicitly check the
<add> # results.
<add> @np.testing.dec.parametrize('base, power, expected',
<add> [(1, 1, 1),
<add> (2, 1, 2),
<add> (2, 2, 4)])
<add> def check_parametrize(base, power, expected):
<add> assert_(base**power == expected)
<add>
<add> count = 0
<add> for test in check_parametrize():
<add> test[0](*test[1:])
<add> count += 1
<add> assert_(count == 3)
<add> self.assert_deprecated(_test_parametrize)
<ide><path>numpy/testing/tests/test_decorators.py
<del>"""
<del>Test the decorators from ``testing.decorators``.
<del>
<del>"""
<del>import warnings
<del>import pytest
<del>
<del>from numpy.testing import (
<del> assert_, assert_raises, dec, SkipTest, KnownFailureException,
<del> )
<del>
<del>
<del>try:
<del> with warnings.catch_warnings():
<del> warnings.simplefilter("always")
<del> import nose # noqa: F401
<del>except ImportError:
<del> HAVE_NOSE = False
<del>else:
<del> HAVE_NOSE = True
<del>
<del>
<del>@pytest.mark.skipif(not HAVE_NOSE, reason="Needs nose")
<del>class TestNoseDecorators:
<del> # These tests are run in a class for simplicity while still
<del> # getting a report on each, skipped or success.
<del>
<del> class DidntSkipException(Exception):
<del> pass
<del>
<del> def test_slow(self):
<del> @dec.slow
<del> def slow_func(x, y, z):
<del> pass
<del>
<del> assert_(slow_func.slow)
<del>
<del> def test_setastest(self):
<del> @dec.setastest()
<del> def f_default(a):
<del> pass
<del>
<del> @dec.setastest(True)
<del> def f_istest(a):
<del> pass
<del>
<del> @dec.setastest(False)
<del> def f_isnottest(a):
<del> pass
<del>
<del> assert_(f_default.__test__)
<del> assert_(f_istest.__test__)
<del> assert_(not f_isnottest.__test__)
<del>
<del> def test_skip_functions_hardcoded(self):
<del> @dec.skipif(True)
<del> def f1(x):
<del> raise self.DidntSkipException
<del>
<del> try:
<del> f1('a')
<del> except self.DidntSkipException:
<del> raise Exception('Failed to skip')
<del> except SkipTest().__class__:
<del> pass
<del>
<del> @dec.skipif(False)
<del> def f2(x):
<del> raise self.DidntSkipException
<del>
<del> try:
<del> f2('a')
<del> except self.DidntSkipException:
<del> pass
<del> except SkipTest().__class__:
<del> raise Exception('Skipped when not expected to')
<del>
<del> def test_skip_functions_callable(self):
<del> def skip_tester():
<del> return skip_flag == 'skip me!'
<del>
<del> @dec.skipif(skip_tester)
<del> def f1(x):
<del> raise self.DidntSkipException
<del>
<del> try:
<del> skip_flag = 'skip me!'
<del> f1('a')
<del> except self.DidntSkipException:
<del> raise Exception('Failed to skip')
<del> except SkipTest().__class__:
<del> pass
<del>
<del> @dec.skipif(skip_tester)
<del> def f2(x):
<del> raise self.DidntSkipException
<del>
<del> try:
<del> skip_flag = 'five is right out!'
<del> f2('a')
<del> except self.DidntSkipException:
<del> pass
<del> except SkipTest().__class__:
<del> raise Exception('Skipped when not expected to')
<del>
<del> def test_skip_generators_hardcoded(self):
<del> @dec.knownfailureif(True, "This test is known to fail")
<del> def g1(x):
<del> yield from range(x)
<del>
<del> try:
<del> for j in g1(10):
<del> pass
<del> except KnownFailureException().__class__:
<del> pass
<del> else:
<del> raise Exception('Failed to mark as known failure')
<del>
<del> @dec.knownfailureif(False, "This test is NOT known to fail")
<del> def g2(x):
<del> yield from range(x)
<del> raise self.DidntSkipException('FAIL')
<del>
<del> try:
<del> for j in g2(10):
<del> pass
<del> except KnownFailureException().__class__:
<del> raise Exception('Marked incorrectly as known failure')
<del> except self.DidntSkipException:
<del> pass
<del>
<del> def test_skip_generators_callable(self):
<del> def skip_tester():
<del> return skip_flag == 'skip me!'
<del>
<del> @dec.knownfailureif(skip_tester, "This test is known to fail")
<del> def g1(x):
<del> yield from range(x)
<del>
<del> try:
<del> skip_flag = 'skip me!'
<del> for j in g1(10):
<del> pass
<del> except KnownFailureException().__class__:
<del> pass
<del> else:
<del> raise Exception('Failed to mark as known failure')
<del>
<del> @dec.knownfailureif(skip_tester, "This test is NOT known to fail")
<del> def g2(x):
<del> yield from range(x)
<del> raise self.DidntSkipException('FAIL')
<del>
<del> try:
<del> skip_flag = 'do not skip'
<del> for j in g2(10):
<del> pass
<del> except KnownFailureException().__class__:
<del> raise Exception('Marked incorrectly as known failure')
<del> except self.DidntSkipException:
<del> pass
<del>
<del> def test_deprecated(self):
<del> @dec.deprecated(True)
<del> def non_deprecated_func():
<del> pass
<del>
<del> @dec.deprecated()
<del> def deprecated_func():
<del> import warnings
<del> warnings.warn("TEST: deprecated func", DeprecationWarning)
<del>
<del> @dec.deprecated()
<del> def deprecated_func2():
<del> import warnings
<del> warnings.warn("AHHHH")
<del> raise ValueError
<del>
<del> @dec.deprecated()
<del> def deprecated_func3():
<del> import warnings
<del> warnings.warn("AHHHH")
<del>
<del> # marked as deprecated, but does not raise DeprecationWarning
<del> assert_raises(AssertionError, non_deprecated_func)
<del> # should be silent
<del> deprecated_func()
<del> with warnings.catch_warnings(record=True):
<del> warnings.simplefilter("always") # do not propagate unrelated warnings
<del> # fails if deprecated decorator just disables test. See #1453.
<del> assert_raises(ValueError, deprecated_func2)
<del> # warning is not a DeprecationWarning
<del> assert_raises(AssertionError, deprecated_func3)
<del>
<del> def test_parametrize(self):
<del> # dec.parametrize assumes that it is being run by nose. Because
<del> # we are running under pytest, we need to explicitly check the
<del> # results.
<del> @dec.parametrize('base, power, expected',
<del> [(1, 1, 1),
<del> (2, 1, 2),
<del> (2, 2, 4)])
<del> def check_parametrize(base, power, expected):
<del> assert_(base**power == expected)
<del>
<del> count = 0
<del> for test in check_parametrize():
<del> test[0](*test[1:])
<del> count += 1
<del> assert_(count == 3)
| 2
|
Ruby
|
Ruby
|
move some exceptions
|
0225f07ebadad87d3286c65ae705e5c78e34cb20
|
<ide><path>Library/Homebrew/exceptions.rb
<add>class UsageError <RuntimeError; end
<add>class FormulaUnspecifiedError <UsageError; end
<add>class KegUnspecifiedError <UsageError; end
<ide>
<del>class NotAKegError < RuntimeError
<add>class MultipleVersionsInstalledError <RuntimeError
<add> attr :name
<add>
<add> def initialize name
<add> @name = name
<add> super "#{name} has multiple installed versions"
<add> end
<add>end
<add>
<add>class NotAKegError < RuntimeError; end
<add>
<add>class NoSuchKegError <RuntimeError
<add> attr :name
<add>
<add> def initialize name
<add> @name = name
<add> super "No such keg: #{HOMEBREW_CELLAR}/#{name}"
<add> end
<ide> end
<ide>
<ide> class FormulaUnavailableError < RuntimeError
<ide> attr :name
<add>
<ide> def initialize name
<ide> @name = name
<ide> super "No available formula for #{name}"
<ide> def initialize name
<ide> module Homebrew
<ide> class InstallationError < RuntimeError
<ide> attr :formula
<add>
<ide> def initialize formula
<ide> @formula = formula
<ide> end
<ide><path>Library/Homebrew/extend/ARGV.rb
<del>class UsageError <RuntimeError; end
<del>class FormulaUnspecifiedError <UsageError; end
<del>class KegUnspecifiedError <UsageError; end
<del>
<del>class MultipleVersionsInstalledError <RuntimeError
<del> attr :name
<del>
<del> def initialize name
<del> @name = name
<del> super "#{name} has multiple installed versions"
<del> end
<del>end
<del>
<del>class NoSuchKegError <RuntimeError
<del> attr :name
<del>
<del> def initialize name
<del> @name = name
<del> super "No such keg: #{HOMEBREW_CELLAR}/#{name}"
<del> end
<del>end
<del>
<ide> module HomebrewArgvExtension
<ide> def named
<ide> @named ||= reject{|arg| arg[0..0] == '-'}
| 2
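The Homebrew commit above gathers its small exception classes into exceptions.rb, each one storing a name and composing its message in the initializer. A minimal sketch of the same pattern, written in Python purely for illustration (the class names mirror the diff, but the cellar path is an assumed stand-in for HOMEBREW_CELLAR, not Homebrew code):

    CELLAR = "/usr/local/Cellar"  # assumed stand-in for HOMEBREW_CELLAR

    class UsageError(RuntimeError):
        """Base class for command-line usage problems."""

    class NoSuchKegError(RuntimeError):
        def __init__(self, name):
            self.name = name
            # Build the human-readable message once, at construction time.
            super().__init__(f"No such keg: {CELLAR}/{name}")

    try:
        raise NoSuchKegError("wget")
    except NoSuchKegError as err:
        print(err.name, "-", err)

Keeping the message construction in the initializer means callers only need to supply the name, just as in the Ruby version.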
|
PHP
|
PHP
|
update phpdocs for querybuilder
|
250b25d6404a6b3c8f933bae63f6d194188f863f
|
<ide><path>src/Illuminate/Database/Query/Builder.php
<ide> class Builder
<ide> /**
<ide> * The database connection instance.
<ide> *
<del> * @var \Illuminate\Database\Connection
<add> * @var \Illuminate\Database\ConnectionInterface
<ide> */
<ide> public $connection;
<ide>
<ide> public function from($table)
<ide> *
<ide> * @param string $table
<ide> * @param string $first
<del> * @param string $operator
<del> * @param string $second
<add> * @param string|null $operator
<add> * @param string|null $second
<ide> * @param string $type
<ide> * @param bool $where
<ide> * @return $this
<ide> public function joinWhere($table, $first, $operator, $second, $type = 'inner')
<ide> *
<ide> * @param string $table
<ide> * @param string $first
<del> * @param string $operator
<del> * @param string $second
<add> * @param string|null $operator
<add> * @param string|null $second
<ide> * @return \Illuminate\Database\Query\Builder|static
<ide> */
<ide> public function leftJoin($table, $first, $operator = null, $second = null)
<ide> public function leftJoinWhere($table, $first, $operator, $second)
<ide> *
<ide> * @param string $table
<ide> * @param string $first
<del> * @param string $operator
<del> * @param string $second
<add> * @param string|null $operator
<add> * @param string|null $second
<ide> * @return \Illuminate\Database\Query\Builder|static
<ide> */
<ide> public function rightJoin($table, $first, $operator = null, $second = null)
<ide> public function rightJoinWhere($table, $first, $operator, $second)
<ide> * Add a "cross join" clause to the query.
<ide> *
<ide> * @param string $table
<del> * @param string $first
<del> * @param string $operator
<del> * @param string $second
<add> * @param string|null $first
<add> * @param string|null $operator
<add> * @param string|null $second
<ide> * @return \Illuminate\Database\Query\Builder|static
<ide> */
<ide> public function crossJoin($table, $first = null, $operator = null, $second = null)
<ide> public function mergeWheres($wheres, $bindings)
<ide> * Add a basic where clause to the query.
<ide> *
<ide> * @param string|array|\Closure $column
<del> * @param string $operator
<add> * @param string|null $operator
<ide> * @param mixed $value
<ide> * @param string $boolean
<ide> * @return $this
<ide> protected function invalidOperator($operator)
<ide> * Add an "or where" clause to the query.
<ide> *
<ide> * @param string|array|\Closure $column
<del> * @param string $operator
<add> * @param string|null $operator
<ide> * @param mixed $value
<ide> * @return \Illuminate\Database\Query\Builder|static
<ide> */
<ide> public function groupBy(...$groups)
<ide> * Add a "having" clause to the query.
<ide> *
<ide> * @param string $column
<del> * @param string $operator
<del> * @param string $value
<add> * @param string|null $operator
<add> * @param string|null $value
<ide> * @param string $boolean
<ide> * @return $this
<ide> */
<ide> public function having($column, $operator = null, $value = null, $boolean = 'and
<ide> * Add a "or having" clause to the query.
<ide> *
<ide> * @param string $column
<del> * @param string $operator
<del> * @param string $value
<add> * @param string|null $operator
<add> * @param string|null $value
<ide> * @return \Illuminate\Database\Query\Builder|static
<ide> */
<ide> public function orHaving($column, $operator = null, $value = null)
<ide> public function insert(array $values)
<ide> * Insert a new record and get the value of the primary key.
<ide> *
<ide> * @param array $values
<del> * @param string $sequence
<add> * @param string|null $sequence
<ide> * @return int
<ide> */
<ide> public function insertGetId(array $values, $sequence = null)
| 1
|
Ruby
|
Ruby
|
optimize logger.debug calling
|
99c98c80348d37109ec672801f36c79014e5d3bf
|
<ide><path>actioncable/lib/action_cable/server/broadcasting.rb
<ide> def initialize(server, broadcasting, coder:)
<ide> end
<ide>
<ide> def broadcast(message)
<del> server.logger.debug "[ActionCable] Broadcasting to #{broadcasting}: #{message.inspect}"
<add> server.logger.debug { "[ActionCable] Broadcasting to #{broadcasting}: #{message.inspect}" }
<ide>
<ide> payload = { broadcasting: broadcasting, message: message, coder: coder }
<ide> ActiveSupport::Notifications.instrument("broadcast.action_cable", payload) do
| 1
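The Action Cable change above switches logger.debug to its block form so the interpolated broadcast message is only built when debug logging is enabled. A rough Python analogue of the same idea, using the standard logging module's deferred %-style formatting (the logger name and function here are made up for illustration):

    import logging

    logger = logging.getLogger("broadcast.example")

    def broadcast(broadcasting, message):
        # The arguments are interpolated into the format string only if the
        # DEBUG level is enabled, so no string is built when debugging is off.
        logger.debug("Broadcasting to %s: %r", broadcasting, message)

In both cases the expensive part, formatting the message, is deferred until the logger has confirmed the record will actually be emitted.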
|
Javascript
|
Javascript
|
pass object from array as value to _fallback
|
d2d5f49953c130253c06621476e0eecdff4544d0
|
<ide><path>src/helpers/helpers.config.js
<ide> function resolveFallback(fallback, prop, value) {
<ide> const getScope = (key, parent) => key === true ? parent
<ide> : typeof key === 'string' ? resolveObjectKey(parent, key) : undefined;
<ide>
<del>function addScopes(set, parentScopes, key, parentFallback) {
<add>function addScopes(set, parentScopes, key, parentFallback, value) {
<ide> for (const parent of parentScopes) {
<ide> const scope = getScope(key, parent);
<ide> if (scope) {
<ide> set.add(scope);
<del> const fallback = resolveFallback(scope._fallback, key, scope);
<add> const fallback = resolveFallback(scope._fallback, key, value);
<ide> if (defined(fallback) && fallback !== key && fallback !== parentFallback) {
<ide> // When we reach the descriptor that defines a new _fallback, return that.
<ide> // The fallback will resume to that new scope.
<ide> function createSubResolver(parentScopes, resolver, prop, value) {
<ide> const allScopes = [...parentScopes, ...rootScopes];
<ide> const set = new Set();
<ide> set.add(value);
<del> let key = addScopesFromKey(set, allScopes, prop, fallback || prop);
<add> let key = addScopesFromKey(set, allScopes, prop, fallback || prop, value);
<ide> if (key === null) {
<ide> return false;
<ide> }
<ide> if (defined(fallback) && fallback !== prop) {
<del> key = addScopesFromKey(set, allScopes, fallback, key);
<add> key = addScopesFromKey(set, allScopes, fallback, key, value);
<ide> if (key === null) {
<ide> return false;
<ide> }
<ide> function createSubResolver(parentScopes, resolver, prop, value) {
<ide> () => subGetTarget(resolver, prop, value));
<ide> }
<ide>
<del>function addScopesFromKey(set, allScopes, key, fallback) {
<add>function addScopesFromKey(set, allScopes, key, fallback, item) {
<ide> while (key) {
<del> key = addScopes(set, allScopes, key, fallback);
<add> key = addScopes(set, allScopes, key, fallback, item);
<ide> }
<ide> return key;
<ide> }
<ide><path>test/specs/helpers.config.tests.js
<ide> describe('Chart.helpers.config', function() {
<ide> });
<ide> });
<ide>
<add> it('should call _fallback with proper value from array when descriptor is object', function() {
<add> const spy = jasmine.createSpy('fallback');
<add> const descriptors = {
<add> items: {
<add> _fallback: spy
<add> }
<add> };
<add> const options = {
<add> items: [{test: true}]
<add> };
<add> const resolver = _createResolver([options, descriptors]);
<add> const opts = _attachContext(resolver, {dummy: true});
<add> const item0 = opts.items[0];
<add> expect(item0.test).toEqual(true);
<add> expect(spy).toHaveBeenCalledWith('items', options.items[0]);
<add> });
<add>
<add> it('should call _fallback with proper value from array when descriptor and defaults are objects', function() {
<add> const spy = jasmine.createSpy('fallback');
<add> const descriptors = {
<add> items: {
<add> _fallback: spy
<add> }
<add> };
<add> const defaults = {
<add> items: {
<add> type: 'defaultType'
<add> }
<add> };
<add> const options = {
<add> items: [{test: true}]
<add> };
<add> const resolver = _createResolver([options, defaults, descriptors]);
<add> const opts = _attachContext(resolver, {dummy: true});
<add> const item0 = opts.items[0];
<add> console.warn(opts._proxy._scopes);
<add> expect(item0.test).toEqual(true);
<add> expect(spy).toHaveBeenCalledWith('items', options.items[0]);
<add> });
<add>
<ide> it('should support overriding options', function() {
<ide> const options = {
<ide> fn1: ctx => ctx.index,
| 2
|
Text
|
Text
|
use svg instead of png to get better image quality
|
c3139dcc1d485bc71bbc10929529dba2ec0a5a3e
|
<ide><path>README.md
<ide> 
<ide>
<del># [Video.js - HTML5 Video Player](http://videojs.com) [](https://travis-ci.org/videojs/video.js)
<add># [Video.js - HTML5 Video Player](http://videojs.com) [](https://travis-ci.org/videojs/video.js)
<ide>
<ide> > Video.js is a web video player built from the ground up for an HTML5 world. It supports HTML5 and Flash video, as well as YouTube and Vimeo (through [plugins](https://github.com/videojs/video.js/wiki/Plugins)). It supports video playback on desktops and mobile devices. This project was started mid 2010, and the player is now used on over ~~50,000~~ 100,000 websites.
<ide>
| 1
|
PHP
|
PHP
|
improve error message
|
f71bb0414e9f198720b5572abc89d86cfc910b72
|
<ide><path>src/Http/ServerRequest.php
<ide> public function __call($name, $params)
<ide>
<ide> return $this->is(...$params);
<ide> }
<del> throw new BadMethodCallException(sprintf('Method %s does not exist', $name));
<add> throw new BadMethodCallException(sprintf('Method "%s()" does not exist', $name));
<ide> }
<ide>
<ide> /**
| 1
|
Go
|
Go
|
fix absolute checks
|
49c1b51ae22e54edc2e13f3195583e47b987fc15
|
<ide><path>api/client/cp.go
<ide> import (
<ide> Cli "github.com/docker/docker/cli"
<ide> "github.com/docker/docker/pkg/archive"
<ide> flag "github.com/docker/docker/pkg/mflag"
<add> "github.com/docker/docker/pkg/system"
<ide> )
<ide>
<ide> type copyDirection int
<ide> func (cli *DockerCli) CmdCp(args ...string) error {
<ide> // client, a `:` could be part of an absolute Windows path, in which case it
<ide> // is immediately followed by a backslash.
<ide> func splitCpArg(arg string) (container, path string) {
<del> if filepath.IsAbs(arg) {
<add> if system.IsAbs(arg) {
<ide> // Explicit local absolute path, e.g., `C:\foo` or `/foo`.
<ide> return "", arg
<ide> }
<ide> func (cli *DockerCli) copyToContainer(srcPath, dstContainer, dstPath string) (er
<ide> // If the destination is a symbolic link, we should evaluate it.
<ide> if err == nil && dstStat.Mode&os.ModeSymlink != 0 {
<ide> linkTarget := dstStat.LinkTarget
<del> if !filepath.IsAbs(linkTarget) {
<add> if !system.IsAbs(linkTarget) {
<ide> // Join with the parent directory.
<ide> dstParent, _ := archive.SplitPathDirEntry(dstPath)
<ide> linkTarget = filepath.Join(dstParent, linkTarget)
<ide><path>builder/dispatchers.go
<ide> package builder
<ide> import (
<ide> "fmt"
<ide> "io/ioutil"
<del> "path"
<add> "os"
<ide> "path/filepath"
<ide> "regexp"
<ide> "runtime"
<ide> import (
<ide> flag "github.com/docker/docker/pkg/mflag"
<ide> "github.com/docker/docker/pkg/nat"
<ide> "github.com/docker/docker/pkg/stringutils"
<add> "github.com/docker/docker/pkg/system"
<ide> "github.com/docker/docker/runconfig"
<ide> )
<ide>
<ide> func workdir(b *builder, args []string, attributes map[string]bool, original str
<ide> return err
<ide> }
<ide>
<del> // Note that workdir passed comes from the Dockerfile. Hence it is in
<del> // Linux format using forward-slashes, even on Windows. However,
<del> // b.Config.WorkingDir is in platform-specific notation (in other words
<del> // on Windows will use `\`
<del> workdir := args[0]
<add> // This is from the Dockerfile and will not necessarily be in platform
<add> // specific semantics, hence ensure it is converted.
<add> workdir := filepath.FromSlash(args[0])
<ide>
<del> isAbs := false
<del> if runtime.GOOS == "windows" {
<del> // Alternate processing for Windows here is necessary as we can't call
<del> // filepath.IsAbs(workDir) as that would verify Windows style paths,
<del> // along with drive-letters (eg c:\pathto\file.txt). We (arguably
<del> // correctly or not) check for both forward and back slashes as this
<del> // is what the 1.4.2 GoLang implementation of IsAbs() does in the
<del> // isSlash() function.
<del> isAbs = workdir[0] == '\\' || workdir[0] == '/'
<del> } else {
<del> isAbs = filepath.IsAbs(workdir)
<del> }
<del>
<del> if !isAbs {
<del> current := b.Config.WorkingDir
<del> if runtime.GOOS == "windows" {
<del> // Convert to Linux format before join
<del> current = strings.Replace(current, "\\", "/", -1)
<del> }
<del> // Must use path.Join so works correctly on Windows, not filepath
<del> workdir = path.Join("/", current, workdir)
<add> if !system.IsAbs(workdir) {
<add> current := filepath.FromSlash(b.Config.WorkingDir)
<add> workdir = filepath.Join(string(os.PathSeparator), current, workdir)
<ide> }
<ide>
<del> // Convert to platform specific format
<del> if runtime.GOOS == "windows" {
<del> workdir = strings.Replace(workdir, "/", "\\", -1)
<del> }
<ide> b.Config.WorkingDir = workdir
<ide>
<ide> return b.commit("", b.Config.Cmd, fmt.Sprintf("WORKDIR %v", workdir))
<ide><path>builder/internals.go
<ide> func calcCopyInfo(b *builder, cmdName string, cInfos *[]*copyInfo, origPath stri
<ide>
<ide> // Twiddle the destPath when its a relative path - meaning, make it
<ide> // relative to the WORKINGDIR
<del> if !filepath.IsAbs(destPath) {
<add> if !system.IsAbs(destPath) {
<ide> hasSlash := strings.HasSuffix(destPath, string(os.PathSeparator))
<ide> destPath = filepath.Join(string(os.PathSeparator), filepath.FromSlash(b.Config.WorkingDir), destPath)
<ide>
<ide><path>pkg/archive/copy.go
<ide> import (
<ide> "strings"
<ide>
<ide> "github.com/Sirupsen/logrus"
<add> "github.com/docker/docker/pkg/system"
<ide> )
<ide>
<ide> // Errors used or returned by this file.
<ide> func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
<ide> return CopyInfo{}, err
<ide> }
<ide>
<del> if !filepath.IsAbs(linkTarget) {
<add> if !system.IsAbs(linkTarget) {
<ide> // Join with the parent directory.
<ide> dstParent, _ := SplitPathDirEntry(path)
<ide> linkTarget = filepath.Join(dstParent, linkTarget)
<ide><path>pkg/symlink/fs.go
<ide> import (
<ide> "os"
<ide> "path/filepath"
<ide> "strings"
<add>
<add> "github.com/docker/docker/pkg/system"
<ide> )
<ide>
<ide> // FollowSymlinkInScope is a wrapper around evalSymlinksInScope that returns an
<ide> func evalSymlinksInScope(path, root string) (string, error) {
<ide> if err != nil {
<ide> return "", err
<ide> }
<del> if filepath.IsAbs(dest) {
<add> if system.IsAbs(dest) {
<ide> b.Reset()
<ide> }
<ide> path = dest + string(filepath.Separator) + path
| 5
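The Docker commit above routes absolute-path checks through system.IsAbs so that Windows-style paths such as C:\foo are classified consistently instead of via ad-hoc slash checks. A small Python sketch of the same cross-platform concern, using the stdlib's per-platform path modules (the sample paths are arbitrary):

    import ntpath
    import posixpath

    # ntpath applies Windows path rules and posixpath applies Unix rules,
    # independent of the host OS; os.path just picks one of them for you.
    for p in (r"C:\foo", r"\foo", "/foo", "foo/bar"):
        print(f"{p!r:12} windows-abs={ntpath.isabs(p)} posix-abs={posixpath.isabs(p)}")

The Go helper plays a similar role: it encapsulates the platform-specific notion of "absolute" in one place rather than scattering runtime.GOOS checks through the callers.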
|
PHP
|
PHP
|
fix bug in core script
|
98ea9ac41f26164eef39ee4d36da11658220e117
|
<ide><path>laravel/core.php
<ide> |
<ide> */
<ide>
<del>if ( ! is_null($environment))
<add>if (isset($environment))
<ide> {
<ide> Request::set_env($environment);
<ide> }
| 1
|
Python
|
Python
|
resolve pr comments
|
dfce40969141eb037e8af3ed64e490a876386bf5
|
<ide><path>examples/run_summarization_finetuning.py
<ide> """ Finetuning seq2seq models for sequence generation."""
<ide>
<ide> import argparse
<del>from collections import deque
<add>import functools
<ide> import logging
<ide> import os
<del>import pickle
<ide> import random
<ide> import sys
<ide>
<ide> from torch.optim import Adam
<ide> from torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler
<ide>
<del>from transformers import AutoTokenizer, PreTrainedSeq2seq, Model2Model
<add>from transformers import (
<add> AutoTokenizer,
<add> BertForMaskedLM,
<add> BertConfig,
<add> PreTrainedSeq2seq,
<add> Model2Model,
<add>)
<add>
<add>from utils_summarization import (
<add> CNNDailyMailDataset,
<add> encode_for_summarization,
<add> fit_to_block_size,
<add> build_lm_labels,
<add> build_mask,
<add> compute_token_type_ids,
<add>)
<ide>
<ide> logger = logging.getLogger(__name__)
<ide> logging.basicConfig(stream=sys.stdout, level=logging.INFO)
<ide> def set_seed(args):
<ide> # ------------
<ide>
<ide>
<del>class TextDataset(Dataset):
<del> """ Abstracts the dataset used to train seq2seq models.
<del>
<del> CNN/Daily News:
<del>
<del> The CNN/Daily News raw datasets are downloaded from [1]. The stories are
<del> stored in different files; the summary appears at the end of the story as
<del> sentences that are prefixed by the special `@highlight` line. To process
<del> the data, untar both datasets in the same folder, and pass the path to this
<del> folder as the "data_dir argument. The formatting code was inspired by [2].
<del>
<del> [1] https://cs.nyu.edu/~kcho/
<del> [2] https://github.com/abisee/cnn-dailymail/
<del> """
<del>
<del> def __init__(self, tokenizer, prefix="train", data_dir="", block_size=512):
<del> assert os.path.isdir(data_dir)
<del>
<del> # Load the features that have already been computed, if any
<del> cached_features_file = os.path.join(
<del> data_dir, "cached_lm_{}_{}".format(block_size, prefix)
<del> )
<del> if os.path.exists(cached_features_file):
<del> logger.info("Loading features from cached file %s", cached_features_file)
<del> with open(cached_features_file, "rb") as source:
<del> self.examples = pickle.load(source)
<del> return
<del>
<del> logger.info("Creating features from dataset at %s", data_dir)
<del> datasets = ["cnn", "dailymail"]
<del>
<del> self.examples = {"source": [], "target": []}
<del> for dataset in datasets:
<del> path_to_stories = os.path.join(data_dir, dataset, "stories")
<del> story_filenames_list = os.listdir(path_to_stories)
<del> for story_filename in story_filenames_list:
<del> path_to_story = os.path.join(path_to_stories, story_filename)
<del> if not os.path.isfile(path_to_story):
<del> continue
<del>
<del> with open(path_to_story, encoding="utf-8") as source:
<del> raw_story = source.read()
<del> story_lines, summary_lines = process_story(raw_story)
<del> if len(summary_lines) == 0 or len(story_lines) == 0:
<del> continue
<del>
<del> story_token_ids, summary_token_ids = _encode_for_summarization(
<del> story_lines, summary_lines, tokenizer
<del> )
<del> story_seq = _fit_to_block_size(story_token_ids, block_size)
<del> self.examples["source"].append(story_seq)
<del>
<del> summary_seq = _fit_to_block_size(summary_token_ids, block_size)
<del> self.examples["summary"].append(summary_seq)
<del>
<del> logger.info("Saving features into cache file %s", cached_features_file)
<del> with open(cached_features_file, "wb") as sink:
<del> pickle.dump(self.examples, sink, protocol=pickle.HIGHEST_PROTOCOL)
<del>
<del> def __len__(self):
<del> return len(self.examples)
<del>
<del> def __getitem__(self, items):
<del> return (
<del> torch.tensor(self.examples["source"][items]),
<del> torch.tensor(self.examples["target"][items]),
<del> )
<del>
<del>
<del>def process_story(raw_story):
<del> """ Extract the story and summary from a story file.
<del>
<del> Attributes:
<del> raw_story (str): content of the story file as an utf-8 encoded string.
<del>
<del> Raises:
<del> IndexError: If the stoy is empty or contains no highlights.
<del> """
<del> nonempty_lines = list(
<del> filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")])
<del> )
<del>
<del> # for some unknown reason some lines miss a period, add it
<del> nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]
<del>
<del> # gather article lines
<del> story_lines = []
<del> lines = deque(nonempty_lines)
<del> while True:
<del> try:
<del> element = lines.popleft()
<del> if element.startswith("@highlight"):
<del> break
<del> story_lines.append(element)
<del> except IndexError:
<del> # if "@highlight" is absent from the file we pop
<del> # all elements until there is None.
<del> return story_lines, []
<del>
<del> # gather summary lines
<del> summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))
<del>
<del> return story_lines, summary_lines
<del>
<del>
<del>def _encode_for_summarization(story_lines, summary_lines, tokenizer):
<del> """ Encode the story and summary lines, and join them
<del> as specified in [1] by using `[SEP] [CLS]` tokens to separate
<del> sentences.
<del> """
<del> story_lines_token_ids = [
<del> tokenizer.add_special_tokens_single_sequence(tokenizer.encode(line))
<del> for line in story_lines
<del> ]
<del> summary_lines_token_ids = [
<del> tokenizer.add_special_tokens_single_sequence(tokenizer.encode(line))
<del> for line in summary_lines
<del> ]
<del>
<del> story_token_ids = [
<del> token for sentence in story_lines_token_ids for token in sentence
<del> ]
<del> summary_token_ids = [
<del> token for sentence in summary_lines_token_ids for token in sentence
<del> ]
<del>
<del> return story_token_ids, summary_token_ids
<del>
<del>
<del>def _add_missing_period(line):
<del> END_TOKENS = [".", "!", "?", "...", "'", "`", '"', u"\u2019", u"\u2019", ")"]
<del> if line.startswith("@highlight"):
<del> return line
<del> if line[-1] in END_TOKENS:
<del> return line
<del> return line + "."
<del>
<del>
<del>def _fit_to_block_size(sequence, block_size):
<del> """ Adapt the source and target sequences' lengths to the block size.
<del> If the sequence is shorter than the block size we pad it with -1 ids
<del> which correspond to padding tokens.
<del> """
<del> if len(sequence) > block_size:
<del> return sequence[:block_size]
<del> else:
<del> sequence.extend([0] * (block_size - len(sequence)))
<del> return sequence
<del>
<del>
<del>def mask_padding_tokens(sequence):
<del> """ Padding token, encoded as 0, are represented by the value -1 in the
<del> masks """
<del> padded = sequence.clone()
<del> padded[padded == 0] = -1
<del> return padded
<del>
<del>
<ide> def load_and_cache_examples(args, tokenizer):
<del> dataset = TextDataset(tokenizer, data_dir=args.data_dir)
<add> dataset = CNNDailyMailDataset(tokenizer, data_dir=args.data_dir)
<ide> return dataset
<ide>
<ide>
<del>def compute_token_type_ids(batch, separator_token_id):
<del> """ Segment embeddings as described in [1]
<del>
<del> The values {0,1} were found in the repository [2].
<del>
<del> Attributes:
<del> batch: torch.Tensor, size [batch_size, block_size]
<del> Batch of input.
<del> separator_token_id: int
<del> The value of the token that separates the segments.
<add>def collate(data, tokenizer, block_size):
<add> """ List of tuple as an input. """
<add> # drop entries with an empty story or summary, then encode and fit to the block size
<add> data = filter(lambda x: not (len(x[0]) == 0 or len(x[1]) == 0), data)
<add> data = [
<add> encode_for_summarization(story, summary, tokenizer) for story, summary in data
<add> ]
<add> data = [
<add> (
<add> fit_to_block_size(story, block_size, tokenizer.pad_token_id),
<add> fit_to_block_size(summary, block_size, tokenizer.pad_token_id),
<add> )
<add> for story, summary in data
<add> ]
<ide>
<del> [1] Liu, Yang, and Mirella Lapata. "Text summarization with pretrained encoders."
<del> arXiv preprint arXiv:1908.08345 (2019).
<del> [2] https://github.com/nlpyang/PreSumm (/src/prepro/data_builder.py, commit fac1217)
<del> """
<del> batch_embeddings = []
<del> sentence_num = 0
<del> for sequence in batch:
<del> embeddings = []
<del> for s in sequence:
<del> if s == separator_token_id:
<del> sentence_num += 1
<del> embeddings.append(sentence_num % 2)
<del> batch_embeddings.append(embeddings)
<del> return torch.tensor(batch_embeddings)
<add> stories = torch.tensor([story for story, summary in data])
<add> summaries = torch.tensor([summary for story, summary in data])
<add> encoder_token_type_ids = compute_token_type_ids(stories, tokenizer.cls_token_id)
<add> encoder_mask = build_mask(stories, tokenizer.pad_token_id)
<add> decoder_mask = build_mask(summaries, tokenizer.pad_token_id)
<add> lm_labels = build_lm_labels(summaries, tokenizer.pad_token_id)
<add>
<add> return (
<add> stories,
<add> summaries,
<add> encoder_token_type_ids,
<add> encoder_mask,
<add> decoder_mask,
<add> lm_labels,
<add> )
<ide>
<ide>
<ide> # ----------
<ide> class BertSumOptimizer(object):
<ide> arXiv preprint arXiv:1908.08345 (2019).
<ide> """
<ide>
<del> def __init__(self, model, lr, warmup_steps, beta_1=0.99, beta_2=0.999, eps=1e-9):
<add> def __init__(self, model, lr, warmup_steps, beta_1=0.99, beta_2=0.999, eps=1e-8):
<ide> self.encoder = model.encoder
<ide> self.decoder = model.decoder
<ide> self.lr = lr
<ide> def train(args, model, tokenizer):
<ide> args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
<ide> train_dataset = load_and_cache_examples(args, tokenizer)
<ide> train_sampler = RandomSampler(train_dataset)
<add> model_collate_fn = functools.partial(collate, tokenizer=tokenizer, block_size=512)
<ide> train_dataloader = DataLoader(
<del> train_dataset, sampler=train_sampler, batch_size=args.train_batch_size
<add> train_dataset,
<add> sampler=train_sampler,
<add> batch_size=args.train_batch_size,
<add> collate_fn=model_collate_fn,
<ide> )
<ide>
<ide> # Training schedule
<ide> def train(args, model, tokenizer):
<ide> for _ in train_iterator:
<ide> epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=True)
<ide> for step, batch in enumerate(epoch_iterator):
<del> source, target = batch
<del> token_type_ids = compute_token_type_ids(source, tokenizer.cls_token_id)
<del> labels_src = mask_padding_tokens(source)
<del> labels_tgt = mask_padding_tokens(target)
<add> source, target, encoder_token_type_ids, encoder_mask, decoder_mask, lm_labels = batch
<ide>
<ide> source = source.to(args.device)
<ide> target = target.to(args.device)
<del> token_type_ids = token_type_ids.to(args.device)
<del> labels_src = labels_src.to(args.device)
<del> labels_tgt = labels_tgt.to(args.device)
<add> encoder_token_type_ids = encoder_token_type_ids.to(args.device)
<add> encoder_mask = encoder_mask.to(args.device)
<add> decoder_mask = decoder_mask.to(args.device)
<add> lm_labels = lm_labels.to(args.device)
<ide>
<ide> model.train()
<ide> outputs = model(
<ide> source,
<ide> target,
<del> token_type_ids=token_type_ids,
<del> decoder_encoder_attention_mask=labels_src,
<del> decoder_attention_mask=labels_tgt,
<del> decoder_lm_labels=labels_tgt,
<del> decoder_initialize_randomly=True,
<add> encoder_token_type_ids=encoder_token_type_ids,
<add> encoder_attention_mask=encoder_mask,
<add> decoder_attention_mask=decoder_mask,
<add> decoder_lm_labels=lm_labels,
<ide> )
<ide>
<ide> loss = outputs[0]
<ide> def evaluate(args, model, tokenizer, prefix=""):
<ide> model.eval()
<ide>
<ide> for batch in tqdm(eval_dataloader, desc="Evaluating"):
<del> source, target = batch
<del> labels_src = mask_padding_tokens(source)
<del> labels_tgt = mask_padding_tokens(target)
<del> source.to(args.device)
<del> target.to(args.device)
<del> labels_src.to(args.device)
<del> labels_tgt.to(args.device)
<add> source, target, encoder_token_type_ids, encoder_mask, decoder_mask, lm_labels = batch
<add>
<add> source = source.to(args.device)
<add> target = target.to(args.device)
<add> encoder_token_type_ids = encoder_token_type_ids.to(args.device)
<add> encoder_mask = encoder_mask.to(args.device)
<add> decoder_mask = decoder_mask.to(args.device)
<add> lm_labels = lm_labels.to(args.device)
<ide>
<ide> with torch.no_grad():
<ide> outputs = model(
<ide> source,
<ide> target,
<del> decoder_encoder_attention_mask=labels_src,
<del> decoder_attention_mask=labels_tgt,
<del> decoder_lm_labels=labels_tgt,
<add> encoder_token_type_ids=encoder_token_type_ids,
<add> encoder_attention_mask=encoder_mask,
<add> decoder_attention_mask=decoder_mask,
<add> decoder_lm_labels=lm_labels,
<ide> )
<ide> lm_loss = outputs[0]
<ide> eval_loss += lm_loss.mean().item()
<ide> def main():
<ide> )
<ide> parser.add_argument(
<ide> "--num_train_epochs",
<del> default=1,
<add> default=10,
<ide> type=int,
<ide> help="Total number of training epochs to perform.",
<ide> )
<ide> def main():
<ide> args.device = torch.device("cuda")
<ide> args.n_gpu = torch.cuda.device_count()
<ide>
<del> # Load pretrained model and tokenizer
<add> # Load pretrained model and tokenizer. The decoder's weights are randomly initialized.
<ide> tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)
<del> model = Model2Model.from_pretrained(args.model_name_or_path)
<add> config = BertConfig.from_pretrained(args.model_name_or_path)
<add> decoder_model = BertForMaskedLM(config)
<add> model = Model2Model.from_pretrained(
<add> args.model_name_or_path, decoder_model=decoder_model
<add> )
<ide>
<ide> # Setup logging
<ide> logging.basicConfig(
<ide><path>examples/run_summarization_finetuning_test.py
<del># coding=utf-8
<del># Copyright 2019 HuggingFace Inc.
<del>#
<del># Licensed under the Apache License, Version 2.0 (the "License");
<del># you may not use this file except in compliance with the License.
<del># You may obtain a copy of the License at
<del>#
<del># http://www.apache.org/licenses/LICENSE-2.0
<del>#
<del># Unless required by applicable law or agreed to in writing, software
<del># distributed under the License is distributed on an "AS IS" BASIS,
<del># WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
<del># See the License for the specific language governing permissions and
<del># limitations under the License.
<del>import unittest
<del>
<del>from run_summarization_finetuning import _fit_to_block_size, process_story
<del>
<del>
<del>class DataLoaderTest(unittest.TestCase):
<del> def setUp(self):
<del> self.block_size = 10
<del>
<del> def test_truncate_sequence_too_small(self):
<del> """ Pad the sequence with 0 if the sequence is smaller than the block size."""
<del> sequence = [1, 2, 3, 4]
<del> expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
<del> self.assertEqual(_fit_to_block_size(sequence, self.block_size), expected_output)
<del>
<del> def test_truncate_sequence_fit_exactly(self):
<del> sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
<del> expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
<del> self.assertEqual(_fit_to_block_size(sequence, self.block_size), expected_output)
<del>
<del> def test_truncate_sequence_too_big(self):
<del> sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
<del> expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
<del> self.assertEqual(_fit_to_block_size(sequence, self.block_size), expected_output)
<del>
<del> def test_process_story_no_highlights(self):
<del> """ Processing a story with no highlights should raise an exception.
<del> """
<del> raw_story = """It was the year of Our Lord one thousand seven hundred and
<del> seventy-five.\n\nSpiritual revelations were conceded to England at that
<del> favoured period, as at this."""
<del> _, summary = process_story(raw_story)
<del> self.assertEqual(summary, [])
<del>
<del> def test_process_empty_story(self):
<del> """ An empty story should also raise and exception.
<del> """
<del> raw_story = ""
<del> story, summary = process_story(raw_story)
<del> self.assertEqual(story, [])
<del> self.assertEqual(summary, [])
<del>
<del> def test_story_with_missing_period(self):
<del> raw_story = (
<del> "It was the year of Our Lord one thousand seven hundred and "
<del> "seventy-five\n\nSpiritual revelations were conceded to England "
<del> "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
<del> )
<del> story_lines, summary_lines = process_story(raw_story)
<del>
<del> expected_story_lines = [
<del> "It was the year of Our Lord one thousand seven hundred and seventy-five.",
<del> "Spiritual revelations were conceded to England at that favoured period, as at this.",
<del> ]
<del> self.assertEqual(expected_story_lines, story_lines)
<del>
<del> expected_summary_lines = ["It was the best of times."]
<del> self.assertEqual(expected_summary_lines, summary_lines)
<del>
<del>
<del>if __name__ == "__main__":
<del> unittest.main()
<ide><path>examples/utils_summarization.py
<add>from collections import deque
<add>import os
<add>
<add>import torch
<add>from torch.utils.data import Dataset
<add>
<add>
<add># ------------
<add># Data loading
<add># ------------
<add>
<add>
<add>class CNNDailyMailDataset(Dataset):
<add> """ Abstracts the dataset used to train seq2seq models.
<add>
<add> CNN/Daily News:
<add>
<add> The CNN/Daily News raw datasets are downloaded from [1]. The stories are
<add> stored in different files; the summary appears at the end of the story as
<add> sentences that are prefixed by the special `@highlight` line. To process
<add> the data, untar both datasets in the same folder, and pass the path to this
<add> folder as the "data_dir argument. The formatting code was inspired by [2].
<add>
<add> [1] https://cs.nyu.edu/~kcho/
<add> [2] https://github.com/abisee/cnn-dailymail/
<add> """
<add>
<add> def __init__(self, tokenizer, prefix="train", data_dir=""):
<add> assert os.path.isdir(data_dir)
<add> self.tokenizer = tokenizer
<add>
<add> # We initialize the class by listing all the files that contain
<add> # stories and summaries. Files are not read in memory given
<add> # the size of the corpus.
<add> self.stories_path = []
<add> datasets = ("cnn", "dailymail")
<add> for dataset in datasets:
<add> path_to_stories = os.path.join(data_dir, dataset, "stories")
<add> story_filenames_list = os.listdir(path_to_stories)
<add> for story_filename in story_filenames_list:
<add> path_to_story = os.path.join(path_to_stories, story_filename)
<add> if not os.path.isfile(path_to_story):
<add> continue
<add> self.stories_path.append(path_to_story)
<add>
<add> def __len__(self):
<add> return len(self.stories_path)
<add>
<add> def __getitem__(self, idx):
<add> story_path = self.stories_path[idx]
<add> with open(story_path, encoding="utf-8") as source:
<add> raw_story = source.read()
<add> story_lines, summary_lines = process_story(raw_story)
<add> return story_lines, summary_lines
<add>
<add>
<add>def process_story(raw_story):
<add> """ Extract the story and summary from a story file.
<add>
<add> Attributes:
<add> raw_story (str): content of the story file as an utf-8 encoded string.
<add>
<add> Raises:
<add> IndexError: If the story is empty or contains no highlights.
<add> """
<add> nonempty_lines = list(
<add> filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")])
<add> )
<add>
<add> # for some unknown reason some lines miss a period, add it
<add> nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]
<add>
<add> # gather article lines
<add> story_lines = []
<add> lines = deque(nonempty_lines)
<add> while True:
<add> try:
<add> element = lines.popleft()
<add> if element.startswith("@highlight"):
<add> break
<add> story_lines.append(element)
<add> except IndexError:
<add> # if "@highlight" is absent from the file we pop
<add> # all elements until there is None.
<add> return story_lines, []
<add>
<add> # gather summary lines
<add> summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))
<add>
<add> return story_lines, summary_lines
<add>
<add>
<add>def _add_missing_period(line):
<add> END_TOKENS = [".", "!", "?", "...", "'", "`", '"', u"\u2019", u"\u2019", ")"]
<add> if line.startswith("@highlight"):
<add> return line
<add> if line[-1] in END_TOKENS:
<add> return line
<add> return line + "."
<add>
<add>
<add># --------------------------
<add># Encoding and preprocessing
<add># --------------------------
<add>
<add>
<add>def fit_to_block_size(sequence, block_size, pad_token):
<add> """ Adapt the source and target sequences' lengths to the block size.
<add> If the sequence is shorter than the block size we pad it with -1 ids
<add> which correspond to padding tokens.
<add> """
<add> if len(sequence) > block_size:
<add> return sequence[:block_size]
<add> else:
<add> sequence.extend([pad_token] * (block_size - len(sequence)))
<add> return sequence
<add>
<add>
<add>def build_lm_labels(sequence, pad_token):
<add> """ Padding token, encoded as 0, are represented by the value -1 so they
<add> are not taken into account in the loss computation. """
<add> padded = sequence.clone()
<add> padded[padded == pad_token] = -1
<add> return padded
<add>
<add>
<add>def build_mask(sequence, pad_token):
<add> """ Builds the mask. The attention mechanism will only attend to positions
<add> with value 1. """
<add> mask = sequence.clone()
<add> mask[mask != pad_token] = 1
<add> mask[mask == pad_token] = 0
<add> return mask
<add>
<add>
<add>def encode_for_summarization(story_lines, summary_lines, tokenizer):
<add> """ Encode the story and summary lines, and join them
<add> as specified in [1] by using `[SEP] [CLS]` tokens to separate
<add> sentences.
<add> """
<add> story_lines_token_ids = [
<add> tokenizer.add_special_tokens_single_sequence(tokenizer.encode(line))
<add> for line in story_lines
<add> ]
<add> summary_lines_token_ids = [
<add> tokenizer.add_special_tokens_single_sequence(tokenizer.encode(line))
<add> for line in summary_lines
<add> ]
<add>
<add> story_token_ids = [
<add> token for sentence in story_lines_token_ids for token in sentence
<add> ]
<add> summary_token_ids = [
<add> token for sentence in summary_lines_token_ids for token in sentence
<add> ]
<add>
<add> return story_token_ids, summary_token_ids
<add>
<add>
<add>def compute_token_type_ids(batch, separator_token_id):
<add> """ Segment embeddings as described in [1]
<add>
<add> The values {0,1} were found in the repository [2].
<add>
<add> Attributes:
<add> batch: torch.Tensor, size [batch_size, block_size]
<add> Batch of input.
<add> separator_token_id: int
<add> The value of the token that separates the segments.
<add>
<add> [1] Liu, Yang, and Mirella Lapata. "Text summarization with pretrained encoders."
<add> arXiv preprint arXiv:1908.08345 (2019).
<add> [2] https://github.com/nlpyang/PreSumm (/src/prepro/data_builder.py, commit fac1217)
<add> """
<add> batch_embeddings = []
<add> for sequence in batch:
<add> sentence_num = 0
<add> embeddings = []
<add> for s in sequence:
<add> if s == separator_token_id:
<add> sentence_num += 1
<add> embeddings.append(sentence_num % 2)
<add> batch_embeddings.append(embeddings)
<add> return torch.tensor(batch_embeddings)
<ide><path>examples/utils_summarization_test.py
<add># coding=utf-8
<add># Copyright 2019 HuggingFace Inc.
<add>#
<add># Licensed under the Apache License, Version 2.0 (the "License");
<add># you may not use this file except in compliance with the License.
<add># You may obtain a copy of the License at
<add>#
<add># http://www.apache.org/licenses/LICENSE-2.0
<add>#
<add># Unless required by applicable law or agreed to in writing, software
<add># distributed under the License is distributed on an "AS IS" BASIS,
<add># WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
<add># See the License for the specific language governing permissions and
<add># limitations under the License.
<add>import unittest
<add>
<add>import numpy as np
<add>import torch
<add>
<add>from utils_summarization import (
<add> compute_token_type_ids,
<add> fit_to_block_size,
<add> build_mask,
<add> build_lm_labels,
<add> process_story,
<add>)
<add>
<add>
<add>class SummarizationDataProcessingTest(unittest.TestCase):
<add> def setUp(self):
<add> self.block_size = 10
<add>
<add> def test_fit_to_block_sequence_too_small(self):
<add> """ Pad the sequence with 0 if the sequence is smaller than the block size."""
<add> sequence = [1, 2, 3, 4]
<add> expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
<add> self.assertEqual(
<add> fit_to_block_size(sequence, self.block_size, 0), expected_output
<add> )
<add>
<add> def test_fit_to_block_sequence_fit_exactly(self):
<add> """ Do nothing if the sequence is the right size. """
<add> sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
<add> expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
<add> self.assertEqual(
<add> fit_to_block_size(sequence, self.block_size, 0), expected_output
<add> )
<add>
<add> def test_fit_to_block_sequence_too_big(self):
<add> """ Truncate the sequence if it is too long. """
<add> sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
<add> expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
<add> self.assertEqual(
<add> fit_to_block_size(sequence, self.block_size, 0), expected_output
<add> )
<add>
<add> def test_process_story_no_highlights(self):
<add> """ Processing a story with no highlights returns an empty list for the summary.
<add> """
<add> raw_story = """It was the year of Our Lord one thousand seven hundred and
<add> seventy-five.\n\nSpiritual revelations were conceded to England at that
<add> favoured period, as at this."""
<add> _, summary_lines = process_story(raw_story)
<add> self.assertEqual(summary_lines, [])
<add>
<add> def test_process_empty_story(self):
<add> """ An empty story returns an empty collection of lines.
<add> """
<add> raw_story = ""
<add> story_lines, summary_lines = process_story(raw_story)
<add> self.assertEqual(story_lines, [])
<add> self.assertEqual(summary_lines, [])
<add>
<add> def test_process_story_with_missing_period(self):
<add> raw_story = (
<add> "It was the year of Our Lord one thousand seven hundred and "
<add> "seventy-five\n\nSpiritual revelations were conceded to England "
<add> "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
<add> )
<add> story_lines, summary_lines = process_story(raw_story)
<add>
<add> expected_story_lines = [
<add> "It was the year of Our Lord one thousand seven hundred and seventy-five.",
<add> "Spiritual revelations were conceded to England at that favoured period, as at this.",
<add> ]
<add> self.assertEqual(expected_story_lines, story_lines)
<add>
<add> expected_summary_lines = ["It was the best of times."]
<add> self.assertEqual(expected_summary_lines, summary_lines)
<add>
<add> def test_build_lm_labels_no_padding(self):
<add> sequence = torch.tensor([1, 2, 3, 4])
<add> expected = sequence
<add> np.testing.assert_array_equal(
<add> build_lm_labels(sequence, 0).numpy(), expected.numpy()
<add> )
<add>
<add> def test_build_lm_labels(self):
<add> sequence = torch.tensor([1, 2, 3, 4, 0, 0, 0])
<add> expected = torch.tensor([1, 2, 3, 4, -1, -1, -1])
<add> np.testing.assert_array_equal(
<add> build_lm_labels(sequence, 0).numpy(), expected.numpy()
<add> )
<add>
<add> def test_build_mask_no_padding(self):
<add> sequence = torch.tensor([1, 2, 3, 4])
<add> expected = torch.tensor([1, 1, 1, 1])
<add> np.testing.assert_array_equal(
<add> build_mask(sequence, 0).numpy(), expected.numpy()
<add> )
<add>
<add> def test_build_mask(self):
<add> sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
<add> expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
<add> np.testing.assert_array_equal(
<add> build_mask(sequence, 23).numpy(), expected.numpy()
<add> )
<add>
<add> def test_compute_token_type_ids(self):
<add> separator = 101
<add> batch = torch.tensor(
<add> [[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]]
<add> )
<add> expected = torch.tensor(
<add> [[0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1], [0, 1, 1, 1, 0, 0]]
<add> )
<add>
<add> result = compute_token_type_ids(batch, separator)
<add> np.testing.assert_array_equal(result, expected)
<add>
<add>
<add>if __name__ == "__main__":
<add> unittest.main()
<ide><path>transformers/modeling_beam_search.py
<ide> from torch import nn
<ide>
<ide>
<del>class ModelWithBeamSearch(nn.Module):
<add>class TransformerBeamSearch(nn.Module):
<ide> def __init__(
<ide> self,
<ide> model,
<add> tokenizer,
<add> batch_size,
<ide> beam_size,
<del> start_token_id,
<del> end_token_id,
<del> pad_token_id,
<ide> min_length,
<ide> max_length,
<del> alpha,
<del> block_trigram=True,
<add> alpha=0,
<add> block_repeating_trigram=True,
<ide> ):
<ide> """
<ide> Attributes:
<ide> mask_word_id: token id that corresponds to the mask
<ide> """
<del> super(ModelWithBeamSearch, self).__init__()
<add> super(TransformerBeamSearch, self).__init__()
<ide> self.model = model
<add> self.tokenizer = tokenizer
<add>
<add> self.start_token_id = tokenizer.start_token_id
<add> self.end_token_id = tokenizer.end_token_id
<add> self.pad_token_id = tokenizer.pad_token_id
<add>
<ide> self.beam_size = beam_size
<del> self.start_token_id = start_token_id
<del> self.end_token_id = end_token_id
<del> self.pad_token_id = pad_token_id
<ide> self.min_length = min_length
<ide> self.max_length = max_length
<add>
<add> self.block_repeating_trigram = block_repeating_trigram
<add> self.apply_length_penalty = False if alpha == 0 else True
<ide> self.alpha = alpha
<del> self.block_trigram = block_trigram
<ide>
<del> def forward(self, input_ids, **kwargs):
<del> # Separate the encoder- and decoder- specific kwargs. A kwarg is
<del> # decoder-specific it the key starts with `decoder_`
<add> # State of the beam
<add> self.hypotheses = [[] for _ in range(batch_size)]
<add> self.batch_offset = torch.arange(batch_size, dtype=torch.long)
<add> self.beam_offset = torch.arange(
<add> 0, batch_size * self.beam_size, step=self.beam_size, dtype=torch.long
<add> )
<add> self.growing_beam = torch.full(
<add> (batch_size * self.beam_size, 1), self.start_token_id, dtype=torch.long
<add> )
<add> self.topk_log_probabilities = torch.tensor(
<add> [0.0] + [float("-inf")] * (self.beam_size - 1), dtype=torch.float
<add> ).repeat(batch_size)
<add> self.results = {
<add> "prediction": [[] for _ in batch_size],
<add> "scores": [[] for _ in batch_size],
<add> }
<add> self._step = 0
<add> self.is_done = False
<add>
<add> def step(self, log_probabilities):
<add> """ Grows the beam by one step. """
<add> self._step += 1
<add>
<add> # The batch size changes as some beams finish so we define _B
<add> vocab_size = log_probabilities.size(-1)
<add> _B = log_probabilities.size(0) // self.beam_size
<add>
<add> # Multiply each beam probability with the probability of the
<add> # next token (conditioned on the words in the beam).
<add> log_probabilities += self.topk_log_probabilities.view(-1, 1)
<add>
<add> self.enforce_min_length(log_probabilities)
<add> if self.block_repeating_trigram:
<add> self.remove_repeating_trigrams(log_probabilities, _B)
<add>
<add> # Find the `beam_size` (previous_beam + token) combinations with
<add> # the highest score
<add> topk_log_probabilities, topk_ids = log_probabilities.topk(
<add> log_probabilities.view(_B, self.beam_size * vocab_size),
<add> self.beam_size,
<add> dim=1,
<add> )
<add>
<add> # Apply the length penalty. The +1 accounts for the [EOS] token
<add> # that will be added if the beam ends.
<add> topk_scores = topk_log_probabilities / self.length_penalty()
<add>
<add> # Retrieve the corresponding respective beam and token id
<add> # topk_token_ids[i] will be added to topk_beam_ids[i]
<add> topk_beam_ids = topk_ids.div(vocab_size)
<add> topk_token_ids = topk_ids.fmod(vocab_size)
<add>
<add> # Retrieve the row index of the surviving beams in the original
<add> # view of the log_probabilities tensor
<add> surviving_beams_rows = (topk_beam_ids + self.beam_offset[:_B].view(-1, 1)).view(
<add> -1
<add> )
<add>
<add> # Append the last predictions
<add> self.growing_beam = torch.cat(
<add> [
<add> self.growing_beam.index_select(0, surviving_beams_rows),
<add> topk_token_ids.view(-1, 1),
<add> ],
<add> 1,
<add> )
<add>
<add> # Check if any of the beam searches has ended during this
<add> # growth step. Also if top beam (most probable) has ended
<add> # for one element of the batch.
<add> is_finished = topk_token_ids.eq(self.end_token_id)
<add> self.enforce_max_length()
<add> is_top_beam_finished = is_finished[:, 0].eq(1)
<add>
<add> # Save the finished searches
<add> if is_finished.any():
<add> predictions = self.growing_beam.view(
<add> -1, self.beam_size, self.growing_beam.size(1)
<add> )
<add> for i in range(is_finished.size(0)):
<add> if is_top_beam_finished[i]:
<add> is_finished[i].fill_(1)
<add> finished_hyp = is_finished[i].nonzero().view(-1)
<add>
<add> # Store finished hypotheses for this batch.
<add> b = self.batch_offset[i]
<add> for j in finished_hyp:
<add> self.hypotheses[b].append((topk_scores[i, j], predictions[i, j, :]))
<add>
<add> # If the batch reached the end, save the best hypotheses
<add> # in terms of length-penalized score.
<add> if is_top_beam_finished[i]:
<add> best_hyp = sorted(
<add> self.hypotheses[b], key=lambda x: x[0], reverse=True
<add> )
<add> best_score, best_prediction = best_hyp[0]
<add> self.results["scores"][b].append(best_score)
<add> self.results["predictions"][b].append(best_prediction)
<add>
<add> non_finished = is_top_beam_finished.eq(0).nonzero().view(-1)
<add> if len(non_finished) == 0:
<add> self.is_done = True
<add>
<add> # Remove finished batches for the next step.
<add> topk_log_probabilities = topk_log_probabilities.index_select(
<add> 0, non_finished
<add> )
<add> self.batch_offset = self.batch_offset.index_select(0, non_finished)
<add> self.growing_beam = predictions.index_select(0, non_finished).view(
<add> -1, self.growing_beam.size(-1)
<add> )
<add>
<add> surviving_beams_rows = surviving_beams_rows.index_select(0, non_finished)
<add>
<add> return surviving_beams_rows
<add>
<add> def forward(self, encoder_input_ids, **kwargs):
<add> # keyword arguments come in 3 flavors: encoder-specific (prefixed by
<add> # `encoder_`), decoder-specific (prefixed by `decoder_`) and those
<add> # that apply to the model as a whole.
<add> # We let the specific kwargs override the common ones in case of conflict.
<ide> kwargs_encoder = {
<del> argument: value
<add> argument[len("encoder_"):]: value
<ide> for argument, value in kwargs.items()
<del> if not argument.startswith("decoder_")
<add> if argument.startswith("encoder_")
<ide> }
<ide> kwargs_decoder = {
<ide> argument[len("decoder_"):]: value
<ide> for argument, value in kwargs.items()
<ide> if argument.startswith("decoder_")
<ide> }
<add> kwargs_common = {
<add> argument: value
<add> for argument, value in kwargs.items()
<add> if not (argument.startswith("encoder_") or argument.startswith("decoder_"))
<add> }
<add> kwargs_decoder = dict(kwargs_common, **kwargs_decoder)
<add> kwargs_encoder = dict(kwargs_common, **kwargs_encoder)
<ide>
<del> batch_size, _ = input_ids.size(0)
<del>
<del> # Variables that keep track of the status of the search
<del> hypotheses = [[] for _ in range(batch_size)]
<del> batch_offset = torch.arange(batch_size, dtype=torch.long)
<del> beam_offset = torch.arange(
<del> 0,
<del> batch_size * self.beam_size,
<del> step=self.beam_size,
<del> dtype=torch.long,
<del> )
<del> growing_beam = torch.full(
<del> (batch_size * self.beam_size, 1),
<del> self.start_token_id,
<del> dtype=torch.long,
<del> )
<del> topk_log_probabilities = torch.tensor(
<del> [0.0] + [float("-inf")] * (self.beam_size - 1),
<del> dtype=torch.float,
<del> ).repeat(batch_size)
<del>
<del> # Forward pass on the encoder
<del> encoder_outputs = self.encoder(input_ids, kwargs_encoder)
<add> # forward pass on the encoder
<add> encoder_outputs = self.model.encoder.forward(encoder_input_ids, kwargs_encoder)
<ide> kwargs_decoder["encoder_hidden_states"] = tile(
<ide> encoder_outputs, self.beam_size, dim=0
<ide> )
<ide>
<del> results = {}
<del> results["predictions"] = [[] for _ in batch_size]
<del> results["scores"] = [[] for _ in batch_size]
<del>
<add> # grow the beam by generating sequences in an autoregressive way
<add> self.growing_beam = torch.full(
<add> (self.batch_size * self.beam_size, 1), self.start_token_id, dtype=torch.long
<add> )
<ide> for step in range(self.max_length):
<del> decoder_input = growing_beam[:, -1]
<del> outputs = self.decoder(decoder_input, kwargs_decoder)
<add> decoder_input = self.growing_beam[:, -1]
<add> outputs = self.model.decoder(decoder_input, kwargs_decoder)
<ide> log_probabilities = torch.nn.functional.log_softmax(outputs[1])
<del> vocab_size = log_probabilities.size(-1)
<del>
<del> # The batch size changes as some beams finish so we define:
<del> _B = log_probabilities.size(0) // self.beam_size
<del>
<del> # Multiply each beam probability with the probability of the
<del> # next token (conditioned on the words in the beam).
<del> log_probabilities += topk_log_probabilities.view(-1, 1)
<del>
<del> # if the beam has not attained the minimum required length we
<del> # make the end token arbitrarily unlikely.
<del> if step < self.min_length:
<del> log_probabilities[self.end_token_id] = -1e20
<del>
<del> # Remove repeating tri-grams
<del> if(self.args.block_trigram):
<del> if(step + 1 > 3):
<del> for i in range(_B * self.beam_size):
<del> tokens = [t for t in growing_beam[i]]
<del> trigrams = [(tokens[i-1], tokens[i], tokens[i+1]) for i in range(1, len(words) - 1)]
<del> last_trigram = tuple(trigrams[-1])
<del> if last_trigram in trigrams[:-1]:
<del> log_probabilities[i] = -1e20
<del>
<del> # Find the `beam_size` (previous_beam + token) combinations with
<del> # the highest score
<del> topk_log_probabilities, topk_ids = log_probabilities.topk(
<del> log_probabilities.view(_B, self.beam_size * vocab_size),
<del> self.beam_size,
<del> dim=1
<del> )
<del>
<del> # Apply the length penalty. The +1 accounts for the [EOS] token
<del> # that will be added if the beam ends.
<del> length_penalty = ((5.0 + (step + 1)) / 6.0) ** self.alpha
<del> topk_scores = topk_log_probabilities / length_penalty
<del>
<del> # Retrieve the corresponding respective beam and token id
<del> # topk_token_ids[i] will be added to topk_beam_ids[i]
<del> topk_beam_ids = topk_ids.div(vocab_size)
<del> topk_token_ids = topk_ids.fmod(vocab_size)
<del>
<del> # Retrieve the row index of the surviving beams in the original
<del> # view of the log_probabilities tensor
<del> surviving_beams_rows = (
<del> topk_beam_ids + beam_offset[:_B].view(-1, 1)
<del> ).view(-1)
<del>
<del> # Append the last predictions
<del> growing_beam = torch.cat(
<del> [
<del> growing_beam.index_select(0, surviving_beams_rows),
<del> topk_token_ids.view(-1, 1),
<del> ],
<del> 1,
<del> )
<add> surviving_beams_rows = self.step(log_probabilities)
<add> if self.is_done:
<add> break
<ide>
<del> # Check if any of the beam searches has ended during this
<del> # growth step. Also if top beam (most probable) has ended
<del> # for one element of the batch.
<del> is_finished = topk_token_ids.eq(self.end_token_id)
<del> if step + 1 == self.max_length:
<del> is_finished.fill_(1)
<del> is_top_beam_finished = is_finished[:, 0].eq(1)
<del>
<del> # Save the finished searches
<del> if is_finished.any():
<del> predictions = growing_beam.view(-1, self.beam_size, growing_beam.size(1))
<del> for i in range(is_finished.size(0)):
<del> if is_top_beam_finished[i]:
<del> is_finished[i].fill_(1)
<del> finished_hyp = is_finished[i].nonzero().view(-1)
<del>
<del> # Store finished hypotheses for this batch.
<del> b = batch_offset[i]
<del> for j in finished_hyp:
<del> hypotheses[b].append((topk_scores[i, j], predictions[i, j, :]))
<del>
<del> # If the batch reached the end, save the best hypotheses
<del> # in terms of length-penalized score.
<del> if is_top_beam_finished[i]:
<del> best_hyp = sorted(
<del> hypotheses[b], key=lambda x: x[0], reverse=True
<del> )
<del> best_score, best_prediction = best_hyp[0]
<del> results["scores"][b].append(best_score)
<del> results["predictions"][b].append(best_prediction)
<del>
<del> non_finished = is_top_beam_finished.eq(0).nonzero().view(-1)
<del> if len(non_finished) == 0:
<del> break
<del>
<del> # Remove finished batches for the next step.
<del> topk_log_probabilities = topk_log_probabilities.index_select(0, non_finished)
<del> batch_offset = batch_offset.index_select(0, non_finished)
<del> growing_beam = predictions.index_select(0, non_finished).view(
<del> -1, growing_beam.size(-1)
<del> )
<del>
<del> # Re-order the state for the next pass
<del> surviving_beams_rows = surviving_beams_rows.index_select(0, non_finished)
<ide> kwargs_decoder["encoder_hidden_states"] = kwargs_decoder[
<ide> "encoder_hidden_states"
<ide> ].index_select(0, surviving_beams_rows)
<ide>
<del> return results
<add> return self.results
<add>
<add> def remove_repeating_trigrams(self, log_probabilities, _B):
<add> if(self._step + 1 > 3):
<add> for i in range(_B * self.beam_size):
<add> tokens = [t for t in self.growing_beam[i]]
<add> trigrams = [(tokens[j - 1], tokens[j], tokens[j + 1]) for j in range(1, len(tokens) - 1)]
<add> last_trigram = tuple(trigrams[-1])
<add> if last_trigram in trigrams[:-1]:
<add> log_probabilities[i] = -1e20
<add>
<add> def enforce_min_length(self):
<add> if self._step < self.min_length:
<add> self.log_probabilities[self.end_token_id] = -1e20
<add>
<add> def enforce_max_length(self):
<add> if self._step + 1 == self.max_length:
<add> self.is_finished.fill_(1)
<add>
<add> def length_penalty(self):
<add> return ((5.0 + (self._step + 1)) / 6.0) ** self.alpha
<ide>
<ide>
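For context, a minimal standalone sketch of the length-penalty and repeated-trigram logic that the helpers above encapsulate. The `((5 + t) / 6) ** alpha` formula and the `-1e20` masking value come straight from the hunk; the function names and toy inputs below are illustrative only.

```python
import torch

def length_penalty(step: int, alpha: float) -> float:
    # Same GNMT-style penalty as BeamSearch.length_penalty above.
    return ((5.0 + (step + 1)) / 6.0) ** alpha

def block_repeated_trigrams(log_probabilities: torch.Tensor, beams: list) -> None:
    # Push to -1e20 any beam whose newest trigram already appeared earlier in
    # that beam, mirroring remove_repeating_trigrams above.
    for i, tokens in enumerate(beams):
        if len(tokens) < 4:
            continue
        trigrams = [tuple(tokens[j - 1:j + 2]) for j in range(1, len(tokens) - 1)]
        if trigrams[-1] in trigrams[:-1]:
            log_probabilities[i] = -1e20  # beam is effectively dropped

scores = torch.zeros(2, 7)
block_repeated_trigrams(scores, [[1, 2, 3, 1, 2, 3], [4, 5, 6, 7, 8, 9]])
print(length_penalty(step=4, alpha=0.95), scores[0, 0].item())
```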
<ide> def tile(x, count, dim=0):
<ide><path>transformers/modeling_bert.py
<ide> def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_
<ide> """
<ide> if attention_mask is None:
<ide> attention_mask = torch.ones_like(input_ids)
<add> if encoder_attention_mask is None:
<add> encoder_attention_mask = torch.ones_like(input_ids)
<ide> if token_type_ids is None:
<ide> token_type_ids = torch.zeros_like(input_ids)
<ide>
<ide> def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_
<ide> extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
<ide> extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
<ide>
<del> # If a 2D encoder attention mask is provided for the cross-attention
<add>        # If a 2D or 3D attention mask is provided for the cross-attention
<ide>        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
<del> if encoder_attention_mask is not None:
<del> encoder_attention_mask = encoder_attention_mask[:, None, None, :]
<del> encoder_attention_mask = encoder_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
<del> encoder_attention_mask = (1.0 - encoder_attention_mask) * -10000.0
<add> if encoder_attention_mask.dim() == 3:
<add> encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
<add> if encoder_attention_mask.dim() == 2:
<add> encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
<add>
<add> encoder_extended_attention_mask = encoder_extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
<add> encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0
<ide>
<ide> # Prepare head mask if needed
<ide> # 1.0 in head_mask indicate we keep the head
<ide> def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_
<ide> attention_mask=extended_attention_mask,
<ide> head_mask=head_mask,
<ide> encoder_hidden_states=encoder_hidden_states,
<del> encoder_attention_mask=encoder_attention_mask)
<add> encoder_attention_mask=encoder_extended_attention_mask)
<ide> sequence_output = encoder_outputs[0]
<ide> pooled_output = self.pooler(sequence_output)
<ide>
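The mask handling added in the hunk above can be summarised in a small self-contained sketch: a 2D padding mask of shape `(batch, src_len)` or a 3D mask of shape `(batch, tgt_len, src_len)` is expanded to four dimensions and converted to an additive mask, so masked positions contribute `-10000.0` to the attention scores before the softmax. The shapes and the constant mirror the code above; the helper name is made up.

```python
import torch

def extend_encoder_attention_mask(mask: torch.Tensor, dtype=torch.float32) -> torch.Tensor:
    if mask.dim() == 3:
        # (batch, tgt_len, src_len) -> (batch, 1, tgt_len, src_len)
        extended = mask[:, None, :, :]
    elif mask.dim() == 2:
        # (batch, src_len) -> (batch, 1, 1, src_len)
        extended = mask[:, None, None, :]
    else:
        raise ValueError(f"unsupported mask rank: {mask.dim()}")
    extended = extended.to(dtype=dtype)
    # 1 -> 0.0 (attend), 0 -> -10000.0 (masked), added to the raw attention scores
    return (1.0 - extended) * -10000.0

mask_2d = torch.tensor([[1, 1, 0]])                  # last source position is padding
print(extend_encoder_attention_mask(mask_2d).shape)  # torch.Size([1, 1, 1, 3])
```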
<ide> class BertForMaskedLM(BertPreTrainedModel):
<ide> in ``[0, ..., config.vocab_size]``
<ide>
<ide> Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
<del> **loss**: (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
<add> **masked_lm_loss**: (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
<ide> Masked language modeling loss.
<add> **next_token_loss**: (`optional`, returned when ``lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
<add> Next token prediction loss.
<ide> **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
<ide> Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
<ide> **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
<ide> def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_
<ide>
<ide> if lm_labels is not None:
<ide> # we are doing next-token prediction; shift prediction scores and input ids by one
<del> prediction_scores = prediction_scores[:, :-1, :]
<del> lm_labels = lm_labels[:, 1:]
<add> prediction_scores = prediction_scores[:, :-1, :].contiguous()
<add> lm_labels = lm_labels[:, 1:].contiguous()
<ide> loss_fct = CrossEntropyLoss(ignore_index=-1)
<del> seq2seq_loss = loss_fct(prediction_scores.reshape(-1, self.config.vocab_size), lm_labels.reshape(-1))
<del> outputs = (seq2seq_loss,) + outputs
<add> next_token_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), lm_labels.view(-1))
<add> outputs = (next_token_loss,) + outputs
<ide>
<del> return outputs # (mlm_or_seq2seq_loss), prediction_scores, (hidden_states), (attentions)
<add> return outputs # (masked_lm_loss), (next_token_loss), prediction_scores, (hidden_states), (attentions)
<ide>
<ide>
<ide> @add_start_docstrings("""Bert Model with a `next sentence prediction (classification)` head on top. """,
<ide><path>transformers/modeling_seq2seq.py
<ide>
<ide> class PreTrainedSeq2seq(nn.Module):
<ide> r"""
<del> :class:`~transformers.Seq2seq` is a generic model class that will be
<add> :class:`~transformers.PreTrainedSeq2seq` is a generic model class that will be
<ide> instantiated as a Seq2seq model with one of the base model classes of
<ide> the library as encoder and (optionally) as decoder when created with
<ide> the `AutoModel.from_pretrained(pretrained_model_name_or_path)` class
<ide> def from_pretrained(
<ide> *model_args,
<ide> **kwargs
<ide> ):
<del> r""" Instantiates an encoder and a decoder from one or two base classes
<del> of the library from pre-trained model checkpoints.
<add> r""" Instantiates an encoder and a decoder from one or two base classes of the library from pre-trained model checkpoints.
<ide>
<ide>
<ide> The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)
<ide> def from_pretrained(
<ide> model = PreTrainedSeq2seq.from_pretained('bert-base-uncased', 'bert-base-uncased') # initialize Bert2Bert
<ide> """
<ide>
<del> # Separate the encoder- and decoder- specific kwargs. A kwarg is
<del> # decoder-specific it the key starts with `decoder_`
<add> # keyword arguments come in 3 flavors: encoder-specific (prefixed by
<add> # `encoder_`), decoder-specific (prefixed by `decoder_`) and those
<add> # that apply to the model as a whole.
<add> # We let the specific kwargs override the common ones in case of conflict.
<ide> kwargs_encoder = {
<del> argument: value
<add> argument[len("encoder_"):]: value
<ide> for argument, value in kwargs.items()
<del> if not argument.startswith("decoder_")
<add> if argument.startswith("encoder_")
<ide> }
<ide> kwargs_decoder = {
<del> argument[len("decoder_") :]: value
<add> argument[len("decoder_"):]: value
<ide> for argument, value in kwargs.items()
<ide> if argument.startswith("decoder_")
<ide> }
<add> kwargs_common = {
<add> argument: value
<add> for argument, value in kwargs.items()
<add> if not (argument.startswith("encoder_") or argument.startswith("decoder_"))
<add> }
<add> kwargs_decoder = dict(kwargs_common, **kwargs_decoder)
<add> kwargs_encoder = dict(kwargs_common, **kwargs_encoder)
<ide>
<ide> # Load and initialize the encoder and decoder
<ide> # The distinction between encoder and decoder at the model level is made
<ide> # by the value of the flag `is_decoder` that we need to set correctly.
<del> encoder = kwargs_encoder.pop("encoder_model", None)
<add> encoder = kwargs_encoder.pop("model", None)
<ide> if encoder is None:
<del> kwargs_encoder["is_decoder"] = False
<ide> encoder = AutoModel.from_pretrained(
<ide> encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder
<ide> )
<add> encoder.config.is_decoder = False
<ide>
<ide> decoder = kwargs_decoder.pop("model", None)
<ide> if decoder is None:
<del> kwargs_decoder["is_decoder"] = True
<ide> decoder = AutoModelWithLMHead.from_pretrained(
<ide> decoder_pretrained_model_name_or_path, **kwargs_decoder
<ide> )
<add> decoder.config.is_decoder = True
<ide>
<ide> model = cls(encoder, decoder)
<ide>
<ide> def forward(self, encoder_input_ids, decoder_input_ids, **kwargs):
<ide> decoder_input_ids: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``
<ide> Indices of decoder input sequence tokens in the vocabulary.
<ide> """
<del> # Separate the encoder- and decoder- specific kwargs. A kwarg is
<del> # decoder-specific it the key starts with `decoder_`
<add> # keyword arguments come in 3 flavors: encoder-specific (prefixed by
<add> # `encoder_`), decoder-specific (prefixed by `decoder_`) and those
<add>        # that apply to the model as a whole.
<add> # We let the specific kwargs override the common ones in case of conflict.
<ide> kwargs_encoder = {
<del> argument: value
<add> argument[len("encoder_"):]: value
<ide> for argument, value in kwargs.items()
<del> if not argument.startswith("decoder_")
<add> if argument.startswith("encoder_")
<ide> }
<ide> kwargs_decoder = {
<del> argument[len("decoder_") :]: value
<add> argument[len("decoder_"):]: value
<ide> for argument, value in kwargs.items()
<ide> if argument.startswith("decoder_")
<ide> }
<add> kwargs_common = {
<add> argument: value
<add> for argument, value in kwargs.items()
<add> if not (argument.startswith("encoder_") or argument.startswith("decoder_"))
<add> }
<add> kwargs_decoder = dict(kwargs_common, **kwargs_decoder)
<add> kwargs_encoder = dict(kwargs_common, **kwargs_encoder)
<ide>
<ide> # Encode if needed (training, first prediction pass)
<del> encoder_hidden_states = kwargs_encoder.pop("encoder_hidden_states", None)
<add> encoder_hidden_states = kwargs_encoder.pop("hidden_states", None)
<ide> if encoder_hidden_states is None:
<ide> encoder_outputs = self.encoder(encoder_input_ids, **kwargs_encoder)
<del> encoder_hidden_states = encoder_outputs[0][
<del> -1
<del> ] # output of the encoder *stack*
<add>            encoder_hidden_states = encoder_outputs[0]  # hidden states of the last encoder layer
<ide> else:
<ide> encoder_outputs = ()
<ide>
<ide> # Decode
<del> kwargs_decoder["encoder_hidden_states"] = encoder_hidden_states[None, :, :]
<add> kwargs_decoder["encoder_hidden_states"] = encoder_hidden_states
<add> kwargs_decoder["encoder_attention_mask"] = kwargs_encoder.get("attention_mask", None)
<ide> decoder_outputs = self.decoder(decoder_input_ids, **kwargs_decoder)
<ide>
<ide> return decoder_outputs + encoder_outputs
<ide>
<ide>
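The same `encoder_`/`decoder_` prefix convention drives both `from_pretrained` and `forward` above. Below is a minimal sketch of that splitting rule in isolation; the helper name and example values are illustrative and not part of the library.

```python
def split_seq2seq_kwargs(kwargs: dict):
    # encoder_*/decoder_* arguments go to the respective model (prefix stripped);
    # un-prefixed arguments are shared, and the prefixed ones win on conflict.
    encoder = {k[len("encoder_"):]: v for k, v in kwargs.items() if k.startswith("encoder_")}
    decoder = {k[len("decoder_"):]: v for k, v in kwargs.items() if k.startswith("decoder_")}
    common = {k: v for k, v in kwargs.items()
              if not (k.startswith("encoder_") or k.startswith("decoder_"))}
    return dict(common, **encoder), dict(common, **decoder)

enc, dec = split_seq2seq_kwargs(
    {"attention_mask": "shared", "decoder_attention_mask": "decoder-only", "encoder_token_type_ids": 0}
)
print(enc)  # {'attention_mask': 'shared', 'token_type_ids': 0}
print(dec)  # {'attention_mask': 'decoder-only'}
```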
<ide> class Model2Model(PreTrainedSeq2seq):
<add> r"""
<add>    :class:`~transformers.Model2Model` instantiates a Seq2Seq model
<add>    where both the encoder and the decoder are of the same family. If the
<add>    name of or the path to a pretrained model is specified, the encoder and
<add>    the decoder will be initialized with the pretrained weights (the
<add>    cross-attention will be initialized randomly if its weights are not
<add> present).
<add>
<add> It is possible to override this behavior and initialize, say, the decoder randomly
<add> by creating it beforehand as follows
<add>
<add>        config = BertConfig.from_pretrained('bert-base-uncased')
<add> decoder = BertForMaskedLM(config)
<add> model = Model2Model.from_pretrained('bert-base-uncased', decoder_model=decoder)
<add> """
<ide> def __init__(self, *args, **kwargs):
<ide> super(Model2Model, self).__init__(*args, **kwargs)
<ide> self.tie_weights()
<ide> def from_pretrained(cls, pretrained_model_name_or_path, *args, **kwargs):
<ide> model = super(Model2Model, cls).from_pretrained(
<ide> encoder_pretrained_model_name_or_path=pretrained_model_name_or_path,
<ide> decoder_pretrained_model_name_or_path=pretrained_model_name_or_path,
<add> *args,
<ide> **kwargs
<ide> )
<ide>
<del> # Some architectures require for the decoder to be initialized randomly
<del> # before fine-tuning.
<del> if kwargs.get("decoder_initialize_randomly", False):
<del> model.decoder.init_weights()
<del>
<ide> return model
<ide>
<ide>
| 7
|
Python
|
Python
|
fix failure to return monic polynomials from roots
|
0739b6b0379deecac18811af27966d4515c2ff7c
|
<ide><path>numpy/polynomial/polytemplate.py
<ide> def fromroots(roots, domain=$domain, window=$domain) :
<ide> ----------
<ide> roots : array_like
<ide> List of roots.
<add> domain : {array_like, None}, optional
<add>            Domain for the resulting instance of $name. If None, the domain
<add> is the interval from the smallest root to the largest. The
<add> default is $domain.
<add> window : array_like, optional
<add> Window for the resulting instance of $name. The default value
<add> is $domain.
<ide>
<ide> Returns
<ide> -------
<ide> def fromroots(roots, domain=$domain, window=$domain) :
<ide> ${nick}fromroots : equivalent function
<ide>
<ide> """
<add> [roots] = pu.as_series([roots], trim=False)
<ide> if domain is None :
<ide> domain = pu.getdomain(roots)
<del> rnew = pu.mapdomain(roots, domain, window)
<del> coef = ${nick}fromroots(rnew)
<add> deg = len(roots)
<add> off, scl = pu.mapparms(domain, window)
<add> rnew = off + scl*roots
<add> coef = ${nick}fromroots(rnew) / scl**deg
<ide> return $name(coef, domain=domain, window=window)
<ide>
<ide> @staticmethod
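A quick standalone check of why the patch divides by `scl**deg`: with `w = off + scl*x`, `prod(w - w_i)` picks up a factor of `scl**deg` relative to `prod(x - r_i)`, so the division keeps the represented polynomial monic in the original variable, which is what the updated test asserts. The roots and the `off`/`scl` values below are arbitrary.

```python
import numpy as np
from numpy.polynomial import polynomial as P

roots = np.array([1.0, 2.0, 4.0])
off, scl = -2.0, 0.5                      # arbitrary affine map w = off + scl * x
deg = len(roots)

# Coefficients built from the mapped roots, then rescaled so the represented
# polynomial stays monic in x (as in the fixed fromroots above).
coef_w = P.polyfromroots(off + scl * roots) / scl**deg

# Evaluate p(x) = q(off + scl*x) and compare with the monic prod(x - r_i).
x = np.linspace(-3.0, 5.0, 9)
p_x = P.polyval(off + scl * x, coef_w)
monic = P.polyval(x, P.polyfromroots(roots))
print(np.allclose(p_x, monic))            # True: leading coefficient in x is 1
```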
<ide><path>numpy/polynomial/tests/test_classes.py
<ide> def check_fromroots(Poly):
<ide> assert_almost_equal(p1(r), 0)
<ide>
<ide> # check that polynomial is monic
<del> p2 = Polynomial.cast(p1, domain=d, window=w)
<add> pdom = Polynomial.domain
<add> pwin = Polynomial.window
<add> p2 = Polynomial.cast(p1, domain=pdom, window=pwin)
<ide> assert_almost_equal(p2.coef[-1], 1)
<ide>
<ide>
| 2
|
Python
|
Python
|
remove warning when scikit-learn isn't available
|
5565dcdd354f24a118befebb8673bf58dc2f0f51
|
<ide><path>src/transformers/data/metrics/__init__.py
<ide> # See the License for the specific language governing permissions and
<ide> # limitations under the License.
<ide>
<del>import logging
<del>
<del>
<del>logger = logging.getLogger(__name__)
<del>
<ide> try:
<ide> from scipy.stats import pearsonr, spearmanr
<ide> from sklearn.metrics import matthews_corrcoef, f1_score
<ide>
<ide> _has_sklearn = True
<ide> except (AttributeError, ImportError) as e:
<del> logger.warning("To use data.metrics please install scikit-learn. See https://scikit-learn.org/stable/index.html")
<ide> _has_sklearn = False
<ide>
<ide>
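A hedged sketch of how an availability flag like `_has_sklearn` is commonly consumed, raising a clear error at call time rather than warning at import time. The guard and metric names below are made up for illustration and are not part of the library.

```python
try:
    from sklearn.metrics import f1_score  # optional dependency
    _has_sklearn = True
except ImportError:
    _has_sklearn = False

def require_sklearn():
    # Hypothetical guard: call at the top of any metric that needs scikit-learn.
    if not _has_sklearn:
        raise ImportError(
            "This metric requires scikit-learn. "
            "See https://scikit-learn.org/stable/index.html for installation."
        )

def simple_f1(preds, labels):
    require_sklearn()
    return f1_score(y_true=labels, y_pred=preds)
```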
| 1
|
Javascript
|
Javascript
|
remove pipe if the dest emits error
|
49ea653363da50c76a099839b4af555cec1d06c8
|
<ide><path>lib/_stream_readable.js
<ide> Readable.prototype.pipe = function(dest, pipeOpts) {
<ide> ondrain();
<ide> });
<ide>
<add> // if the dest has an error, then stop piping into it.
<add> // however, don't suppress the throwing behavior for this.
<add> dest.once('error', function(er) {
<add> src.unpipe(dest);
<add> if (dest.listeners('error').length === 0)
<add> dest.emit('error', er);
<add> });
<add>
<ide> // tell the dest that it's being piped to
<ide> dest.emit('pipe', src);
<ide>
<ide><path>test/simple/test-stream2-pipe-error-handling.js
<add>// Copyright Joyent, Inc. and other Node contributors.
<add>//
<add>// Permission is hereby granted, free of charge, to any person obtaining a
<add>// copy of this software and associated documentation files (the
<add>// "Software"), to deal in the Software without restriction, including
<add>// without limitation the rights to use, copy, modify, merge, publish,
<add>// distribute, sublicense, and/or sell copies of the Software, and to permit
<add>// persons to whom the Software is furnished to do so, subject to the
<add>// following conditions:
<add>//
<add>// The above copyright notice and this permission notice shall be included
<add>// in all copies or substantial portions of the Software.
<add>//
<add>// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
<add>// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
<add>// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
<add>// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
<add>// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
<add>// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
<add>// USE OR OTHER DEALINGS IN THE SOFTWARE.
<add>
<add>var common = require('../common');
<add>var assert = require('assert');
<add>var stream = require('stream');
<add>
<add>(function testErrorListenerCatches() {
<add> var count = 1000;
<add>
<add> var source = new stream.Readable();
<add> source._read = function(n, cb) {
<add> n = Math.min(count, n);
<add> count -= n;
<add> cb(null, new Buffer(n));
<add> };
<add>
<add> var unpipedDest;
<add> source.unpipe = function(dest) {
<add> unpipedDest = dest;
<add> stream.Readable.prototype.unpipe.call(this, dest);
<add> };
<add>
<add> var dest = new stream.Writable();
<add> dest._write = function(chunk, cb) {
<add> cb();
<add> };
<add>
<add> source.pipe(dest);
<add>
<add> var gotErr = null;
<add> dest.on('error', function(err) {
<add> gotErr = err;
<add> });
<add>
<add> var unpipedSource;
<add> dest.on('unpipe', function(src) {
<add> unpipedSource = src;
<add> });
<add>
<add> var err = new Error('This stream turned into bacon.');
<add> dest.emit('error', err);
<add> assert.strictEqual(gotErr, err);
<add> assert.strictEqual(unpipedSource, source);
<add> assert.strictEqual(unpipedDest, dest);
<add>})();
<add>
<add>(function testErrorWithoutListenerThrows() {
<add> var count = 1000;
<add>
<add> var source = new stream.Readable();
<add> source._read = function(n, cb) {
<add> n = Math.min(count, n);
<add> count -= n;
<add> cb(null, new Buffer(n));
<add> };
<add>
<add> var unpipedDest;
<add> source.unpipe = function(dest) {
<add> unpipedDest = dest;
<add> stream.Readable.prototype.unpipe.call(this, dest);
<add> };
<add>
<add> var dest = new stream.Writable();
<add> dest._write = function(chunk, cb) {
<add> cb();
<add> };
<add>
<add> source.pipe(dest);
<add>
<add> var unpipedSource;
<add> dest.on('unpipe', function(src) {
<add> unpipedSource = src;
<add> });
<add>
<add> var err = new Error('This stream turned into bacon.');
<add>
<add> var gotErr = null;
<add> try {
<add> dest.emit('error', err);
<add> } catch (e) {
<add> gotErr = e;
<add> }
<add> assert.strictEqual(gotErr, err);
<add> assert.strictEqual(unpipedSource, source);
<add> assert.strictEqual(unpipedDest, dest);
<add>})();
| 2
|
Javascript
|
Javascript
|
check gruntfile and tasks for code style
|
85d4c0133b5369b54bc9c4250989214bb2d45700
|
<ide><path>Gruntfile.js
<ide> module.exports = function( grunt ) {
<ide> }
<ide> },
<ide> jscs: {
<del> src: "src/**/*.js"
<add> src: "src/**/*.js",
<add> gruntfile: "Gruntfile.js",
<add> tasks: "build/tasks/*.js"
<ide> },
<ide> testswarm: {
<ide> tests: "ajax attributes callbacks core css data deferred dimensions effects event manipulation offset queue selector serialize support traversing Sizzle".split(" ")
| 1
|
Python
|
Python
|
handle invalid date parsing in webserver views.
|
9e25bc211f6f7bba1aff133d21fe3865dabda53d
|
<ide><path>airflow/www/views.py
<ide> def get_date_time_num_runs_dag_runs_form_data(www_request, session, dag):
<ide> """Get Execution Data, Base Date & Number of runs from a Request"""
<ide> date_time = www_request.args.get('execution_date')
<ide> if date_time:
<del> date_time = timezone.parse(date_time)
<add> date_time = _safe_parse_datetime(date_time)
<ide> else:
<ide> date_time = dag.get_latest_execution_date(session=session) or timezone.utcnow()
<ide>
<ide> base_date = www_request.args.get('base_date')
<ide> if base_date:
<del> base_date = timezone.parse(base_date)
<add> base_date = _safe_parse_datetime(base_date)
<ide> else:
<ide>        # The DateTimeField widget truncates milliseconds and would lose
<ide> # the first dag run. Round to next second.
<ide> def get_date_time_num_runs_dag_runs_form_data(www_request, session, dag):
<ide> }
<ide>
<ide>
<add>def _safe_parse_datetime(v):
<add>    """Parse datetime and abort with a 400 error for invalid dates"""
<add> try:
<add> return timezone.parse(v)
<add> except (TypeError, ParserError):
<add> abort(400, f"Invalid datetime: {v!r}")
<add>
<add>
<ide> def task_group_to_grid(task_item_or_group, dag, dag_runs, tis, session):
<ide> """
<ide> Create a nested dict representation of this TaskGroup and its children used to construct
<ide> def rendered_templates(self, session):
<ide> task_id = request.args.get('task_id')
<ide> map_index = request.args.get('map_index', -1, type=int)
<ide> execution_date = request.args.get('execution_date')
<del> dttm = timezone.parse(execution_date)
<add> dttm = _safe_parse_datetime(execution_date)
<ide> form = DateTimeForm(data={'execution_date': dttm})
<ide> root = request.args.get('root', '')
<ide>
<ide> def rendered_k8s(self, session: Session = NEW_SESSION):
<ide> dag_id = request.args.get('dag_id')
<ide> task_id = request.args.get('task_id')
<ide> execution_date = request.args.get('execution_date')
<del> dttm = timezone.parse(execution_date)
<add> dttm = _safe_parse_datetime(execution_date)
<add>
<ide> form = DateTimeForm(data={'execution_date': dttm})
<ide> root = request.args.get('root', '')
<ide> map_index = request.args.get('map_index', -1, type=int)
<ide> def log(self, session=None):
<ide> task_id = request.args.get('task_id')
<ide> map_index = request.args.get('map_index', -1, type=int)
<ide> execution_date = request.args.get('execution_date')
<del> dttm = timezone.parse(execution_date) if execution_date else None
<add>
<add> if execution_date:
<add> dttm = _safe_parse_datetime(execution_date)
<add> else:
<add> dttm = None
<add>
<ide> form = DateTimeForm(data={'execution_date': dttm})
<ide> dag_model = DagModel.get_dagmodel(dag_id)
<ide>
<ide> def redirect_to_external_log(self, session=None):
<ide> dag_id = request.args.get('dag_id')
<ide> task_id = request.args.get('task_id')
<ide> execution_date = request.args.get('execution_date')
<del> dttm = timezone.parse(execution_date)
<add> dttm = _safe_parse_datetime(execution_date)
<ide> map_index = request.args.get('map_index', -1, type=int)
<ide> try_number = request.args.get('try_number', 1)
<ide>
<ide> def task(self, session):
<ide> dag_id = request.args.get('dag_id')
<ide> task_id = request.args.get('task_id')
<ide> execution_date = request.args.get('execution_date')
<del> dttm = timezone.parse(execution_date)
<add> dttm = _safe_parse_datetime(execution_date)
<ide> map_index = request.args.get('map_index', -1, type=int)
<ide> form = DateTimeForm(data={'execution_date': dttm})
<ide> root = request.args.get('root', '')
<ide> def xcom(self, session=None):
<ide> # Carrying execution_date through, even though it's irrelevant for
<ide> # this context
<ide> execution_date = request.args.get('execution_date')
<del> dttm = timezone.parse(execution_date)
<add> dttm = _safe_parse_datetime(execution_date)
<add>
<ide> form = DateTimeForm(data={'execution_date': dttm})
<ide> root = request.args.get('root', '')
<ide> dag = DagModel.get_dagmodel(dag_id)
<ide> def clear(self):
<ide> map_indexes = request.form.getlist('map_index', type=int)
<ide>
<ide> execution_date = request.form.get('execution_date')
<del> execution_date = timezone.parse(execution_date)
<add> execution_date = _safe_parse_datetime(execution_date)
<ide> confirmed = request.form.get('confirmed') == "true"
<ide> upstream = request.form.get('upstream') == "true"
<ide> downstream = request.form.get('downstream') == "true"
<ide> def grid(self, dag_id, session=None):
<ide> num_runs = conf.getint('webserver', 'default_dag_run_display_number')
<ide>
<ide> try:
<del> base_date = timezone.parse(request.args["base_date"])
<add> base_date = _safe_parse_datetime(request.args["base_date"])
<ide> except (KeyError, ValueError):
<ide> base_date = dag.get_latest_execution_date() or timezone.utcnow()
<ide>
<ide> def graph(self, dag_id, session=None):
<ide> edges = dag_edges(dag)
<ide>
<ide> dt_nr_dr_data = get_date_time_num_runs_dag_runs_form_data(request, session, dag)
<add>
<ide> dt_nr_dr_data['arrange'] = arrange
<ide> dttm = dt_nr_dr_data['dttm']
<ide> dag_run = dag.get_dagrun(execution_date=dttm)
<ide> def duration(self, dag_id, session=None):
<ide> num_runs = request.args.get('num_runs', default=default_dag_run, type=int)
<ide>
<ide> if base_date:
<del> base_date = timezone.parse(base_date)
<add> base_date = _safe_parse_datetime(base_date)
<ide> else:
<ide> base_date = dag.get_latest_execution_date() or timezone.utcnow()
<ide>
<ide> def tries(self, dag_id, session=None):
<ide> num_runs = request.args.get('num_runs', default=default_dag_run, type=int)
<ide>
<ide> if base_date:
<del> base_date = timezone.parse(base_date)
<add> base_date = _safe_parse_datetime(base_date)
<ide> else:
<ide> base_date = dag.get_latest_execution_date() or timezone.utcnow()
<ide>
<ide> def landing_times(self, dag_id, session=None):
<ide> num_runs = request.args.get('num_runs', default=default_dag_run, type=int)
<ide>
<ide> if base_date:
<del> base_date = timezone.parse(base_date)
<add> base_date = _safe_parse_datetime(base_date)
<ide> else:
<ide> base_date = dag.get_latest_execution_date() or timezone.utcnow()
<ide>
<ide> def extra_links(self, session: "Session" = NEW_SESSION):
<ide> map_index = request.args.get('map_index', -1, type=int)
<ide> execution_date = request.args.get('execution_date')
<ide> link_name = request.args.get('link_name')
<del> dttm = timezone.parse(execution_date)
<add> dttm = _safe_parse_datetime(execution_date)
<ide> dag = current_app.dag_bag.get_dag(dag_id)
<ide>
<ide> if not dag or task_id not in dag.task_ids:
<ide> def task_instances(self):
<ide>
<ide> dttm = request.args.get('execution_date')
<ide> if dttm:
<del> dttm = timezone.parse(dttm)
<add> dttm = _safe_parse_datetime(dttm)
<ide> else:
<ide> response = jsonify({'error': f"Invalid execution_date {dttm}"})
<ide> response.status_code = 400
<ide><path>tests/www/views/test_views.py
<ide> def test_get_task_stats_from_query():
<ide>
<ide> data = get_task_stats_from_query(query_data)
<ide> assert data == expected_data
<add>
<add>
<add>@pytest.mark.parametrize(
<add> "url, content",
<add> [
<add> (
<add> '/rendered-templates?execution_date=invalid',
<add> "Invalid datetime: 'invalid'",
<add> ),
<add> (
<add> '/log?execution_date=invalid',
<add> "Invalid datetime: 'invalid'",
<add> ),
<add> (
<add> '/redirect_to_external_log?execution_date=invalid',
<add> "Invalid datetime: 'invalid'",
<add> ),
<add> (
<add> '/task?execution_date=invalid',
<add> "Invalid datetime: 'invalid'",
<add> ),
<add> (
<add> 'dags/example_bash_operator/graph?execution_date=invalid',
<add> "Invalid datetime: 'invalid'",
<add> ),
<add> (
<add> 'dags/example_bash_operator/graph?execution_date=invalid',
<add> "Invalid datetime: 'invalid'",
<add> ),
<add> (
<add> 'dags/example_bash_operator/duration?base_date=invalid',
<add> "Invalid datetime: 'invalid'",
<add> ),
<add> (
<add> 'dags/example_bash_operator/tries?base_date=invalid',
<add> "Invalid datetime: 'invalid'",
<add> ),
<add> (
<add> 'dags/example_bash_operator/landing-times?base_date=invalid',
<add> "Invalid datetime: 'invalid'",
<add> ),
<add> (
<add> 'dags/example_bash_operator/gantt?execution_date=invalid',
<add> "Invalid datetime: 'invalid'",
<add> ),
<add> (
<add> 'extra_links?execution_date=invalid',
<add> "Invalid datetime: 'invalid'",
<add> ),
<add> ],
<add>)
<add>def test_invalid_dates(app, admin_client, url, content):
<add> """Test invalid date format doesn't crash page."""
<add> resp = admin_client.get(url, follow_redirects=True)
<add>
<add> assert resp.status_code == 400
<add> assert content in resp.get_data().decode()
| 2
|
PHP
|
PHP
|
adjust interface contracts as per implementation
|
eab2d42381f8347014c618e325072753517beaaa
|
<ide><path>src/Database/Query.php
<ide> public function type(): string
<ide> * $expression = $query->newExpr('Table.column = Table2.column'); // Return a raw SQL expression
<ide> * ```
<ide> *
<del> * @param mixed $rawExpression A string, array or anything you want wrapped in an expression object
<add> * @param string|array|\Cake\Database\ExpressionInterface|null $rawExpression A string, array or anything you want wrapped in an expression object
<ide> * @return \Cake\Database\Expression\QueryExpression
<ide> */
<ide> public function newExpr($rawExpression = null): QueryExpression
<ide><path>src/Datasource/QueryInterface.php
<ide> interface QueryInterface
<ide> * If `true` is passed in the second argument, any previous selections will
<ide> * be overwritten with the list passed in the first argument.
<ide> *
<del> * @param mixed $fields Fields to be added to the list.
<add> * @param array|\Cake\Database\ExpressionInterface|string|callable|\Cake\ORM\Table|\Cake\ORM\Association $fields Fields.
<ide> * @param bool $overwrite whether to reset fields with passed list or not
<ide> * @return $this
<ide> */
<ide> public function count(): int;
<ide> * $query->limit($query->newExpr()->add(['1 + 1'])); // LIMIT (1 + 1)
<ide> * ```
<ide> *
<del> * @param int|mixed $num number of records to be returned
<add> * @param int|\Cake\Database\ExpressionInterface|null $num number of records to be returned
<ide> * @return $this
<ide> */
<ide> public function limit($num);
<ide> public function limit($num);
<ide> * $query->offset($query->newExpr()->add(['1 + 1'])); // OFFSET (1 + 1)
<ide> * ```
<ide> *
<del> * @param mixed $num number of records to be skipped
<add> * @param int|\Cake\Database\ExpressionInterface|null $num number of records to be skipped
<ide> * @return $this
<ide> */
<ide> public function offset($num);
<ide> public function offset($num);
<ide> * If you need to set complex expressions as order conditions, you
<ide> * should use `orderAsc()` or `orderDesc()`.
<ide> *
<del> * @param array|string $fields fields to be added to the list
<add> * @param array|\Cake\Database\ExpressionInterface|\Closure|string $fields fields to be added to the list
<ide> * @param bool $overwrite whether to reset order with field list or not
<ide> * @return $this
<ide> */
| 2
|
Javascript
|
Javascript
|
fix typo in virtualizedlist-test.js
|
8b1b7f71267831d2ad50730607ef4c4a57cd651e
|
<ide><path>Libraries/Lists/__tests__/VirtualizedList-test.js
<ide> describe('VirtualizedList', () => {
<ide> // Silence the React error boundary warning; we expect an uncaught error.
<ide> const consoleError = console.error;
<ide> jest.spyOn(console, 'error').mockImplementation(message => {
<del> if (message.startsWith('The above error occured in the ')) {
<add> if (message.startsWith('The above error occurred in the ')) {
<ide> return;
<ide> }
<ide> consoleError(message);
<ide> describe('VirtualizedList', () => {
<ide> const errors = [];
<ide> jest.spyOn(console, 'error').mockImplementation((...args) => {
<ide> // Silence the DEV-only React error boundary warning.
<del> if ((args[0] || '').startsWith('The above error occured in the ')) {
<add> if ((args[0] || '').startsWith('The above error occurred in the ')) {
<ide> return;
<ide> }
<ide> errors.push(args);
| 1
|
Text
|
Text
|
use adjective "too" instead of "to"
|
7f37074c33f7425150f23ecea25f13f6b15abb65
|
<ide><path>docs/_posts/2014-10-17-community-roundup-23.md
<ide> Yahoo is converting Yahoo Mail to React and Flux and in the process, they open s
<ide> >
<ide> > - There should be only one channel for all state changes: The Dispatcher. This makes debugging easy because it just requires a single console.log in the dispatcher to observe every single state change trigger.
<ide> >
<del>> - Asynchronously executed callbacks should not leak into Stores. The consequences of it are just to hard to fully foresee. This leads to elusive bugs. Stores should only execute synchronous code. Otherwise they are too hard to understand.
<add>> - Asynchronously executed callbacks should not leak into Stores. The consequences of it are just too hard to fully foresee. This leads to elusive bugs. Stores should only execute synchronous code. Otherwise they are too hard to understand.
<ide> >
<ide> > - Avoiding actions firing other actions makes your app simple. We use the newest Dispatcher implementation from Facebook that does not allow a new dispatch while dispatching. It forces you to do things right.
<ide> >
| 1
|
Python
|
Python
|
add a test case for paramikosshclient
|
941b812342dd9bff1ff010d20b46f7d94ed4b3ff
|
<ide><path>test/compute/test_ssh_client.py
<add># -*- coding: utf-8 -*-
<add># Licensed to the Apache Software Foundation (ASF) under one or more
<add># contributor license agreements. See the NOTICE file distributed with
<add># this work for additional information regarding copyright ownership.
<add># The ASF licenses this file to You under the Apache License, Version 2.0
<add># (the "License"); you may not use this file except in compliance with
<add># the License. You may obtain a copy of the License at
<add>#
<add># http://www.apache.org/licenses/LICENSE-2.0
<add>#
<add># Unless required by applicable law or agreed to in writing, software
<add># distributed under the License is distributed on an "AS IS" BASIS,
<add># WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
<add># See the License for the specific language governing permissions and
<add># limitations under the License.
<add>
<add>import sys
<add>import unittest
<add>
<add>import libcloud.compute.ssh
<add>
<add>from mock import Mock
<add>
<add>
<add>class ParamikoSSHClientTests(unittest.TestCase):
<add>
<add> def test_either_key_or_password_must_be_provided(self):
<add> libcloud.compute.ssh.paramiko = Mock()
<add> client = libcloud.compute.ssh.ParamikoSSHClient(hostname='foo.bar.com')
<add>
<add> try:
<add> client.connect()
<add> except Exception, e:
<add> self.assertTrue(str(e).find('must specify either password or')
<add> != -1)
<add> else:
<add> self.fail('Exception was not thrown')
<add>
<add>
<add>if __name__ == '__main__':
<add> sys.exit(unittest.main())
| 1
|
PHP
|
PHP
|
allow multiple attributes on sometimes
|
52dfddaaea7de0be0ad79a0a515c88a7397b083a
|
<ide><path>src/Illuminate/Validation/Validator.php
<ide> public function sometimes($attribute, $rules, $callback)
<ide> {
<ide> $payload = new Fluent(array_merge($this->data, $this->files));
<ide>
<del> if (call_user_func($callback, $payload)) $this->mergeRules($attribute, $rules);
<add> if (call_user_func($callback, $payload))
<add> {
<add> foreach ((array) $attribute as $key)
<add> {
<add> $this->mergeRules($key, $rules);
<add> }
<add> }
<ide> }
<ide>
<ide> /**
| 1
|
Javascript
|
Javascript
|
remove string literal from assertions
|
92de0eb4197c951703aeb4759a3ef9c6897c10ee
|
<ide><path>test/parallel/test-crypto-sign-verify.js
<ide> common.expectsError(
<ide> .update('Test')
<ide> .update('123')
<ide> .verify(certPem, s1, 'base64');
<del> assert.strictEqual(verified, true, 'sign and verify (base 64)');
<add> assert.strictEqual(verified, true);
<ide> }
<ide>
<ide> {
<ide> common.expectsError(
<ide> .update('Test')
<ide> .update('123')
<ide> .verify(certPem, s2, 'latin1');
<del> assert.strictEqual(verified, true, 'sign and verify (latin1)');
<add> assert.strictEqual(verified, true);
<ide>
<ide> const verStream = crypto.createVerify('SHA256');
<ide> verStream.write('Tes');
<ide> verStream.write('t12');
<ide> verStream.end('3');
<ide> verified = verStream.verify(certPem, s2, 'latin1');
<del> assert.strictEqual(verified, true, 'sign and verify (stream)');
<add> assert.strictEqual(verified, true);
<ide> }
<ide>
<ide> {
<ide> common.expectsError(
<ide> .update('Test')
<ide> .update('123')
<ide> .verify(certPem, s3);
<del> assert.strictEqual(verified, true, 'sign and verify (buffer)');
<add> assert.strictEqual(verified, true);
<ide>
<ide> const verStream = crypto.createVerify('SHA1');
<ide> verStream.write('Tes');
<ide> verStream.write('t12');
<ide> verStream.end('3');
<ide> verified = verStream.verify(certPem, s3);
<del> assert.strictEqual(verified, true, 'sign and verify (stream)');
<add> assert.strictEqual(verified, true);
<ide> }
<ide>
<ide> // Special tests for RSA_PKCS1_PSS_PADDING
<ide> common.expectsError(
<ide> }, s4);
<ide> const saltLengthCorrect = getEffectiveSaltLength(signSaltLength) ===
<ide> getEffectiveSaltLength(verifySaltLength);
<del> assert.strictEqual(verified, saltLengthCorrect, 'verify (PSS)');
<add> assert.strictEqual(verified, saltLengthCorrect);
<ide> });
<ide>
<ide> // Verification using RSA_PSS_SALTLEN_AUTO should always work
<ide> common.expectsError(
<ide> padding: crypto.constants.RSA_PKCS1_PSS_PADDING,
<ide> saltLength: crypto.constants.RSA_PSS_SALTLEN_AUTO
<ide> }, s4);
<del> assert.strictEqual(verified, true, 'verify (PSS with SALTLEN_AUTO)');
<add> assert.strictEqual(verified, true);
<ide>
<ide> // Verifying an incorrect message should never work
<ide> verified = crypto.createVerify(algo)
<ide> common.expectsError(
<ide> padding: crypto.constants.RSA_PKCS1_PSS_PADDING,
<ide> saltLength: crypto.constants.RSA_PSS_SALTLEN_AUTO
<ide> }, s4);
<del> assert.strictEqual(verified, false, 'verify (PSS, incorrect)');
<add> assert.strictEqual(verified, false);
<ide> }
<ide> });
<ide> }
<ide> common.expectsError(
<ide> padding: crypto.constants.RSA_PKCS1_PSS_PADDING,
<ide> saltLength: vector.salt.length / 2
<ide> }, vector.signature, 'hex');
<del> assert.strictEqual(verified, true, 'verify (PSS)');
<add> assert.strictEqual(verified, true);
<ide> }
<ide>
<ide> const examples = JSON.parse(fixtures.readSync('pss-vectors.json', 'utf8'));
| 1
|
Javascript
|
Javascript
|
clarify what tests verify
|
08aed3ead1af861efc605ffb716bf6e96d7f71fd
|
<ide><path>test/createStore.spec.js
<ide> describe('createStore', () => {
<ide> expect(listenerC.calls.length).toBe(2)
<ide> })
<ide>
<del> it('removes listeners NOT immediately when unsubscribe is called', () => {
<add> it('delays unsubscribe until the end of current dispatch', () => {
<ide> const store = createStore(reducers.todos)
<ide>
<ide> const unsubscribeHandles = []
<del> const doUnsubscribeAll = () => unsubscribeHandles.forEach(unsubscribe => unsubscribe() )
<del>
<add> const doUnsubscribeAll = () => unsubscribeHandles.forEach(
<add> unsubscribe => unsubscribe()
<add> )
<add>
<ide> const listener1 = expect.createSpy(() => {})
<ide> const listener2 = expect.createSpy(() => {})
<ide> const listener3 = expect.createSpy(() => {})
<ide> describe('createStore', () => {
<ide> unsubscribeHandles.push(store.subscribe(() => listener3()))
<ide>
<ide> store.dispatch(unknownAction())
<add> expect(listener1.calls.length).toBe(1)
<add> expect(listener2.calls.length).toBe(1)
<add> expect(listener3.calls.length).toBe(1)
<add>
<ide> store.dispatch(unknownAction())
<ide> expect(listener1.calls.length).toBe(1)
<ide> expect(listener2.calls.length).toBe(1)
<del> // listener3 is called! decided in #1180
<ide> expect(listener3.calls.length).toBe(1)
<ide> })
<ide>
<del> it('does not fire immediately if a listener is added inside another listener', () => {
<add> it('delays subscribe until the end of current dispatch', () => {
<ide> const store = createStore(reducers.todos)
<ide>
<ide> const listener1 = expect.createSpy(() => {})
<ide> describe('createStore', () => {
<ide> })
<ide>
<ide> store.dispatch(unknownAction())
<add> expect(listener1.calls.length).toBe(1)
<add> expect(listener2.calls.length).toBe(1)
<add> expect(listener3.calls.length).toBe(0)
<add>
<ide> store.dispatch(unknownAction())
<ide> expect(listener1.calls.length).toBe(2)
<ide> expect(listener2.calls.length).toBe(2)
| 1
|
Python
|
Python
|
set version to v3.3.0.dev0
|
d56b1400d2899bc85ab8108e2dbf2e4885c4e6ce
|
<ide><path>spacy/about.py
<ide> # fmt: off
<ide> __title__ = "spacy"
<del>__version__ = "3.2.2"
<add>__version__ = "3.3.0.dev0"
<ide> __download_url__ = "https://github.com/explosion/spacy-models/releases/download"
<ide> __compatibility__ = "https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json"
<ide> __projects__ = "https://github.com/explosion/projects"
| 1
|
PHP
|
PHP
|
fix phpcs and psalm errors
|
e8f353c8f39ea9bd9660ca22733f6e884853c0de
|
<ide><path>src/TestSuite/TestCase.php
<ide> use Cake\TestSuite\Constraint\EventFired;
<ide> use Cake\TestSuite\Constraint\EventFiredWith;
<ide> use Cake\Utility\Inflector;
<add>use LogicException;
<ide> use PHPUnit\Framework\TestCase as BaseTestCase;
<ide> use ReflectionClass;
<ide> use ReflectionException;
<ide> public function loadFixtures(): void
<ide> */
<ide> public function loadRoutes(?array $appArgs = null): void
<ide> {
<del> $appArgs = $appArgs === null ? [CONFIG] : $appArgs;
<add> $appArgs = $appArgs ?? [CONFIG];
<ide> $className = Configure::read('App.namespace') . '\\Application';
<ide> try {
<ide> $reflect = new ReflectionClass($className);
<del> /** @var \Cake\Core\HttpApplicationInterface $app */
<add> /** @var \Cake\Routing\RoutingApplicationInterface $app */
<ide> $app = $reflect->newInstanceArgs($appArgs);
<ide> } catch (ReflectionException $e) {
<ide> throw new LogicException(sprintf('Cannot load "%s" to load routes from.', $className), null, $e);
| 1
|
Javascript
|
Javascript
|
return zeros for disconnected/hidden elements
|
40dcc767640c41a4387a343f1ef53ac57ed631c5
|
<ide><path>src/offset.js
<ide> jQuery.offset = {
<ide> elem.style.position = "relative";
<ide> }
<ide>
<del> curOffset = curElem.offset() || { top: 0, left: 0 };
<add> curOffset = curElem.offset();
<ide> curCSSTop = jQuery.css( elem, "top" );
<ide> curCSSLeft = jQuery.css( elem, "left" );
<ide> calculatePosition = ( position === "absolute" || position === "fixed" ) &&
<ide> jQuery.fn.extend({
<ide> left: rect.left + win.pageXOffset - docElem.clientLeft
<ide> };
<ide> }
<add>
<add> // Return zeros for disconnected and hidden elements (gh-2310)
<add> return rect;
<ide> },
<ide>
<ide> position: function() {
<ide><path>test/unit/offset.js
<ide> test("empty set", function() {
<ide> });
<ide>
<ide> test("disconnected element", function() {
<del> expect(1);
<del>
<del> var result;
<add> expect( 2 );
<ide>
<del> try {
<del> result = jQuery( document.createElement("div") ).offset();
<del> } catch ( e ) {}
<add> var result = jQuery( document.createElement( "div" ) ).offset();
<ide>
<del> ok( !result, "no position for disconnected element" );
<add> equal( result.top, 0, "Retrieving offset on disconnected elements returns zeros (gh-2310)" );
<add> equal( result.left, 0, "Retrieving offset on disconnected elements returns zeros (gh-2310)" );
<ide> });
<ide>
<ide> test("hidden (display: none) element", function() {
<del> expect(1);
<del>
<del> var result,
<del> node = jQuery("<div style='display: none' />").appendTo("#qunit-fixture");
<add> expect( 2 );
<ide>
<del> try {
<add> var node = jQuery("<div style='display: none' />").appendTo("#qunit-fixture"),
<ide> result = node.offset();
<del> } catch ( e ) {}
<ide>
<ide> node.remove();
<ide>
<del> ok( !result, "no position for hidden (display: none) element" );
<add> equal( result.top, 0, "Retrieving offset on hidden elements returns zeros (gh-2310)" );
<add> equal( result.left, 0, "Retrieving offset on hidden elements returns zeros (gh-2310)" );
<ide> });
<ide>
<ide> testIframe("offset/absolute", "absolute", function($, iframe) {
| 2
|
Javascript
|
Javascript
|
improve error message for module_not_found
|
a02e3e2d5f1f96f3c408270d45935afdd5d1fffc
|
<ide><path>lib/internal/modules/cjs/loader.js
<ide> Module._resolveFilename = function(request, parent, isMain, options) {
<ide> cursor = cursor.parent) {
<ide> requireStack.push(cursor.filename || cursor.id);
<ide> }
<add> let message = `Cannot find module '${request}'`;
<add> if (requireStack.length > 0) {
<add> message = message + '\nRequire stack:\n- ' + requireStack.join('\n- ');
<add> }
<ide> // eslint-disable-next-line no-restricted-syntax
<del> var err = new Error(`Cannot find module '${request}'`);
<add> var err = new Error(message);
<ide> err.code = 'MODULE_NOT_FOUND';
<ide> err.requireStack = requireStack;
<ide> throw err;
<ide><path>test/fixtures/require-resolve.js
<ide> assert.strictEqual(
<ide> // Verify that existing paths are removed.
<ide> assert.throws(() => {
<ide> require.resolve('bar', { paths: [] })
<del>}, /^Error: Cannot find module 'bar'$/);
<add>}, /^Error: Cannot find module 'bar'/);
<ide>
<ide> // Verify that resolution path can be overwritten.
<ide> {
<ide> // three.js cannot be loaded from this file by default.
<ide> assert.throws(() => {
<ide> require.resolve('three')
<del> }, /^Error: Cannot find module 'three'$/);
<add> }, /^Error: Cannot find module 'three'/);
<ide>
<ide> // If the nested-index directory is provided as a resolve path, 'three'
<ide> // cannot be found because nested-index is used as a starting point and not
<ide> // a searched directory.
<ide> assert.throws(() => {
<ide> require.resolve('three', { paths: [nestedIndex] })
<del> }, /^Error: Cannot find module 'three'$/);
<add> }, /^Error: Cannot find module 'three'/);
<ide>
<ide> // Resolution from nested index directory also checks node_modules.
<ide> assert.strictEqual(
<ide><path>test/parallel/test-internal-modules.js
<ide> const assert = require('assert');
<ide>
<ide> assert.throws(function() {
<ide> require('internal/freelist');
<del>}, /^Error: Cannot find module 'internal\/freelist'$/);
<add>}, /^Error: Cannot find module 'internal\/freelist'/);
<ide>
<ide> assert.strictEqual(
<ide> require(fixtures.path('internal-modules')),
<ide><path>test/parallel/test-loaders-hidden-from-users.js
<ide> common.expectsError(
<ide> require('internal/bootstrap/loaders');
<ide> }, {
<ide> code: 'MODULE_NOT_FOUND',
<del> message: 'Cannot find module \'internal/bootstrap/loaders\''
<add> message: /Cannot find module 'internal\/bootstrap\/loaders'/
<ide> }
<ide> );
<ide>
<ide> common.expectsError(
<ide> require('owo');
<ide> }, {
<ide> code: 'MODULE_NOT_FOUND',
<del> message: 'Cannot find module \'owo\''
<add> message: /Cannot find module 'owo'/
<ide> }
<ide> );
<ide><path>test/parallel/test-module-loading-error.js
<ide> common.expectsError(
<ide> message: 'The argument \'id\' must be a non-empty string. Received \'\''
<ide> });
<ide>
<del>common.expectsError(
<add>assert.throws(
<ide> () => { require('../fixtures/packages/is-dir'); },
<ide> {
<ide> code: 'MODULE_NOT_FOUND',
<del> message: 'Cannot find module \'../fixtures/packages/is-dir\''
<del> });
<add> message: /Cannot find module '\.\.\/fixtures\/packages\/is-dir'/
<add> }
<add>);
<ide><path>test/parallel/test-module-multi-extensions.js
<ide> fs.writeFileSync(dotfileWithExtension, 'console.log(__filename);', 'utf8');
<ide> require(modulePath);
<ide> assert.throws(
<ide> () => require(`${modulePath}.foo`),
<del> new Error(`Cannot find module '${modulePath}.foo'`)
<add> (err) => err.message.startsWith(`Cannot find module '${modulePath}.foo'`)
<ide> );
<ide> require(`${modulePath}.foo.bar`);
<ide> delete require.cache[file];
<ide> fs.writeFileSync(dotfileWithExtension, 'console.log(__filename);', 'utf8');
<ide> const modulePath = path.join(tmpdir.path, 'test-extensions');
<ide> assert.throws(
<ide> () => require(modulePath),
<del> new Error(`Cannot find module '${modulePath}'`)
<add> (err) => err.message.startsWith(`Cannot find module '${modulePath}'`)
<ide> );
<ide> delete require.cache[file];
<ide> Module._pathCache = Object.create(null);
<ide> fs.writeFileSync(dotfileWithExtension, 'console.log(__filename);', 'utf8');
<ide> const modulePath = path.join(tmpdir.path, 'test-extensions.foo');
<ide> assert.throws(
<ide> () => require(modulePath),
<del> new Error(`Cannot find module '${modulePath}'`)
<add> (err) => err.message.startsWith(`Cannot find module '${modulePath}'`)
<ide> );
<ide> delete require.extensions['.foo.bar'];
<ide> Module._pathCache = Object.create(null);
<ide><path>test/parallel/test-repl.js
<ide> const errorTests = [
<ide> expect: [
<ide> 'Thrown:',
<ide> /^{ Error: Cannot find module 'internal\/repl'/,
<add> /^Require stack:/,
<add> /^- <repl>/,
<ide> /^ at .*/,
<ide> /^ at .*/,
<ide> /^ at .*/,
<ide><path>test/sequential/test-module-loading.js
<ide> require('../fixtures/node_modules/foo');
<ide> assert.ok(my_path.path_func instanceof Function);
<ide> // this one does not exist and should throw
<ide> assert.throws(function() { require('./utils'); },
<del> /^Error: Cannot find module '\.\/utils'$/);
<add> /^Error: Cannot find module '\.\/utils'/);
<ide> }
<ide>
<ide> let errorThrown = false;
<ide> assert.strictEqual(require('../fixtures/registerExt2').custom, 'passed');
<ide> assert.strictEqual(require('../fixtures/foo').foo, 'ok');
<ide>
<ide> // Should not attempt to load a directory
<del>try {
<del> tmpdir.refresh();
<del> require(tmpdir.path);
<del>} catch (err) {
<del> assert.strictEqual(err.message, `Cannot find module '${tmpdir.path}'`);
<del>}
<add>assert.throws(
<add> () => {
<add> tmpdir.refresh();
<add> require(tmpdir.path);
<add> },
<add> (err) => err.message.startsWith(`Cannot find module '${tmpdir.path}`)
<add>);
<ide>
<ide> {
<ide> // Check load order is as expected
| 8
|
Python
|
Python
|
update jax version and re-enable some tests
|
b25b92ac4ffb8b66fb517f8888cbdc37075a9fd7
|
<ide><path>setup.py
<ide> "importlib_metadata",
<ide> "ipadic>=1.0.0,<2.0",
<ide> "isort>=5.5.4",
<del> "jax>=0.2.8",
<add> "jax>=0.2.8,!=0.3.2",
<ide> "jaxlib>=0.1.65",
<ide> "jieba",
<ide> "nltk",
<ide><path>src/transformers/dependency_versions_table.py
<ide> "importlib_metadata": "importlib_metadata",
<ide> "ipadic": "ipadic>=1.0.0,<2.0",
<ide> "isort": "isort>=5.5.4",
<del> "jax": "jax>=0.2.8",
<add> "jax": "jax>=0.2.8,!=0.3.2",
<ide> "jaxlib": "jaxlib>=0.1.65",
<ide> "jieba": "jieba",
<ide> "nltk": "nltk",
<ide><path>tests/speech_encoder_decoder/test_modeling_flax_speech_encoder_decoder.py
<ide> def test_flaxwav2vec2gpt2_pt_flax_equivalence(self):
<ide> self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
<ide> self.assert_almost_equals(fx_logits, pt_logits_loaded.numpy(), 4e-2)
<ide>
<del> @unittest.skip("Re-enable this test once this issue is fixed: https://github.com/google/jax/issues/9941")
<del> def test_encoder_decoder_model_from_encoder_decoder_pretrained(self):
<del> pass
<del>
<ide>
<ide> @require_flax
<ide> class FlaxWav2Vec2BartModelTest(FlaxEncoderDecoderMixin, unittest.TestCase):
<ide> def test_flaxwav2vec2bart_pt_flax_equivalence(self):
<ide>
<ide> self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
<ide> self.assert_almost_equals(fx_logits, pt_logits_loaded.numpy(), 4e-2)
<del>
<del> @unittest.skip("Re-enable this test once this issue is fixed: https://github.com/google/jax/issues/9941")
<del> def test_encoder_decoder_model_from_encoder_decoder_pretrained(self):
<del> pass
| 3
|
Javascript
|
Javascript
|
add env to failure message
|
28e6cab52e65e3d9cb672db6ea51193622f7883b
|
<ide><path>test/parallel/test-child-process-default-options.js
<ide> child.stdout.on('data', function(chunk) {
<ide>
<ide> process.on('exit', function() {
<ide> assert.ok(response.includes('HELLO=WORLD'),
<del> 'spawn did not use process.env as default');
<add> 'spawn did not use process.env as default' +
<add> `(process.env.HELLO = ${process.env.HELLO})`);
<ide> });
| 1
|
Text
|
Text
|
assign missing deprecation code
|
76ba9503a2aa1e2f4a304c4d2c60756cfa7ee09e
|
<ide><path>doc/api/deprecations.md
<ide> written twice. This introduces a race condition between threads, and is a
<ide> potential security vulnerability. There is no safe, cross-platform alternative
<ide> API.
<ide>
<del><a id="DEP0XXX"></a>
<del>### DEP0XXX: Use `request.destroy()` instead of `request.abort()`
<add><a id="DEP0140"></a>
<add>### DEP0140: Use `request.destroy()` instead of `request.abort()`
<ide> <!-- YAML
<ide> changes:
<ide> - version: REPLACEME
| 1
|
PHP
|
PHP
|
update docblocks for eloquent model
|
a50c1d0fc8a1d403cacad370cb74c9cf5d15a0f5
|
<ide><path>src/Illuminate/Database/Eloquent/Model.php
<ide> public static function getGlobalScope($scope)
<ide> /**
<ide> * Get the global scopes for this class instance.
<ide> *
<del> * @return array
<add> * @return \Illuminate\Database\Eloquent\ScopeInterface[]
<ide> */
<ide> public function getGlobalScopes()
<ide> {
<ide> protected static function firstByAttributes($attributes)
<ide> /**
<ide> * Begin querying the model.
<ide> *
<del> * @return \Illuminate\Database\Eloquent\Builder|static
<add> * @return \Illuminate\Database\Eloquent\Builder
<ide> */
<ide> public static function query()
<ide> {
<ide> public static function query()
<ide> * Begin querying the model on a given connection.
<ide> *
<ide> * @param string $connection
<del> * @return \Illuminate\Database\Eloquent\Builder|static
<add> * @return \Illuminate\Database\Eloquent\Builder
<ide> */
<ide> public static function on($connection = null)
<ide> {
<ide> public function freshTimestampString()
<ide> /**
<ide> * Get a new query builder for the model's table.
<ide> *
<del> * @return \Illuminate\Database\Eloquent\Builder|static
<add> * @return \Illuminate\Database\Eloquent\Builder
<ide> */
<ide> public function newQuery()
<ide> {
<ide> public function applyGlobalScopes($builder)
<ide> * Remove all of the global scopes from an Eloquent builder.
<ide> *
<ide> * @param \Illuminate\Database\Eloquent\Builder $builder
<del> * @return void
<add> * @return \Illuminate\Database\Eloquent\Builder
<ide> */
<ide> public function removeGlobalScopes($builder)
<ide> {
| 1
|
PHP
|
PHP
|
remove attribute filling from pivot model
|
a73812999ad2bcb5926cbdd296c723d9635e304c
|
<ide><path>src/Illuminate/Database/Eloquent/Relations/Pivot.php
<ide> public static function fromAttributes(Model $parent, $attributes, $table, $exist
<ide> */
<ide> public static function fromRawAttributes(Model $parent, $attributes, $table, $exists = false)
<ide> {
<del> $instance = static::fromAttributes($parent, $attributes, $table, $exists);
<add> $instance = static::fromAttributes($parent, [], $table, $exists);
<ide>
<ide> $instance->setRawAttributes($attributes, true);
<ide>
<ide><path>tests/Database/DatabaseEloquentPivotTest.php
<ide> public function testMutatorsAreCalledFromConstructor()
<ide> $this->assertTrue($pivot->getMutatorCalled());
<ide> }
<ide>
<del> public function testFromRawAttributesDoesNotDoubleMutate()
<add> public function testFromRawAttributesDoesNotMutate()
<ide> {
<ide> $parent = m::mock('Illuminate\Database\Eloquent\Model[getConnectionName]');
<ide> $parent->shouldReceive('getConnectionName')->once()->andReturn('connection');
<ide>
<del> $pivot = DatabaseEloquentPivotTestJsonCastStub::fromRawAttributes($parent, ['foo' => json_encode(['name' => 'Taylor'])], 'table', true);
<add> $pivot = DatabaseEloquentPivotTestMutatorStub::fromRawAttributes($parent, ['foo' => 'bar'], 'table', true);
<ide>
<del> $this->assertEquals(['name' => 'Taylor'], $pivot->foo);
<add> $this->assertFalse($pivot->getMutatorCalled());
<ide> }
<ide>
<ide> public function testPropertiesUnchangedAreNotDirty()
<ide> public function getMutatorCalled()
<ide> return $this->mutatorCalled;
<ide> }
<ide> }
<del>
<del>class DatabaseEloquentPivotTestJsonCastStub extends \Illuminate\Database\Eloquent\Relations\Pivot
<del>{
<del> protected $casts = [
<del> 'foo' => 'json',
<del> ];
<del>}
| 2
|
Python
|
Python
|
fix oss tests
|
1a3ff04f9d7ea826c0c4c176423d8586ae20f65f
|
<ide><path>libcloud/test/storage/test_oss.py
<ide> from libcloud.storage.drivers.oss import OSSStorageDriver
<ide> from libcloud.storage.drivers.oss import CHUNK_SIZE
<ide> from libcloud.storage.drivers.dummy import DummyIterator
<del>from libcloud.test import MockHttp, generate_random_data # pylint: disable-msg=E0611
<add>from libcloud.test import MockHttp, generate_random_data, make_response # pylint: disable-msg=E0611
<ide> from libcloud.test.file_fixtures import StorageFileFixtures # pylint: disable-msg=E0611
<ide> from libcloud.test.secrets import STORAGE_OSS_PARAMS
<ide>
<ide> def test_object_with_chinese_name(self):
<ide> self.assertTrue(obj.__repr__() is not None)
<ide>
<ide>
<del>class OSSMockHttp(MockHttp):
<add>class OSSMockHttp(MockHttp, unittest.TestCase):
<ide>
<ide> fixtures = StorageFileFixtures('oss')
<ide> base_headers = {}
<ide> def _foo_bar_object_not_found(self, method, url, body, headers):
<ide> headers,
<ide> httplib.responses[httplib.OK])
<ide>
<del> def _foo_bar_object(self, method, url, body, headers):
<add> def _foo_bar_object_delete(self, method, url, body, headers):
<ide> # test_delete_object
<ide> return (httplib.NO_CONTENT,
<ide> body,
<ide> def test_download_object_success(self):
<ide> self.assertTrue(result)
<ide>
<ide> def test_download_object_invalid_file_size(self):
<del> self.mock_raw_response_klass.type = 'invalid_size'
<add> self.mock_response_klass.type = 'invalid_size'
<ide> container = Container(name='foo_bar_container', extra={},
<ide> driver=self.driver)
<ide> obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
<ide> def test_download_object_invalid_file_size(self):
<ide> self.assertFalse(result)
<ide>
<ide> def test_download_object_not_found(self):
<del> self.mock_raw_response_klass.type = 'not_found'
<add> self.mock_response_klass.type = 'not_found'
<ide> container = Container(name='foo_bar_container', extra={},
<ide> driver=self.driver)
<ide> obj = Object(name='foo_bar_object', size=1000, hash=None, extra={},
<ide> def test_upload_object_invalid_hash1(self):
<ide> def upload_file(self, object_name=None, content_type=None,
<ide> request_path=None, request_method=None,
<ide> headers=None, file_path=None, stream=None):
<del> return {'response': MockResponse(200, headers={'etag': '2345'}),
<add> return {'response': make_response(200, headers={'etag': '2345'}),
<ide> 'bytes_transferred': 1000,
<ide> 'data_hash': 'hash343hhash89h932439jsaa89'}
<ide>
<del> self.mock_raw_response_klass.type = 'INVALID_HASH1'
<add> self.mock_response_klass.type = 'INVALID_HASH1'
<ide>
<ide> old_func = self.driver_type._upload_object
<ide> self.driver_type._upload_object = upload_file
<ide> def test_upload_object_success(self):
<ide> def upload_file(self, object_name=None, content_type=None,
<ide> request_path=None, request_method=None,
<ide> headers=None, file_path=None, stream=None):
<del> return {'response': MockResponse(200,
<del> headers={'etag': '0cc175b9c0f1b6a831c399e269772661'}),
<add> return {'response': make_response(200,
<add> headers={'etag': '0cc175b9c0f1b6a831c399e269772661'}),
<ide> 'bytes_transferred': 1000,
<ide> 'data_hash': '0cc175b9c0f1b6a831c399e269772661'}
<ide> self.mock_response_klass.type = None
<ide> def test_upload_object_with_acl(self):
<ide> def upload_file(self, object_name=None, content_type=None,
<ide> request_path=None, request_method=None,
<ide> headers=None, file_path=None, stream=None):
<del> return {'response': MockResponse(200, headers={'etag': '0cc175b9c0f1b6a831c399e269772661'}),
<add> return {'response': make_response(200, headers={'etag': '0cc175b9c0f1b6a831c399e269772661'}),
<ide> 'bytes_transferred': 1000,
<ide> 'data_hash': '0cc175b9c0f1b6a831c399e269772661'}
<ide>
<ide> def test_upload_object_with_invalid_acl(self):
<ide>
<ide> def test_upload_empty_object_via_stream(self):
<ide> if self.driver.supports_multipart_upload:
<del> self.mock_raw_response_klass.type = 'multipart'
<ide> self.mock_response_klass.type = 'multipart'
<ide> else:
<del> self.mock_raw_response_klass.type = None
<ide> self.mock_response_klass.type = None
<ide>
<ide> container = Container(name='foo_bar_container', extra={},
<ide> def test_upload_empty_object_via_stream(self):
<ide>
<ide> def test_upload_small_object_via_stream(self):
<ide> if self.driver.supports_multipart_upload:
<del> self.mock_raw_response_klass.type = 'multipart'
<ide> self.mock_response_klass.type = 'multipart'
<ide> else:
<del> self.mock_raw_response_klass.type = None
<ide> self.mock_response_klass.type = None
<ide>
<ide> container = Container(name='foo_bar_container', extra={},
<ide> def test_upload_small_object_via_stream(self):
<ide>
<ide> def test_upload_big_object_via_stream(self):
<ide> if self.driver.supports_multipart_upload:
<del> self.mock_raw_response_klass.type = 'multipart'
<ide> self.mock_response_klass.type = 'multipart'
<ide> else:
<del> self.mock_raw_response_klass.type = None
<ide> self.mock_response_klass.type = None
<ide>
<ide> container = Container(name='foo_bar_container', extra={},
<ide> def test_upload_object_via_stream_abort(self):
<ide> if not self.driver.supports_multipart_upload:
<ide> return
<ide>
<del> self.mock_raw_response_klass.type = 'MULTIPART'
<ide> self.mock_response_klass.type = 'MULTIPART'
<ide>
<ide> def _faulty_iterator():
<ide> def test_delete_object_not_found(self):
<ide> obj=obj)
<ide>
<ide> def test_delete_object_success(self):
<add> self.mock_response_klass.type = 'delete'
<ide> container = Container(name='foo_bar_container', extra={},
<ide> driver=self.driver)
<ide> obj = Object(name='foo_bar_object', size=1234, hash=None, extra=None,
| 1
|
Ruby
|
Ruby
|
update shallowing logic
|
bec303de86f9cef21a79ebea55da59dfa57fbdbb
|
<ide><path>Library/Homebrew/cmd/tap.rb
<ide> def tap
<ide> else
<ide> full_clone = if args.full?
<ide> true
<del> elsif args.shallow?.nil?
<del> !ENV["CI"]
<add> elsif !args.shallow?
<add> ENV["CI"].blank?
<ide> else
<ide> !args.shallow?
<ide> end
| 1
|
Text
|
Text
|
add mit license badge to readme
|
ede79f818e491a43ada3705314024021aed23d7c
|
<ide><path>README.md
<ide>
<ide> [](https://travis-ci.org/fchollet/keras)
<ide> [](https://badge.fury.io/py/keras)
<add>[](https://github.com/fchollet/keras/blob/master/LICENSE)
<ide>
<ide> ## You have just found Keras.
<ide>
| 1
|
PHP
|
PHP
|
throw exception if flash key value is not an array
|
293a6279740e6dbcb57b983e9f0b9bd40a294765
|
<ide><path>src/View/Helper/FlashHelper.php
<ide> class FlashHelper extends Helper {
<ide> * @param string $key The [Flash.]key you are rendering in the view.
<ide> * @param array $options Additional options to use for the creation of this flash message.
<ide> * Supports the 'params', and 'element' keys that are used in the helper.
<del> * @return string
<add> * @return string|null Rendered flash message or null if flash key does not exist
<add> * in session.
<add> * @throws \UnexpectedValueException If value for flash settings key is not an array.
<ide> */
<ide> public function render($key = 'flash', array $options = []) {
<ide> if (!$this->request->session()->check("Flash.$key")) {
<del> return '';
<add> return;
<ide> }
<ide>
<del> $flash = $options + $this->request->session()->read("Flash.$key");
<add> $flash = $this->request->session()->read("Flash.$key");
<add> if (!is_array($flash)) {
<add> throw new \UnexpectedValueException(sprintf(
<add> 'Value for flash setting key "%s" must be an array.',
<add> $key
<add> ));
<add> }
<add> $flash = $options + $flash;
<ide> $this->request->session()->delete("Flash.$key");
<ide>
<ide> return $this->_View->element($flash['element'], $flash);
<ide><path>tests/TestCase/View/Helper/FlashHelperTest.php
<ide> public function testFlash() {
<ide>
<ide> $expected['child'] = ['tag' => 'p', 'content' => 'This is a test of the emergency broadcasting system'];
<ide> $this->assertTag($expected, $result);
<add>
<add> $this->assertNull($this->Flash->render('non-existent'));
<add> }
<add>
<add>/**
<add> * testFlashThrowsException
<add> *
<add> * @expectedException \UnexpectedValueException
<add> */
<add> public function testFlashThrowsException() {
<add> $this->View->request->session()->write('Flash.foo', 'bar');
<add> $this->Flash->render('foo');
<ide> }
<ide>
<ide> /**
| 2
|
Ruby
|
Ruby
|
remove array.wrap calls in activesupport
|
b33bd077fab8e249ba3ddfb7e656c34243c05a1d
|
<ide><path>activesupport/lib/active_support/callbacks.rb
<ide> require 'active_support/concern'
<ide> require 'active_support/descendants_tracker'
<del>require 'active_support/core_ext/array/wrap'
<ide> require 'active_support/core_ext/class/attribute'
<ide> require 'active_support/core_ext/kernel/reporting'
<ide> require 'active_support/core_ext/kernel/singleton_class'
<ide> def clone(chain, klass)
<ide> end
<ide>
<ide> def normalize_options!(options)
<del> options[:if] = Array.wrap(options[:if])
<del> options[:unless] = Array.wrap(options[:unless])
<add> options[:if] = Array(options[:if])
<add> options[:unless] = Array(options[:unless])
<ide>
<ide> options[:per_key] ||= {}
<del> options[:per_key][:if] = Array.wrap(options[:per_key][:if])
<del> options[:per_key][:unless] = Array.wrap(options[:per_key][:unless])
<add> options[:per_key][:if] = Array(options[:per_key][:if])
<add> options[:per_key][:unless] = Array(options[:per_key][:unless])
<ide> end
<ide>
<ide> def name
<ide> def _compile_options(options)
<ide> conditions = ["true"]
<ide>
<ide> unless options[:if].empty?
<del> conditions << Array.wrap(_compile_filter(options[:if]))
<add> conditions << Array(_compile_filter(options[:if]))
<ide> end
<ide>
<ide> unless options[:unless].empty?
<del> conditions << Array.wrap(_compile_filter(options[:unless])).map {|f| "!#{f}"}
<add> conditions << Array(_compile_filter(options[:unless])).map {|f| "!#{f}"}
<ide> end
<ide>
<ide> conditions.flatten.join(" && ")
<ide> def _compile_filter(filter)
<ide> @klass.send(:define_method, "#{method_name}_object") { filter }
<ide>
<ide> _normalize_legacy_filter(kind, filter)
<del> scopes = Array.wrap(chain.config[:scope])
<add> scopes = Array(chain.config[:scope])
<ide> method_to_call = scopes.map{ |s| s.is_a?(Symbol) ? send(s) : s }.join("_")
<ide>
<ide> @klass.class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1
<ide><path>activesupport/lib/active_support/deprecation/behaviors.rb
<ide> require "active_support/notifications"
<del>require "active_support/core_ext/array/wrap"
<ide>
<ide> module ActiveSupport
<ide> module Deprecation
<ide> def behavior
<ide> # ActiveSupport::Deprecation.behavior = :stderr
<ide> # ActiveSupport::Deprecation.behavior = [:stderr, :log]
<ide> def behavior=(behavior)
<del> @behavior = Array.wrap(behavior).map { |b| DEFAULT_BEHAVIORS[b] || b }
<add> @behavior = Array(behavior).map { |b| DEFAULT_BEHAVIORS[b] || b }
<ide> end
<ide> end
<ide>
<ide><path>activesupport/lib/active_support/file_update_checker.rb
<del>require "active_support/core_ext/array/wrap"
<ide> require "active_support/core_ext/array/extract_options"
<ide>
<ide> module ActiveSupport
<ide> def compile_glob(hash) #:nodoc:
<ide> end
<ide>
<ide> def compile_ext(array) #:nodoc:
<del> array = Array.wrap(array)
<add> array = Array(array)
<ide> return if array.empty?
<ide> ".{#{array.join(",")}}"
<ide> end
<ide><path>activesupport/lib/active_support/json/encoding.rb
<ide>
<ide> require 'bigdecimal'
<ide> require 'active_support/core_ext/big_decimal/conversions' # for #to_s
<del>require 'active_support/core_ext/array/wrap'
<ide> require 'active_support/core_ext/hash/except'
<ide> require 'active_support/core_ext/hash/slice'
<ide> require 'active_support/core_ext/object/instance_variables'
<ide> def as_json(options = nil) #:nodoc:
<ide> # create a subset of the hash by applying :only or :except
<ide> subset = if options
<ide> if attrs = options[:only]
<del> slice(*Array.wrap(attrs))
<add> slice(*Array(attrs))
<ide> elsif attrs = options[:except]
<del> except(*Array.wrap(attrs))
<add> except(*Array(attrs))
<ide> else
<ide> self
<ide> end
<ide><path>activesupport/lib/active_support/tagged_logging.rb
<ide> def initialize(logger)
<ide>
<ide> def tagged(*new_tags)
<ide> tags = current_tags
<del> new_tags = Array.wrap(new_tags).flatten.reject(&:blank?)
<add> new_tags = Array(new_tags).flatten.reject(&:blank?)
<ide> tags.concat new_tags
<ide> yield
<ide> ensure
<ide><path>activesupport/lib/active_support/testing/assertions.rb
<del>require 'active_support/core_ext/array/wrap'
<ide> require 'active_support/core_ext/object/blank'
<ide>
<ide> module ActiveSupport
<ide> module Assertions
<ide> # post :delete, :id => ...
<ide> # end
<ide> def assert_difference(expression, difference = 1, message = nil, &block)
<del> expressions = Array.wrap expression
<add> expressions = Array(expression)
<ide>
<ide> exps = expressions.map { |e|
<ide> e.respond_to?(:call) ? e : lambda { eval(e, block.binding) }
| 6
|
Python
|
Python
|
add type hints for searches/ternary_search.py
|
7df91e681a7270d9ff10e504abe81df5bdd34776
|
<ide><path>searches/ternary_search.py
<ide> Space Complexity : O(1)
<ide> """
<ide> import sys
<add>from typing import List
<ide>
<ide> # This is the precision for this function which can be altered.
<ide> # It is recommended for users to keep this number greater than or equal to 10.
<ide> precision = 10
<ide>
<ide>
<ide> # This is the linear search that will occur after the search space has become smaller.
<del>def lin_search(left, right, A, target):
<add>def lin_search(left: int, right: int, A: List[int], target: int):
<ide> for i in range(left, right + 1):
<ide> if A[i] == target:
<ide> return i
<ide>
<ide>
<ide> # This is the iterative method of the ternary search algorithm.
<del>def ite_ternary_search(A, target):
<add>def ite_ternary_search(A: List[int], target: int):
<ide> left = 0
<ide> right = len(A) - 1
<ide> while True:
<ide> def ite_ternary_search(A, target):
<ide>
<ide>
<ide> # This is the recursive method of the ternary search algorithm.
<del>def rec_ternary_search(left, right, A, target):
<add>def rec_ternary_search(left: int, right: int, A: List[int], target: int):
<ide> if left < right:
<ide>
<ide> if right - left < precision:
<ide> def rec_ternary_search(left, right, A, target):
<ide>
<ide>
<ide> # This function is to check if the array is sorted.
<del>def __assert_sorted(collection):
<add>def __assert_sorted(collection: List[int]) -> bool:
<ide> if collection != sorted(collection):
<ide> raise ValueError("Collection must be sorted")
<ide> return True
| 1
|
PHP
|
PHP
|
add beanstalk queue block_for config key
|
2588b254a0ee43c2cd318905694a548a343aafe3
|
<ide><path>config/queue.php
<ide> 'host' => 'localhost',
<ide> 'queue' => 'default',
<ide> 'retry_after' => 90,
<add> 'block_for' => 0,
<ide> ],
<ide>
<ide> 'sqs' => [
| 1
|
Text
|
Text
|
include router.aspath caveats in docs
|
5b893663fdb3658de4e8ce2bb5966fbfe4e905ef
|
<ide><path>docs/advanced-features/automatic-static-optimization.md
<ide> And if you add `getServerSideProps` to the page, it will then be JavaScript, lik
<ide>
<ide> - If you have a [custom `App`](/docs/advanced-features/custom-app.md) with `getInitialProps` then this optimization will be turned off in pages without [Static Generation](/docs/basic-features/data-fetching/get-static-props.md).
<ide> - If you have a [custom `Document`](/docs/advanced-features/custom-document.md) with `getInitialProps` be sure you check if `ctx.req` is defined before assuming the page is server-side rendered. `ctx.req` will be `undefined` for pages that are prerendered.
<add>- Avoid using the `asPath` value on [`next/router`](/docs/api-reference/next/router.md#router-object) in the rendering tree until the router's `isReady` field is `true`. Statically optimized pages only know `asPath` on the client and not the server, so using it as a prop may lead to mismatch errors. The `active-class-name` example demonstrates one way to use `asPath` as a prop.
<ide><path>docs/api-reference/next/router.md
<ide> The following is the definition of the `router` object returned by both [`useRou
<ide> - `isReady`: `boolean` - Whether the router fields are updated client-side and ready for use. Should only be used inside of `useEffect` methods and not for conditionally rendering on the server. See related docs for use case with [automatically statically optimized pages](/docs/advanced-features/automatic-static-optimization.md)
<ide> - `isPreview`: `boolean` - Whether the application is currently in [preview mode](/docs/advanced-features/preview-mode.md).
<ide>
<add>> Using the `asPath` field may lead to a mismatch between client and server if the page is rendered using server-side rendering or [automatic static optimization](/docs/advanced-features/automatic-static-optimization.md). Avoid using `asPath` until the `isReady` field is `true`.
<add>
<ide> The following methods are included inside `router`:
<ide>
<ide> ### router.push
| 2
|
PHP
|
PHP
|
remove function aliases
|
ce8aff793ef96c3383054209ef2b7f07174f354f
|
<ide><path>src/Mailer/Mailer.php
<ide> public function getName()
<ide> static::$name = str_replace(
<ide> 'Mailer',
<ide> '',
<del> join('', array_slice(explode('\\', get_class($this)), -1))
<add> implode('', array_slice(explode('\\', get_class($this)), -1))
<ide> );
<ide> }
<ide>
| 1
|
Java
|
Java
|
call super.onactivityresult in reactactivity
|
29249e19bd9cb4de8cb5b00edcd17f2c49d0d02c
|
<ide><path>ReactAndroid/src/main/java/com/facebook/react/ReactActivity.java
<ide> protected void onDestroy() {
<ide>
<ide> @Override
<ide> public void onActivityResult(int requestCode, int resultCode, Intent data) {
<add> super.onActivityResult(requestCode, resultCode, data);
<ide> mDelegate.onActivityResult(requestCode, resultCode, data);
<ide> }
<ide>
| 1
|
Text
|
Text
|
fix incorrect spelling of bcrypt
|
94635dcc0bb155dbeabff6e2f01360af52629100
|
<ide><path>curriculum/challenges/chinese/06-information-security-and-quality-assurance/information-security-with-helmetjs/understand-bcrypt-hashes.chinese.md
<ide> localeTitle: 了解BCrypt Hashes
<ide>
<ide> ```yml
<ide> tests:
<del> - text: BCyrpt是一个依赖
<add> - text: BCrypt是一个依赖
<ide> testString: 'getUserInput => $.get(getUserInput("url")+ "/_api/package.json") .then(data => { var packJson = JSON.parse(data); assert.property(packJson.dependencies, "bcrypt", "Your project should list "bcrypt" as a dependency"); }, xhr => { throw new Error(xhr.statusText); })'
<ide> - text: 已经适当地要求BCrypt
<ide> testString: 'getUserInput => $.get(getUserInput("url")+ "/_api/server.js").then(data => {assert.match(data, /bcrypt.*=.*require.*("|")bcrypt("|")/gi, "You should correctly require and instantiate socket.io as io.");}, xhr => { throw new Error(xhr.statusText); })'
<ide><path>curriculum/challenges/english/06-information-security-and-quality-assurance/information-security-with-helmetjs/understand-bcrypt-hashes.english.md
<ide> Submit your page when you think you've got it right.
<ide>
<ide> ```yml
<ide> tests:
<del> - text: BCyrpt is a dependency
<add> - text: BCrypt is a dependency
<ide> testString: getUserInput => $.get(getUserInput('url')+ '/_api/package.json') .then(data => { var packJson = JSON.parse(data); assert.property(packJson.dependencies, 'bcrypt', 'Your project should list "bcrypt" as a dependency'); }, xhr => { throw new Error(xhr.statusText); })
<ide> - text: BCrypt has been properly required
<ide> testString: getUserInput => $.get(getUserInput('url')+ '/_api/server.js').then(data => {assert.match(data, /bcrypt.*=.*require.*('|")bcrypt('|")/gi, 'You should correctly require and instantiate socket.io as io.');}, xhr => { throw new Error(xhr.statusText); })
<ide><path>curriculum/challenges/portuguese/06-information-security-and-quality-assurance/information-security-with-helmetjs/understand-bcrypt-hashes.portuguese.md
<ide> localeTitle: Entenda os Hashes do BCrypt
<ide>
<ide> ```yml
<ide> tests:
<del> - text: BCyrpt é uma dependência
<add> - text: BCrypt é uma dependência
<ide> testString: 'getUserInput => $.get(getUserInput("url")+ "/_api/package.json") .then(data => { var packJson = JSON.parse(data); assert.property(packJson.dependencies, "bcrypt", "Your project should list "bcrypt" as a dependency"); }, xhr => { throw new Error(xhr.statusText); })'
<ide> - text: O BCrypt foi devidamente requerido
<ide> testString: 'getUserInput => $.get(getUserInput("url")+ "/_api/server.js").then(data => {assert.match(data, /bcrypt.*=.*require.*("|")bcrypt("|")/gi, "You should correctly require and instantiate socket.io as io.");}, xhr => { throw new Error(xhr.statusText); })'
<ide><path>curriculum/challenges/spanish/06-information-security-and-quality-assurance/information-security-with-helmetjs/understand-bcrypt-hashes.spanish.md
<ide> localeTitle: Comprender Hashes de Egipto
<ide>
<ide> ```yml
<ide> tests:
<del> - text: BCyrpt es una dependencia
<add> - text: BCrypt es una dependencia
<ide> testString: 'getUserInput => $.get(getUserInput("url")+ "/_api/package.json") .then(data => { var packJson = JSON.parse(data); assert.property(packJson.dependencies, "bcrypt", "Your project should list "bcrypt" as a dependency"); }, xhr => { throw new Error(xhr.statusText); })'
<ide> - text: BCrypt ha sido correctamente requerido
<ide> testString: 'getUserInput => $.get(getUserInput("url")+ "/_api/server.js").then(data => {assert.match(data, /bcrypt.*=.*require.*("|")bcrypt("|")/gi, "You should correctly require and instantiate socket.io as io.");}, xhr => { throw new Error(xhr.statusText); })'
| 4
|
Go
|
Go
|
fix issue with missing fields for `ps` template
|
b4ab4530841417305bcaa0b0d25d172bfbf3956a
|
<ide><path>cli/command/container/list.go
<ide> func newListCommand(dockerCli *command.DockerCli) *cobra.Command {
<ide> type preProcessor struct {
<ide> types.Container
<ide> opts *types.ContainerListOptions
<add>
<add> // Fields that need to exist so the template doesn't error out
<add> // These are needed since they are available on the final object but are not
<add> // fields in types.Container
<add> // TODO(cpuguy83): this seems rather broken
<add> Networks, CreatedAt, RunningFor bool
<ide> }
<ide>
<ide> // Size sets the size option when called by a template execution.
<ide> func (p *preProcessor) Size() bool {
<ide> return true
<ide> }
<ide>
<del>// Networks does nothing but return true.
<del>// It is needed to avoid the template check to fail as this field
<del>// doesn't exist in `types.Container`
<del>func (p *preProcessor) Networks() bool {
<del> return true
<del>}
<del>
<ide> func buildContainerListOptions(opts *psOptions) (*types.ContainerListOptions, error) {
<ide> options := &types.ContainerListOptions{
<ide> All: opts.all,
<ide><path>cli/command/formatter/container_test.go
<ide> func TestContainerContextWriteJSONField(t *testing.T) {
<ide> assert.Equal(t, s, containers[i].ID)
<ide> }
<ide> }
<add>
<add>func TestContainerBackCompat(t *testing.T) {
<add> containers := []types.Container{types.Container{ID: "brewhaha"}}
<add> cases := []string{
<add> "ID",
<add> "Names",
<add> "Image",
<add> "Command",
<add> "CreatedAt",
<add> "RunningFor",
<add> "Ports",
<add> "Status",
<add> "Size",
<add> "Labels",
<add> "Mounts",
<add> }
<add> buf := bytes.NewBuffer(nil)
<add> for _, c := range cases {
<add> ctx := Context{Format: Format(fmt.Sprintf("{{ .%s }}", c)), Output: buf}
<add> if err := ContainerWrite(ctx, containers); err != nil {
<add> t.Logf("could not render template for field '%s': %v", c, err)
<add> t.Fail()
<add> }
<add> buf.Reset()
<add> }
<add>}
| 2
|
Go
|
Go
|
fix permission detection for aufs
|
1a0347ff1dd1035066b5faf86db0587491b5d638
|
<ide><path>integration-cli/docker_cli_save_load_test.go
<ide> func TestSaveMultipleNames(t *testing.T) {
<ide> // Issue #6722 #5892 ensure directories are included in changes
<ide> func TestSaveDirectoryPermissions(t *testing.T) {
<ide> layerEntries := []string{"opt/", "opt/a/", "opt/a/b/", "opt/a/b/c"}
<add> layerEntriesAUFS := []string{"./", ".wh..wh.aufs", ".wh..wh.orph/", ".wh..wh.plnk/", "opt/", "opt/a/", "opt/a/b/", "opt/a/b/c"}
<ide>
<ide> name := "save-directory-permissions"
<ide> tmpDir, err := ioutil.TempDir("", "save-layers-with-directories")
<ide> func TestSaveDirectoryPermissions(t *testing.T) {
<ide> t.Fatalf("encountered error while listing tar entries: %s", err)
<ide> }
<ide>
<del> if reflect.DeepEqual(entries, layerEntries) {
<add> if reflect.DeepEqual(entries, layerEntries) || reflect.DeepEqual(entries, layerEntriesAUFS) {
<ide> found = true
<add> break
<ide> }
<ide> }
<ide> }
| 1
|
Python
|
Python
|
remove unused code
|
b197ba91d7bd3c390b67cd67da2eb381b25a22b2
|
<ide><path>keras/layers/local.py
<ide> def compute_output_shape(self, input_shape):
<ide> return (input_shape[0], rows, cols, self.filters)
<ide>
<ide> def call(self, inputs):
<del> _, _, filters = self.kernel_shape
<del>
<ide> output = K.local_conv2d(inputs,
<ide> self.kernel,
<ide> self.kernel_size,
<ide> def call(self, inputs):
<ide> self.data_format)
<ide>
<ide> if self.use_bias:
<del> if self.data_format == 'channels_first' or self.data_format == 'channels_last':
<del> output = K.bias_add(output, self.bias, data_format=self.data_format)
<add> output = K.bias_add(output, self.bias, data_format=self.data_format)
<ide>
<ide> output = self.activation(output)
<ide> return output
| 1
|