| content_type (stringclasses, 8 values) | main_lang (stringclasses, 7 values) | message (stringlengths, 1–50) | sha (stringlengths, 40) | patch (stringlengths, 52–962k) | file_count (int64, 1–300) |
|---|---|---|---|---|---|
PHP
|
PHP
|
fix ambiguous content types in requesthandler
|
6e4493cc146bc98fdea9ebf70165e86297a37668
|
<ide><path>lib/Cake/Controller/Component/RequestHandlerComponent.php
<ide> protected function _setExtension() {
<ide> }
<ide> $extensions = Router::extensions();
<ide> $preferred = array_shift($accept);
<del> $preferredTypes = $this->mapType($preferred);
<add> $preferredTypes = $this->response->mapType($preferred);
<ide> $similarTypes = array_intersect($extensions, $preferredTypes);
<del> if (count($similarTypes) === 1 && !in_array('html', $preferredTypes)) {
<add> if (count($similarTypes) === 1 && !in_array('xhtml', $preferredTypes) && !in_array('html', $preferredTypes)) {
<ide> $this->ext = array_shift($similarTypes);
<ide> }
<ide> }
<ide><path>lib/Cake/Test/Case/Controller/Component/RequestHandlerComponentTest.php
<ide> public function testInitializeNoContentTypeWithSingleAccept() {
<ide> $this->RequestHandler->initialize($this->Controller);
<ide> $this->assertNull($this->RequestHandler->ext);
<ide> }
<add>
<ide> /**
<ide> * Test that ext is not set with multiple accepted content types.
<ide> *
<ide> public function testInitializeNoContentTypeWithMultipleAcceptedTypes() {
<ide> $this->RequestHandler->initialize($this->Controller);
<ide> $this->assertNull($this->RequestHandler->ext);
<ide> }
<add>
<add>/**
<add> * Test that ext is not set with confusing android accepts headers.
<add> *
<add> * @return void
<add> */
<add> public function testInitializeAmbiguousAndroidAccepts() {
<add> $_SERVER['HTTP_ACCEPT'] = 'application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5';
<add> $this->assertNull($this->RequestHandler->ext);
<add> Router::parseExtensions('html', 'xml');
<add>
<add> $this->RequestHandler->initialize($this->Controller);
<add> $this->assertNull($this->RequestHandler->ext);
<add> }
<ide>
<ide> /**
<ide> * Test that a type mismatch doesn't incorrectly set the ext
| 2
|
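The CakePHP fix above refuses to auto-select a response extension when the client's most-preferred Accept group contains an (x)html type, since browsers such as old Android send ambiguous headers. A minimal Python sketch of that negotiation rule, with a hypothetical `MIME_TO_EXT` mapping (this is not CakePHP's code):

```python
# Hypothetical MIME-to-extension map; CakePHP derives this from its response class.
MIME_TO_EXT = {
    "application/xml": ["xml"],
    "text/html": ["html"],
    "application/xhtml+xml": ["xhtml"],
    "application/json": ["json"],
}

def pick_extension(accept_header, enabled_extensions):
    # Parse "type;q=0.9" pairs and rank media types by quality factor.
    ranked = []
    for part in accept_header.split(","):
        media, _, params = part.partition(";")
        q = 1.0
        for param in params.split(";"):
            name, _, value = param.strip().partition("=")
            if name == "q":
                q = float(value)
        ranked.append((q, media.strip()))
    ranked.sort(key=lambda pair: -pair[0])

    # The "preferred" group is every type tied at the top quality value.
    top_q = ranked[0][0]
    preferred_exts = []
    for q, media in ranked:
        if q == top_q:
            preferred_exts.extend(MIME_TO_EXT.get(media, []))

    similar = [ext for ext in enabled_extensions if ext in preferred_exts]
    # Only guess when the preference is unambiguous and not an (x)html browser header.
    if len(similar) == 1 and not {"html", "xhtml"} & set(preferred_exts):
        return similar[0]
    return None

# The ambiguous Android header from the new test yields no guess:
android = ("application/xml,application/xhtml+xml,text/html;q=0.9,"
           "text/plain;q=0.8,image/png,*/*;q=0.5")
assert pick_extension(android, ["html", "xml"]) is None
# An unambiguous client still gets its extension:
assert pick_extension("application/json", ["json", "xml"]) == "json"
```

|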
Java
|
Java
|
implement measure for nodes textinput
|
055b31a16590954c79b90ac2b9d5ad7cd89a0509
|
<ide><path>ReactAndroid/src/main/java/com/facebook/react/flat/RCTTextInput.java
<ide>
<ide> package com.facebook.react.flat;
<ide>
<del>import android.text.SpannableStringBuilder;
<add>import javax.annotation.Nullable;
<ide>
<add>import android.text.SpannableStringBuilder;
<add>import android.util.TypedValue;
<add>import android.view.ViewGroup;
<add>import android.widget.EditText;
<add>
<add>import com.facebook.csslayout.CSSNode;
<add>import com.facebook.csslayout.MeasureOutput;
<add>import com.facebook.csslayout.Spacing;
<add>import com.facebook.infer.annotation.Assertions;
<add>import com.facebook.react.uimanager.PixelUtil;
<add>import com.facebook.react.uimanager.ThemedReactContext;
<ide> import com.facebook.react.uimanager.UIViewOperationQueue;
<add>import com.facebook.react.uimanager.ViewDefaults;
<add>import com.facebook.react.uimanager.ViewProps;
<ide> import com.facebook.react.uimanager.annotations.ReactProp;
<ide> import com.facebook.react.views.text.ReactTextUpdate;
<ide>
<ide> import static com.facebook.react.views.text.ReactTextShadowNode.UNSET;
<ide>
<del>public class RCTTextInput extends RCTVirtualText implements AndroidView {
<add>public class RCTTextInput extends RCTVirtualText implements AndroidView, CSSNode.MeasureFunction {
<add>
<ide> private int mJsEventCount = UNSET;
<ide> private boolean mPaddingChanged = false;
<add> private int mNumberOfLines = UNSET;
<add> private @Nullable EditText mEditText;
<ide>
<ide> public RCTTextInput() {
<ide> forceMountToView();
<add> setMeasureFunction(this);
<add> }
<add>
<add> @Override
<add> public void setThemedContext(ThemedReactContext themedContext) {
<add> super.setThemedContext(themedContext);
<add>
<add> mEditText = new EditText(themedContext);
<add> // This is needed to fix an android bug since 4.4.3 which will throw an NPE in measure,
<add> // setting the layoutParams fixes it: https://code.google.com/p/android/issues/detail?id=75877
<add> mEditText.setLayoutParams(
<add> new ViewGroup.LayoutParams(
<add> ViewGroup.LayoutParams.WRAP_CONTENT,
<add> ViewGroup.LayoutParams.WRAP_CONTENT));
<add>
<add> setDefaultPadding(Spacing.LEFT, mEditText.getPaddingLeft());
<add> setDefaultPadding(Spacing.TOP, mEditText.getPaddingTop());
<add> setDefaultPadding(Spacing.RIGHT, mEditText.getPaddingRight());
<add> setDefaultPadding(Spacing.BOTTOM, mEditText.getPaddingBottom());
<add> }
<add>
<add> @Override
<add> public void measure(CSSNode node, float width, float height, MeasureOutput measureOutput) {
<add> // measure() should never be called before setThemedContext()
<add> EditText editText = Assertions.assertNotNull(mEditText);
<add>
<add> int fontSize = getFontSize();
<add> measureOutput.width = width;
<add> editText.setTextSize(
<add> TypedValue.COMPLEX_UNIT_PX,
<add> fontSize == UNSET ?
<add> (int) Math.ceil(PixelUtil.toPixelFromSP(ViewDefaults.FONT_SIZE_SP)) : fontSize);
<add> Spacing padding = getPadding();
<add> editText.setPadding(
<add> (int) Math.ceil(padding.get(Spacing.LEFT)),
<add> (int) Math.ceil(padding.get(Spacing.TOP)),
<add> (int) Math.ceil(padding.get(Spacing.RIGHT)),
<add> (int) Math.ceil(padding.get(Spacing.BOTTOM)));
<add>
<add> if (mNumberOfLines != UNSET) {
<add> editText.setLines(mNumberOfLines);
<add> }
<add>
<add> editText.measure(0 /* unspecified */, 0 /* unspecified */);
<add> measureOutput.height = editText.getMeasuredHeight();
<ide> }
<ide>
<ide> @Override
<ide> public void setMostRecentEventCount(int mostRecentEventCount) {
<ide> mJsEventCount = mostRecentEventCount;
<ide> }
<ide>
<add> @ReactProp(name = ViewProps.NUMBER_OF_LINES, defaultInt = Integer.MAX_VALUE)
<add> public void setNumberOfLines(int numberOfLines) {
<add> mNumberOfLines = numberOfLines;
<add> notifyChanged(true);
<add> }
<add>
<ide> @Override
<ide> public void setPadding(int spacingType, float padding) {
<ide> if (getPadding().set(spacingType, padding)) {
| 1
|
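The Java patch above wires the shadow node into the layout engine via a measure function: an off-screen `EditText` is styled to match the node, measured, and its height reported back. A rough Python sketch of that contract, with all names and the line-height model invented for illustration:

```python
class FakeEditText:
    """Stand-in for the off-screen EditText the Java code measures."""
    def __init__(self):
        self.font_size = 14.0
        self.lines = 1
        self.vertical_padding = 0.0

    def measured_height(self):
        # Crude line-height model; the real widget asks Android to measure.
        return self.lines * self.font_size * 1.2 + self.vertical_padding

_SHARED_EDIT_TEXT = FakeEditText()  # reused across calls, like mEditText

def measure_text_input(node, width):
    # Mirror the node's current style onto the shared widget, then measure.
    _SHARED_EDIT_TEXT.font_size = getattr(node, "font_size", 14.0)
    _SHARED_EDIT_TEXT.lines = getattr(node, "number_of_lines", 1)
    # Width is imposed by the parent; only the height is measured.
    return width, _SHARED_EDIT_TEXT.measured_height()

class LayoutNode:
    """A node with no intrinsic size delegates sizing to a callback."""
    def __init__(self, measure_fn):
        self.measure_fn = measure_fn
        self.width = self.height = 0.0

    def layout(self, available_width):
        self.width, self.height = self.measure_fn(self, available_width)

node = LayoutNode(measure_text_input)
node.number_of_lines = 3
node.layout(available_width=320)
print(node.width, node.height)  # 320 50.4 (3 lines at the default font size)
```

|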
Ruby
|
Ruby
|
fix typo in dispatcher#controller documentation
|
702aecb126712de9f996da74357cafe14f449d24
|
<ide><path>actionpack/lib/action_dispatch/routing/route_set.rb
<ide> def prepare_params!(params)
<ide>
<ide> # If this is a default_controller (i.e. a controller specified by the user)
<ide> # we should raise an error in case it's not found, because it usually means
<del> # an user error. However, if the controller was retrieved through a dynamic
<add> # a user error. However, if the controller was retrieved through a dynamic
<ide> # segment, as in :controller(/:action), we should simply return nil and
<ide> # delegate the control back to Rack cascade. Besides, if this is not a default
<ide> # controller, it means we should respect the @scope[:module] parameter.
| 1
|
Go
|
Go
|
add missing defines
|
a015f38f4a886518828fe3807ee7dc9ff8ab9585
|
<ide><path>graphdriver/devmapper/devmapper_wrapper.go
<ide> package devmapper
<ide> #include <linux/loop.h> // FIXME: present only for defines, maybe we can remove it?
<ide> #include <linux/fs.h> // FIXME: present only for BLKGETSIZE64, maybe we can remove it?
<ide>
<add>#ifndef LOOP_CTL_GET_FREE
<add> #define LOOP_CTL_GET_FREE 0x4C82
<add>#endif
<add>
<add>#ifndef LO_FLAGS_PARTSCAN
<add> #define LO_FLAGS_PARTSCAN 8
<add>#endif
<add>
<ide> // FIXME: Can't we find a way to do the logging in pure Go?
<ide> extern void DevmapperLogCallback(int level, char *file, int line, int dm_errno_or_class, char *str);
<ide>
<ide> type (
<ide> }
<ide> )
<ide>
<del>// FIXME: Make sure the values are defined in C
<ide> // IOCTL consts
<ide> const (
<ide> BlkGetSize64 = C.BLKGETSIZE64
| 1
|
Ruby
|
Ruby
|
push the parameter parsers on to the class
|
b93c226d19615fe504f9e12d6c0ee2d70683e5fa
|
<ide><path>actionpack/lib/action_dispatch/http/parameters.rb
<ide> module Http
<ide> module Parameters
<ide> PARAMETERS_KEY = 'action_dispatch.request.path_parameters'
<ide>
<add> DEFAULT_PARSERS = {
<add> Mime::JSON => lambda { |raw_post|
<add> data = ActiveSupport::JSON.decode(raw_post)
<add> data.is_a?(Hash) ? data : {:_json => data}
<add> }
<add> }
<add>
<add> def self.included(klass)
<add> class << klass
<add> attr_accessor :parameter_parsers
<add> end
<add>
<add> klass.parameter_parsers = DEFAULT_PARSERS
<add> end
<ide> # Returns both GET and POST \parameters in a single hash.
<ide> def parameters
<ide> params = get_header("action_dispatch.request.parameters")
<ide> def path_parameters=(parameters) #:nodoc:
<ide> def path_parameters
<ide> get_header(PARAMETERS_KEY) || {}
<ide> end
<add>
<add> private
<add>
<add> def parse_formatted_parameters(request, parsers)
<add> return yield if request.content_length.zero?
<add>
<add> strategy = parsers.fetch(request.content_mime_type) { return yield }
<add>
<add> begin
<add> strategy.call(request.raw_post)
<add> rescue => e # JSON or Ruby code block errors
<add> my_logger = logger || ActiveSupport::Logger.new($stderr)
<add> my_logger.debug "Error occurred while parsing request parameters.\nContents:\n\n#{request.raw_post}"
<add>
<add> raise ParamsParser::ParseError.new(e.message, e)
<add> end
<add> end
<add>
<add> def params_parsers
<add> ActionDispatch::Request.parameter_parsers
<add> end
<ide> end
<ide> end
<ide> end
<ide><path>actionpack/lib/action_dispatch/http/request.rb
<ide> def POST
<ide> end
<ide> alias :request_parameters :POST
<ide>
<del> def params_parsers
<del> fetch_header "action_dispatch.request.params_parsers" do
<del> {}
<del> end
<del> end
<del>
<del> def params_parsers= hash
<del> set_header "action_dispatch.request.params_parsers", hash
<del> end
<del>
<ide> # Returns the authorization header regardless of whether it was specified directly or through one of the
<ide> # proxy alternatives.
<ide> def authorization
<ide> def check_method(name)
<ide> HTTP_METHOD_LOOKUP[name] || raise(ActionController::UnknownHttpMethod, "#{name}, accepted HTTP methods are #{HTTP_METHODS[0...-1].join(', ')}, and #{HTTP_METHODS[-1]}")
<ide> name
<ide> end
<del>
<del> def parse_formatted_parameters(request, parsers)
<del> return yield if request.content_length.zero?
<del>
<del> strategy = parsers.fetch(request.content_mime_type) { return yield }
<del>
<del> begin
<del> strategy.call(request.raw_post)
<del> rescue => e # JSON or Ruby code block errors
<del> my_logger = logger || ActiveSupport::Logger.new($stderr)
<del> my_logger.debug "Error occurred while parsing request parameters.\nContents:\n\n#{request.raw_post}"
<del>
<del> raise ParamsParser::ParseError.new(e.message, e)
<del> end
<del> end
<ide> end
<ide> end
<ide><path>actionpack/lib/action_dispatch/middleware/params_parser.rb
<ide> def initialize(message, original_exception)
<ide> end
<ide> end
<ide>
<del> DEFAULT_PARSERS = {
<del> Mime::JSON => lambda { |raw_post|
<del> data = ActiveSupport::JSON.decode(raw_post)
<del> data.is_a?(Hash) ? data : {:_json => data}
<del> }
<del> }
<del>
<ide> # Create a new +ParamsParser+ middleware instance.
<ide> #
<ide> # The +parsers+ argument can take Hash of parsers where key is identifying
<ide> # content mime type, and value is a lambda that is going to process data.
<ide> def initialize(app, parsers = {})
<del> @app, @parsers = app, DEFAULT_PARSERS.merge(parsers)
<add> @app = app
<add> ActionDispatch::Request.parameter_parsers = ActionDispatch::Request::DEFAULT_PARSERS.merge(parsers)
<ide> end
<ide>
<ide> def call(env)
<del> request = Request.new(env)
<del>
<del> request.params_parsers = @parsers
<del>
<ide> @app.call(env)
<ide> end
<ide> end
| 3
|
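The Rails refactor above moves the parser table off per-request headers and onto the request class, so the middleware configures it once at boot instead of stuffing it into every request. A plain-Python sketch of the same shape (illustrative names, not Rails APIs):

```python
import json

class Request:
    # One shared registry for all requests, keyed by MIME type (a class
    # attribute, the analogue of Rails' Request.parameter_parsers).
    parameter_parsers = {
        "application/json": lambda raw: json.loads(raw),
    }

    def __init__(self, content_type, raw_body):
        self.content_type = content_type
        self.raw_body = raw_body

    def parsed_params(self):
        parser = type(self).parameter_parsers.get(self.content_type)
        if parser is None or not self.raw_body:
            return {}
        return parser(self.raw_body)

# "Middleware" merges custom parsers into the class-level defaults once:
Request.parameter_parsers = {
    **Request.parameter_parsers,
    "text/csv": lambda raw: [line.split(",") for line in raw.splitlines()],
}

print(Request("application/json", '{"valid": true}').parsed_params())
print(Request("text/csv", "a,b\nc,d").parsed_params())
```

|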
PHP
|
PHP
|
fix tests that relied on protected methods
|
e9f7c4127a71f335b0def5d3a304b06e0a183084
|
<ide><path>tests/TestCase/Controller/Component/RequestHandlerComponentTest.php
<ide> public function testStartupCallback()
<ide> $event = new Event('Controller.beforeRender', $this->Controller);
<ide> $_SERVER['REQUEST_METHOD'] = 'PUT';
<ide> $_SERVER['CONTENT_TYPE'] = 'application/xml';
<del> $this->Controller->request = $this->getMockBuilder('Cake\Network\Request')
<del> ->setMethods(['_readInput'])
<del> ->getMock();
<add> $this->Controller->request = new Request();
<ide> $this->RequestHandler->beforeRender($event);
<ide> $this->assertTrue(is_array($this->Controller->request->data));
<ide> $this->assertFalse(is_object($this->Controller->request->data));
<ide> public function testStartupCallbackCharset()
<ide> $event = new Event('Controller.startup', $this->Controller);
<ide> $_SERVER['REQUEST_METHOD'] = 'PUT';
<ide> $_SERVER['CONTENT_TYPE'] = 'application/xml; charset=UTF-8';
<del> $this->Controller->request = $this->getMockBuilder('Cake\Network\Request')
<del> ->setMethods(['_readInput'])
<del> ->getMock();
<add> $this->Controller->request = new Request();
<ide> $this->RequestHandler->startup($event);
<ide> $this->assertTrue(is_array($this->Controller->request->data));
<ide> $this->assertFalse(is_object($this->Controller->request->data));
<ide> public function testStartupCallbackCharset()
<ide> */
<ide> public function testStartupProcessData()
<ide> {
<del> $this->Controller->request = $this->getMockBuilder('Cake\Network\Request')
<del> ->setMethods(['_readInput'])
<del> ->getMock();
<del> $this->Controller->request->expects($this->at(0))
<del> ->method('_readInput')
<del> ->will($this->returnValue(''));
<del> $this->Controller->request->expects($this->at(1))
<del> ->method('_readInput')
<del> ->will($this->returnValue('"invalid"'));
<del> $this->Controller->request->expects($this->at(2))
<del> ->method('_readInput')
<del> ->will($this->returnValue('{"valid":true}'));
<del>
<add> $this->Controller->request = new Request();
<ide> $this->Controller->request->env('REQUEST_METHOD', 'POST');
<ide> $this->Controller->request->env('CONTENT_TYPE', 'application/json');
<ide>
<ide> $event = new Event('Controller.startup', $this->Controller);
<ide> $this->RequestHandler->startup($event);
<ide> $this->assertEquals([], $this->Controller->request->data);
<ide>
<add> $this->Controller->request->setInput('"invalid"');
<ide> $this->RequestHandler->startup($event);
<ide> $this->assertEquals(['invalid'], $this->Controller->request->data);
<ide>
<add> $this->Controller->request->setInput('{"valid":true}');
<ide> $this->RequestHandler->startup($event);
<ide> $this->assertEquals(['valid' => true], $this->Controller->request->data);
<ide> }
<ide> public function testStartupProcessData()
<ide> */
<ide> public function testStartupIgnoreFileAsXml()
<ide> {
<del> $this->Controller->request = $this->getMockBuilder('Cake\Network\Request')
<del> ->setMethods(['_readInput'])
<del> ->getMock();
<del> $this->Controller->request->expects($this->any())
<del> ->method('_readInput')
<del> ->will($this->returnValue('/dev/random'));
<del>
<add> $this->Controller->request = new Request(['input' => '/dev/random']);
<ide> $this->Controller->request->env('REQUEST_METHOD', 'POST');
<ide> $this->Controller->request->env('CONTENT_TYPE', 'application/xml');
<ide>
<ide> public function testStartupIgnoreFileAsXml()
<ide> public function testStartupCustomTypeProcess()
<ide> {
<ide> $restore = error_reporting(E_ALL & ~E_USER_DEPRECATED);
<del> $this->Controller->request = $this->getMockBuilder('Cake\Network\Request')
<del> ->setMethods(['_readInput'])
<del> ->getMock();
<del> $this->Controller->request->expects($this->once())
<del> ->method('_readInput')
<del> ->will($this->returnValue('"A","csv","string"'));
<add> $this->Controller->request = new Request([
<add> 'input' => '"A","csv","string"'
<add> ]);
<ide> $this->RequestHandler->addInputType('csv', ['str_getcsv']);
<ide> $this->Controller->request->env('REQUEST_METHOD', 'POST');
<ide> $this->Controller->request->env('CONTENT_TYPE', 'text/csv');
| 1
|
Python
|
Python
|
fix string in test for python 3k
|
677d349c22dd5538dd4bb962df11814f23a46b35
|
<ide><path>numpy/core/tests/test_regression.py
<ide> def test_string_astype(self):
<ide>
<ide> def test_string_astype(self):
<ide> """Ticket #1756 """
<del> s = '12345'
<add> s = asbytes('12345')
<ide> a = np.array([s]*5)
<ide> for i in range(1,6):
<ide> a1 = np.array(a, "|S%d"%i)
| 1
|
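The numpy fix works because `'|S5'` arrays hold raw bytes: under Python 3 a bare string literal is unicode and no longer round-trips through the fixed-width bytes dtype, so the fixture must be encoded first. A small demonstration of the behavior the patched test relies on (assumes NumPy is installed):

```python
import numpy as np

s = b"12345"                 # asbytes('12345') reduces to a bytes literal on Python 3
a = np.array([s] * 5)        # dtype |S5: five raw bytes per element
for i in range(1, 6):
    a1 = np.array(a, "|S%d" % i)   # casting to a shorter width truncates
    assert a1[0] == s[:i]
print(a1.dtype, a1[0])       # |S5 b'12345'
```

|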
Python
|
Python
|
add `python_requires` metadata for pypi
|
a22d143f96ee3c7d14887e871b34a84683835a3f
|
<ide><path>setup.py
<ide> include_package_data=True,
<ide> zip_safe=False,
<ide> platforms='any',
<add> python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*',
<ide> install_requires=[
<ide> 'Werkzeug>=0.14',
<ide> 'Jinja2>=2.10',
| 1
|
Text
|
Text
|
update a link
|
1f0267bb6dfdfc1871a488b35fe7d2821b73ffac
|
<ide><path>SUPPORTERS.md
<ide> These brilliant people supported our Kickstarter by giving us £15 or more:
<ide> * [Chuck Fouts](https://github.com/gnarl)
<ide> * [Chip Warden](https://twitter.com/lgw4)
<ide> * [Ethan Schoonover](http://ethanschoonover.com)
<del>* [Chi Trung Nguyen](http://www.napcaesmind.de)
<add>* [Chi Trung Nguyen](http://ctnguyen.net)
<ide> * [Danny Amey](http://www.dannyamey.com/)
<ide> * Oscar
<ide> * [Brian Pollack](http://www.protovate.com)
| 1
|
PHP
|
PHP
|
remove unused method
|
b9a51803b7fd191e9665db1fd9f9686db6755ad4
|
<ide><path>src/Illuminate/Database/Eloquent/Model.php
<ide> public static function findOrNew($id, $columns = ['*'])
<ide> return new static;
<ide> }
<ide>
<del> /**
<del> * Refresh the current model with the current attributes from the database.
<del> *
<del> * @return void
<del> */
<del> protected function refresh()
<del> {
<del> $fresh = $this->fresh();
<del>
<del> $this->setRawAttributes($fresh->getAttributes());
<del>
<del> $this->setRelations($fresh->getRelations());
<del> }
<del>
<ide> /**
<ide> * Reload a fresh model instance from the database.
<ide> *
| 1
|
Mixed
|
Go
|
add more cgroup config to docker info
|
458ec418cd11989c58d4a4485436eccadf91a91d
|
<ide><path>api/client/info.go
<ide> func (cli *DockerCli) CmdInfo(args ...string) error {
<ide> if !info.SwapLimit {
<ide> fmt.Fprintf(cli.err, "WARNING: No swap limit support\n")
<ide> }
<add> if !info.OomKillDisable {
<add> fmt.Fprintf(cli.err, "WARNING: No oom kill disable support\n")
<add> }
<add> if !info.CPUCfsQuota {
<add> fmt.Fprintf(cli.err, "WARNING: No cpu cfs quota support\n")
<add> }
<add> if !info.CPUCfsPeriod {
<add> fmt.Fprintf(cli.err, "WARNING: No cpu cfs period support\n")
<add> }
<add> if !info.CPUShares {
<add> fmt.Fprintf(cli.err, "WARNING: No cpu shares support\n")
<add> }
<add> if !info.CPUSet {
<add> fmt.Fprintf(cli.err, "WARNING: No cpuset support\n")
<add> }
<ide> if !info.IPv4Forwarding {
<ide> fmt.Fprintf(cli.err, "WARNING: IPv4 forwarding is disabled\n")
<ide> }
<ide><path>api/types/types.go
<ide> type Info struct {
<ide> SwapLimit bool
<ide> CPUCfsPeriod bool `json:"CpuCfsPeriod"`
<ide> CPUCfsQuota bool `json:"CpuCfsQuota"`
<add> CPUShares bool
<add> CPUSet bool
<ide> IPv4Forwarding bool
<ide> BridgeNfIptables bool
<ide> BridgeNfIP6tables bool `json:"BridgeNfIp6tables"`
<ide><path>daemon/info.go
<ide> func (daemon *Daemon) SystemInfo() (*types.Info, error) {
<ide> v.OomKillDisable = sysInfo.OomKillDisable
<ide> v.CPUCfsPeriod = sysInfo.CPUCfsPeriod
<ide> v.CPUCfsQuota = sysInfo.CPUCfsQuota
<add> v.CPUShares = sysInfo.CPUShares
<add> v.CPUSet = sysInfo.Cpuset
<ide> }
<ide>
<ide> if httpProxy := os.Getenv("http_proxy"); httpProxy != "" {
<ide><path>docs/reference/api/docker_remote_api.md
<ide> list of DNS options to be used in the container.
<ide> * `POST /build` now optionally takes a serialized map of build-time variables.
<ide> * `GET /events` now includes a `timenano` field, in addition to the existing `time` field.
<ide> * `GET /events` now supports filtering by image and container labels.
<del>* `GET /info` now lists engine version information.
<add>* `GET /info` now lists engine version information and return the information of `CPUShares` and `Cpuset`.
<ide> * `GET /containers/json` will return `ImageID` of the image used by container.
<ide> * `POST /exec/(name)/start` will now return an HTTP 409 when the container is either stopped or paused.
<ide>
| 4
|
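The CLI change above follows a simple pattern: probe host capability flags once, then print one stderr warning per missing cgroup feature. A compact sketch of the same pattern in Python (not Docker's code; the field names are invented):

```python
import sys

def warn_missing_features(info):
    # (flag name, warning text) pairs mirroring the checks in CmdInfo above.
    checks = [
        ("memory_swap_limit", "No swap limit support"),
        ("oom_kill_disable", "No oom kill disable support"),
        ("cpu_cfs_quota", "No cpu cfs quota support"),
        ("cpu_cfs_period", "No cpu cfs period support"),
        ("cpu_shares", "No cpu shares support"),
        ("cpuset", "No cpuset support"),
    ]
    for key, message in checks:
        if not info.get(key, False):
            print("WARNING: %s" % message, file=sys.stderr)

warn_missing_features({"cpu_shares": True, "cpuset": True})
```

|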
Text
|
Text
|
fix code snippet in tutorial (async logic)
|
7adc605aef42bb8fa228be3e2e5bd22d37342c7e
|
<ide><path>docs/tutorials/essentials/part-5-async-logic.md
<ide> export const addNewPost = createAsyncThunk(
<ide> // The payload creator receives the partial `{title, content, user}` object
<ide> async initialPost => {
<ide> // We send the initial data to the fake API server
<del> const response = await client.post('/fakeApi/posts', { post: initialPost })
<add> const response = await client.post('/fakeApi/posts', initialPost)
<ide> // The response includes the complete post object, including unique ID
<ide> return response.data
<ide> }
| 1
|
Ruby
|
Ruby
|
fix unreplaced class var
|
68ff2ebbea15936b618cd0476f4acd24728a6348
|
<ide><path>activerecord/lib/active_record/connection_adapters/abstract/connection_specification.rb
<ide> def verify_active_connections!
<ide> end
<ide>
<ide> active_connections.each_value do |connection|
<del> connection.verify!(@@connection_cache_timeout)
<add> connection.verify!(@@verification_timeout)
<ide> end
<ide> end
<ide>
| 1
|
Text
|
Text
|
change the contact person for clover
|
3053603998984f09ef01c5c7d42a9810413d4237
|
<ide><path>INTHEWILD.md
<ide> Currently, **officially** using Airflow:
<ide> 1. [Cleartax](https://cleartax.in/) [[@anks](https://github.com/anks) & [@codebuff](https://github.com/codebuff)]
<ide> 1. [Clicksign](https://clicksign.com/) [[@mbbernstein](https://github.com/mbbernstein) & [@jorgeac12](https://github.com/jorgeac12) & [@franklin390](https://github.com/franklin390)]
<ide> 1. [Cloudera](https://www.cloudera.com/) [[@phraniiac](https://github.com/phraniiac) & [@VivekPemawat](https://github.com/VivekPemawat)]
<del>1. [Clover Health](https://www.cloverhealth.com) [[@gwax](https://github.com/gwax) & [@vansivallab](https://github.com/vansivallab)]
<add>1. [Clover Health](https://www.cloverhealth.com) [[@ryansiu1995](https://github.com/ryansiu1995)]
<ide> 1. [Colgate-Palmolive](https://www.colgatepalmolive.com/) [[@fhoda](https://github.com/fhoda)]
<ide> 1. [Collectivehealth Inc.](https://www.collectivehealth.com) [[@retornam](https://github.com/retornam)]
<ide> 1. [Compass](https://www.compass.com) [[@wdhorton](https://github.com/wdhorton)]
| 1
|
Ruby
|
Ruby
|
remove exact match from `brew cask search`
|
14b3b82fcaa5c3e44c5e89de6b5ca802218f4a0e
|
<ide><path>Library/Homebrew/cask/lib/hbc/cli/abstract_command.rb
<ide> class AbstractCommand
<ide> option "--debug", :debug, false
<ide> option "--verbose", :verbose, false
<ide> option "--outdated", :outdated_only, false
<del> option "--require-sha", :require_sha, false
<add> option "--require-sha", :require_sha, false
<ide>
<ide> def self.command_name
<ide> @command_name ||= name.sub(/^.*:/, "").gsub(/(.)([A-Z])/, '\1_\2').downcase
<ide> def casks(alternative: -> { [] })
<ide> casks = args.empty? ? alternative.call : args
<ide> @casks = casks.map { |cask| CaskLoader.load(cask) }
<ide> rescue CaskUnavailableError => e
<del> reason = [e.reason, suggestion_message(e.token)].join(" ")
<add> reason = [e.reason, *suggestion_message(e.token)].join(" ")
<ide> raise e.class.new(e.token, reason)
<ide> end
<ide>
<ide> def suggestion_message(cask_token)
<del> exact_match, partial_matches = Search.search(cask_token)
<add> matches, = Search.search(cask_token)
<ide>
<del> if exact_match.nil? && partial_matches.count == 1
<del> exact_match = partial_matches.first
<del> end
<del>
<del> if exact_match
<del> "Did you mean “#{exact_match}”?"
<del> elsif !partial_matches.empty?
<add> if matches.one?
<add> "Did you mean “#{matches.first}”?"
<add> elsif !matches.empty?
<ide> "Did you mean one of these?\n"
<del> .concat(Formatter.columns(partial_matches.take(20)))
<del> else
<del> ""
<add> .concat(Formatter.columns(matches.take(20)))
<ide> end
<ide> end
<ide> end
<ide><path>Library/Homebrew/cask/lib/hbc/cli/search.rb
<ide> def self.extract_regexp(string)
<ide> end
<ide>
<ide> def self.search(*arguments)
<del> exact_match = nil
<ide> partial_matches = []
<ide> search_term = arguments.join(" ")
<ide> search_regexp = extract_regexp arguments.first
<ide> def self.search(*arguments)
<ide> else
<ide> simplified_tokens = all_tokens.map { |t| t.sub(%r{^.*\/}, "").gsub(/[^a-z0-9]+/i, "") }
<ide> simplified_search_term = search_term.sub(/\.rb$/i, "").gsub(/[^a-z0-9]+/i, "")
<del> exact_match = simplified_tokens.grep(/^#{simplified_search_term}$/i) { |t| all_tokens[simplified_tokens.index(t)] }.first
<ide> partial_matches = simplified_tokens.grep(/#{simplified_search_term}/i) { |t| all_tokens[simplified_tokens.index(t)] }
<del> partial_matches.delete(exact_match)
<ide> end
<ide>
<ide> _, remote_matches = Homebrew.search_taps(search_term, silent: true)
<ide>
<del> [exact_match, partial_matches, remote_matches, search_term]
<add> [partial_matches, remote_matches, search_term]
<ide> end
<ide>
<del> def self.render_results(exact_match, partial_matches, remote_matches, search_term)
<add> def self.render_results(partial_matches, remote_matches, search_term)
<ide> unless $stdout.tty?
<del> puts [*exact_match, *partial_matches, *remote_matches]
<add> puts [*partial_matches, *remote_matches]
<ide> return
<ide> end
<ide>
<del> if !exact_match && partial_matches.empty? && remote_matches.empty?
<add> if partial_matches.empty? && remote_matches.empty?
<ide> puts "No Cask found for \"#{search_term}\"."
<ide> return
<ide> end
<del> if exact_match
<del> ohai "Exact Match"
<del> puts highlight_installed exact_match
<del> end
<ide>
<ide> unless partial_matches.empty?
<ide> if extract_regexp search_term
<ide> ohai "Regexp Matches"
<ide> else
<del> ohai "Partial Matches"
<add> ohai "Matches"
<ide> end
<ide> puts Formatter.columns(partial_matches.map(&method(:highlight_installed)))
<ide> end
<ide><path>Library/Homebrew/test/cask/cli/search_spec.rb
<ide> expect {
<ide> Hbc::CLI::Search.run("local")
<ide> }.to output(<<~EOS).to_stdout.as_tty
<del> ==> Partial Matches
<add> ==> Matches
<ide> local-caffeine
<ide> local-transmission
<ide> EOS
<ide> expect {
<ide> Hbc::CLI::Search.run("test-opera")
<ide> }.to output(<<~EOS).to_stdout.as_tty
<del> ==> Exact Match
<add> ==> Matches
<ide> test-opera
<del> ==> Partial Matches
<ide> test-opera-mail
<ide> EOS
<ide> end
| 3
|
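The Homebrew change above collapses a special-cased exact match into the ordinary match list: an exact hit is simply the case where the list has one element, which removes the dedup-and-promote bookkeeping. A small Python sketch of the simplified flow, with a hypothetical token list:

```python
import re

ALL_TOKENS = ["test-opera", "test-opera-mail", "local-caffeine"]

def simplify(token):
    # Match the patch's normalization: strip non-alphanumerics, lowercase.
    return re.sub(r"[^a-z0-9]+", "", token.lower())

def search(term):
    needle = simplify(term)
    # One list: exact and partial hits together, no separate exact slot.
    return [t for t in ALL_TOKENS if needle in simplify(t)]

matches = search("test-opera")
if len(matches) == 1:
    print(f"Did you mean “{matches[0]}”?")
elif matches:
    print("Did you mean one of these?\n  " + "\n  ".join(matches[:20]))
```

|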
Text
|
Text
|
update "man" of docker-run
|
22f0b27f59aacd1f9028a8d2ea37e1ed6372675e
|
<ide><path>man/docker-run.1.md
<ide> to the quota you specify.
<ide>
<ide> At any time you can run **docker ps** in
<ide> the other shell to view a list of the running containers. You can reattach to a
<del>detached container with **docker attach**. If you choose to run a container in
<del>the detached mode, then you cannot use the **-rm** option.
<add>detached container with **docker attach**.
<ide>
<ide> When attached in the tty mode, you can detach from the container (and leave it
<ide> running) using a configurable key sequence. The default sequence is `CTRL-p CTRL-q`.
| 1
|
Java
|
Java
|
remove redundant type parameter in emptyaction
|
5cb9f418250009498b536de46e87c247ca6b5e68
|
<ide><path>src/main/java/rx/functions/Actions.java
<ide> private Actions() {
<ide> }
<ide>
<ide> @SuppressWarnings("unchecked")
<del> public static final <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9> EmptyAction<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9> empty() {
<add> public static final <T0, T1, T2, T3, T4, T5, T6, T7, T8> EmptyAction<T0, T1, T2, T3, T4, T5, T6, T7, T8> empty() {
<ide> return EMPTY_ACTION;
<ide> }
<ide>
<ide> @SuppressWarnings("rawtypes")
<ide> private static final EmptyAction EMPTY_ACTION = new EmptyAction();
<ide>
<del> private static final class EmptyAction<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9> implements
<add> private static final class EmptyAction<T0, T1, T2, T3, T4, T5, T6, T7, T8> implements
<ide> Action0,
<ide> Action1<T0>,
<ide> Action2<T0, T1>,
| 1
|
Python
|
Python
|
add progress for retry connections
|
c76b1c2f078c9e877b346ed13973cf45eede4a1b
|
<ide><path>celery/worker/consumer/consumer.py
<ide> """
<ide>
<ide> CONNECTION_RETRY_STEP = """\
<del>Trying again {when}...\
<add>Trying again {when}... ({retries}/{max_retries})\
<ide> """
<ide>
<ide> CONNECTION_ERROR = """\
<ide> def ensure_connected(self, conn):
<ide> def _error_handler(exc, interval, next_step=CONNECTION_RETRY_STEP):
<ide> if getattr(conn, 'alt', None) and interval == 0:
<ide> next_step = CONNECTION_FAILOVER
<del> error(CONNECTION_ERROR, conn.as_uri(), exc,
<del> next_step.format(when=humanize_seconds(interval, 'in', ' ')))
<add> next_step = next_step.format(
<add> when=humanize_seconds(interval, 'in', ' '),
<add> retries=int(interval / 2),
<add> max_retries=self.app.conf.broker_connection_max_retries)
<add> error(CONNECTION_ERROR, conn.as_uri(), exc, next_step)
<ide>
<ide> # remember that the connection is lazy, it won't establish
<ide> # until needed.
<ide><path>t/unit/worker/test_consumer.py
<ide> def test_connect_error_handler(self):
<ide> errback = conn.ensure_connection.call_args[0][0]
<ide> errback(Mock(), 0)
<ide>
<add> @patch('celery.worker.consumer.consumer.error')
<add> def test_connect_error_handler_progress(self, error):
<add> self.app.conf.broker_connection_retry = True
<add> self.app.conf.broker_connection_max_retries = 3
<add> self.app._connection = _amqp_connection()
<add> conn = self.app._connection.return_value
<add> c = self.get_consumer()
<add> assert c.connect()
<add> errback = conn.ensure_connection.call_args[0][0]
<add> errback(Mock(), 2)
<add> assert error.call_args[0][3] == 'Trying again in 2.00 seconds... (1/3)'
<add> errback(Mock(), 4)
<add> assert error.call_args[0][3] == 'Trying again in 4.00 seconds... (2/3)'
<add> errback(Mock(), 6)
<add> assert error.call_args[0][3] == 'Trying again in 6.00 seconds... (3/3)'
<add>
<ide>
<ide> class test_Heart:
<ide>
| 2
|
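Sketch (not Celery's code) of the progress formatting added above: the error handler receives the next retry interval and renders an `(n/max)` counter. Deriving `n` as `interval // step` assumes a fixed backoff step, which is the same assumption the patch makes with `int(interval / 2)`:

```python
CONNECTION_RETRY_STEP = "Trying again in {when:.2f} seconds... ({retries}/{max_retries})"

def format_retry(interval, max_retries, step=2):
    # The retry count is recovered from the interval under a fixed-step backoff.
    return CONNECTION_RETRY_STEP.format(
        when=interval,
        retries=interval // step,
        max_retries=max_retries,
    )

for interval in (2, 4, 6):
    print(format_retry(interval, max_retries=3))
# Trying again in 2.00 seconds... (1/3)
# Trying again in 4.00 seconds... (2/3)
# Trying again in 6.00 seconds... (3/3)
```

|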
Python
|
Python
|
remove redundant code from runpython operation
|
80bbe2265d1afd43f8519427ad8ce0b147aceb43
|
<ide><path>django/db/migrations/operations/special.py
<ide> def database_forwards(self, app_label, schema_editor, from_state, to_state):
<ide> # object, representing the versioned models as an app registry.
<ide> # We could try to override the global cache, but then people will still
<ide> # use direct imports, so we go with a documentation approach instead.
<del> if callable(self.code):
<del> self.code(models=from_state.render(), schema_editor=schema_editor)
<del> else:
<del> context = {
<del> "models": from_state.render(),
<del> "schema_editor": schema_editor,
<del> }
<del> eval(self.code, context)
<add> self.code(models=from_state.render(), schema_editor=schema_editor)
<ide>
<ide> def database_backwards(self, app_label, schema_editor, from_state, to_state):
<ide> if self.reverse_code is None:
<ide> raise NotImplementedError("You cannot reverse this operation")
<del> elif callable(self.reverse_code):
<del> self.reverse_code(models=from_state.render(), schema_editor=schema_editor)
<del> else:
<del> context = {
<del> "models": from_state.render(),
<del> "schema_editor": schema_editor,
<del> }
<del> eval(self.reverse_code, context)
<add> self.reverse_code(models=from_state.render(), schema_editor=schema_editor)
<ide>
<ide> def describe(self):
<ide> return "Raw Python operation"
| 1
|
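With strings no longer accepted, `RunPython` can assume `code` is callable and drop the `eval` branch entirely. A minimal sketch of the narrowed API (illustrative, not Django's actual class):

```python
class RunPython:
    def __init__(self, code, reverse_code=None):
        # Validating up front is what lets the dispatch methods stay one-liners.
        if not callable(code):
            raise TypeError("RunPython requires a callable")
        if reverse_code is not None and not callable(reverse_code):
            raise TypeError("reverse_code must be callable")
        self.code = code
        self.reverse_code = reverse_code

    def database_forwards(self, models, schema_editor):
        self.code(models=models, schema_editor=schema_editor)

    def database_backwards(self, models, schema_editor):
        if self.reverse_code is None:
            raise NotImplementedError("You cannot reverse this operation")
        self.reverse_code(models=models, schema_editor=schema_editor)

op = RunPython(lambda models, schema_editor: print("forwards"))
op.database_forwards(models=None, schema_editor=None)
```

|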
Ruby
|
Ruby
|
trim the fat
|
42f8e3f611fa12b37793620ca2f7bd5c360631c8
|
<ide><path>activerecord/lib/active_record/associations.rb
<ide> def association_join
<ide> first_key = second_key = as_extra = nil
<ide>
<ide> if through_reflection.options[:as] # has_many :through against a polymorphic join
<del> ####polymorphic_foreign_key = through_reflection.options[:as].to_s + '_id'
<del> ####polymorphic_foreign_type = through_reflection.options[:as].to_s + '_type'
<del> ####
<del> ####" LEFT OUTER JOIN %s ON (%s.%s = %s.%s AND %s.%s = %s) " % [
<del> #### table_alias_for(through_reflection.klass.table_name, aliased_join_table_name),
<del> #### aliased_join_table_name, polymorphic_foreign_key,
<del> #### parent.aliased_table_name, parent.primary_key,
<del> #### aliased_join_table_name, polymorphic_foreign_type, klass.quote_value(parent.active_record.base_class.name)] +
<del> ####" LEFT OUTER JOIN %s ON %s.%s = %s.%s " % [table_name_and_alias,
<del> #### aliased_table_name, primary_key, aliased_join_table_name, options[:foreign_key] || reflection.klass.to_s.foreign_key
<ide> jt_foreign_key = through_reflection.options[:as].to_s + '_id'
<ide> jt_as_extra = " AND %s.%s = %s" % [
<ide> aliased_join_table_name,
<ide> reflection.active_record.connection.quote_column_name(through_reflection.options[:as].to_s + '_type'),
<ide> klass.quote_value(parent.active_record.base_class.name)
<ide> ]
<ide> else
<del> ##if source_reflection.macro == :has_many && source_reflection.options[:as]
<del> ####" LEFT OUTER JOIN %s ON %s.%s = %s.%s " % [
<del> #### table_alias_for(through_reflection.klass.table_name, aliased_join_table_name), aliased_join_table_name,
<del> #### through_reflection.primary_key_name,
<del> #### parent.aliased_table_name, parent.primary_key] +
<del> ####" LEFT OUTER JOIN %s ON %s.%s = %s.%s AND %s.%s = %s " % [
<del> #### table_name_and_alias,
<del> #### aliased_table_name, "#{source_reflection.options[:as]}_id",
<del> #### aliased_join_table_name, options[:foreign_key] || primary_key,
<del> #### aliased_table_name, "#{source_reflection.options[:as]}_type",
<del> #### klass.quote_value(source_reflection.active_record.base_class.name)
<ide> jt_foreign_key = through_reflection.primary_key_name
<ide> end
<ide>
<ide> def association_join
<ide> klass.quote_value(source_reflection.active_record.base_class.name)
<ide> ]
<ide> else
<del> ####case source_reflection.macro
<del> #### when :belongs_to
<del> #### first_key = primary_key
<del> #### second_key = source_reflection.options[:foreign_key] || klass.to_s.foreign_key
<del> #### extra = nil
<del> #### when :has_many
<del> #### first_key = through_reflection.klass.base_class.to_s.foreign_key
<del> #### second_key = options[:foreign_key] || primary_key
<del> #### extra = through_reflection.klass.descends_from_active_record? ? nil :
<del> #### " AND %s.%s = %s" % [
<del> #### aliased_join_table_name,
<del> #### reflection.active_record.connection.quote_column_name(through_reflection.active_record.inheritance_column),
<del> #### through_reflection.klass.quote_value(through_reflection.klass.name.demodulize)]
<del> ####end
<del> ####" LEFT OUTER JOIN %s ON (%s.%s = %s.%s%s) " % [
<del> #### table_alias_for(through_reflection.klass.table_name, aliased_join_table_name),
<del> #### aliased_join_table_name, through_reflection.primary_key_name,
<del> #### parent.aliased_table_name, parent.primary_key, extra] +
<del> ####" LEFT OUTER JOIN %s ON (%s.%s = %s.%s) " % [
<del> #### table_name_and_alias,
<del> #### aliased_table_name, first_key,
<del> #### aliased_join_table_name, second_key
<del> ####]
<ide> first_key = through_reflection.klass.base_class.to_s.foreign_key
<ide> second_key = options[:foreign_key] || primary_key
<ide> end
| 1
|
Javascript
|
Javascript
|
correct the assertion to make test pass on ie11
|
84c408ce63859e81fb4e900683edcc0cb8188890
|
<ide><path>test/ng/compileSpec.js
<ide> describe('$compile', function() {
<ide> element = $compile('<iframe srcdoc="{{html}}"></iframe>')($rootScope);
<ide> $rootScope.html = $sce.trustAsHtml('<div onclick="">hello</div>');
<ide> $rootScope.$digest();
<del> expect(angular.lowercase(element[0].srcdoc)).toEqual('<div onclick="">hello</div>');
<add> expect(angular.lowercase(element.attr('srcdoc'))).toEqual('<div onclick="">hello</div>');
<ide> }));
<ide> });
<ide> }
| 1
|
Javascript
|
Javascript
|
remove call to `net.socket.resume()`
|
d9a6d4afe7fd05f7634d2415dfdeba69f77a4ce0
|
<ide><path>test/parallel/test-child-process-fork-net2.js
<ide> if (process.argv[2] === 'child') {
<ide> console.error('[m] CLIENT: close event');
<ide> disconnected += 1;
<ide> });
<del> // XXX This resume() should be unnecessary.
<del> // a stream high water mark should be enough to keep
<del> // consuming the input.
<del> client.resume();
<ide> }
<ide> });
<ide>
| 1
|
Javascript
|
Javascript
|
use generatetransform for animation transforms
|
6139da0c9325a4b43e1750a97bbbc4873ecb701d
|
<ide><path>examples/js/loaders/FBXLoader.js
<ide> initialPosition: [ 0, 0, 0 ],
<ide> initialRotation: [ 0, 0, 0 ],
<ide> initialScale: [ 1, 1, 1 ],
<add> transform: getModelAnimTransform( rawModel ),
<ide>
<ide> };
<ide>
<del> if ( 'Lcl_Translation' in rawModel ) node.initialPosition = rawModel.Lcl_Translation.value;
<add> node.transform = getModelAnimTransform( rawModel );
<ide>
<del> if ( 'Lcl_Rotation' in rawModel ) node.initialRotation = rawModel.Lcl_Rotation.value;
<del>
<del> if ( 'Lcl_Scaling' in rawModel ) node.initialScale = rawModel.Lcl_Scaling.value;
<del>
<del> // if the animated model is pre rotated, we'll have to apply the pre rotations to every
<add> // if the animated model is pre or post rotated, we'll have to apply the pre rotations to every
<ide> // animation value as well
<ide> if ( 'PreRotation' in rawModel ) node.preRotations = rawModel.PreRotation.value;
<add> if ( 'PostRotation' in rawModel ) node.postRotations = rawModel.PostRotation.value;
<ide>
<ide> layerCurveNodes[ i ] = node;
<ide>
<ide>
<ide> }
<ide>
<add> function getModelAnimTransform( modelNode ) {
<add>
<add> var transformData = {};
<add>
<add> if ( 'RotationOrder' in modelNode ) transformData.eulerOrder = parseInt( modelNode.RotationOrder.value );
<add>
<add> if ( 'Lcl_Translation' in modelNode ) transformData.translation = modelNode.Lcl_Translation.value;
<add> if ( 'RotationOffset' in modelNode ) transformData.rotationOffset = modelNode.RotationOffset.value;
<add>
<add> if ( 'Lcl_Rotation' in modelNode ) transformData.rotation = modelNode.Lcl_Rotation.value;
<add> if ( 'PreRotation' in modelNode ) transformData.preRotation = modelNode.PreRotation.value;
<add>
<add> if ( 'PostRotation' in modelNode ) transformData.postRotation = modelNode.PostRotation.value;
<add>
<add> if ( 'Lcl_Scaling' in modelNode ) transformData.scale = modelNode.Lcl_Scaling.value;
<add>
<add> return generateTransform( transformData );
<add>
<add> }
<add>
<ide> // parse nodes in FBXTree.Objects.AnimationStack. These are the top level node in the animation
<ide> // hierarchy. Each Stack node will be used to create a THREE.AnimationClip
<ide> function parseAnimStacks( FBXTree, connections, layersMap ) {
<ide>
<ide> var tracks = [];
<ide>
<add> var initialPosition = new THREE.Vector3();
<add> var initialRotation = new THREE.Quaternion();
<add> var initialScale = new THREE.Vector3();
<add>
<add> if ( rawTracks.transform ) rawTracks.transform.decompose( initialPosition, initialRotation, initialScale );
<add>
<ide> if ( rawTracks.T !== undefined && Object.keys( rawTracks.T.curves ).length > 0 ) {
<ide>
<del> var positionTrack = generateVectorTrack( rawTracks.modelName, rawTracks.T.curves, rawTracks.initialPosition, 'position' );
<add> var positionTrack = generateVectorTrack( rawTracks.modelName, rawTracks.T.curves, initialPosition, 'position' );
<ide> if ( positionTrack !== undefined ) tracks.push( positionTrack );
<ide>
<ide> }
<ide>
<ide> if ( rawTracks.R !== undefined && Object.keys( rawTracks.R.curves ).length > 0 ) {
<ide>
<del> var rotationTrack = generateRotationTrack( rawTracks.modelName, rawTracks.R.curves, rawTracks.initialRotation, rawTracks.preRotations );
<add> var rotationTrack = generateRotationTrack( rawTracks.modelName, rawTracks.R.curves, initialRotation, rawTracks.preRotations, rawTracks.postRotations );
<ide> if ( rotationTrack !== undefined ) tracks.push( rotationTrack );
<ide>
<ide> }
<ide>
<ide> if ( rawTracks.S !== undefined && Object.keys( rawTracks.S.curves ).length > 0 ) {
<ide>
<del> var scaleTrack = generateVectorTrack( rawTracks.modelName, rawTracks.S.curves, rawTracks.initialScale, 'scale' );
<add> var scaleTrack = generateVectorTrack( rawTracks.modelName, rawTracks.S.curves, initialScale, 'scale' );
<ide> if ( scaleTrack !== undefined ) tracks.push( scaleTrack );
<ide>
<ide> }
<ide>
<ide> }
<ide>
<del> function generateRotationTrack( modelName, curves, initialValue, preRotations ) {
<add> function generateRotationTrack( modelName, curves, initialValue, preRotations, postRotations ) {
<ide>
<ide> if ( curves.x !== undefined ) {
<ide>
<ide>
<ide> }
<ide>
<add> if ( postRotations !== undefined ) {
<add>
<add> postRotations = postRotations.map( THREE.Math.degToRad );
<add> postRotations.push( 'ZYX' );
<add>
<add> postRotations = new THREE.Euler().fromArray( postRotations );
<add> postRotations = new THREE.Quaternion().setFromEuler( postRotations );
<add>
<add> }
<add>
<ide> var quaternion = new THREE.Quaternion();
<ide> var euler = new THREE.Euler();
<ide>
| 1
|
Text
|
Text
|
add default value for justify-content
|
3b0a75e6484145001950d6a87ed19de0609e1385
|
<ide><path>curriculum/challenges/english/01-responsive-web-design/css-flexbox/align-elements-using-the-justify-content-property.english.md
<ide> Sometimes the flex items within a flex container do not fill all the space in th
<ide> <a href="https://www.w3.org/TR/css-flexbox-1/images/flex-direction-terms.svg" target="_blank">Here is a useful image showing a row to illustrate the concepts below.</a>
<ide> Recall that setting a flex container as a row places the flex items side-by-side from left-to-right. A flex container set as a column places the flex items in a vertical stack from top-to-bottom. For each, the direction the flex items are arranged is called the <strong>main axis</strong>. For a row, this is a horizontal line that cuts through each item. And for a column, the main axis is a vertical line through the items.
<ide> There are several options for how to space the flex items along the line that is the main axis. One of the most commonly used is <code>justify-content: center;</code>, which aligns all the flex items to the center inside the flex container. Others options include:
<del><ul><li><code>flex-start</code>: aligns items to the start of the flex container. For a row, this pushes the items to the left of the container. For a column, this pushes the items to the top of the container.</li><li><code>flex-end</code>: aligns items to the end of the flex container. For a row, this pushes the items to the right of the container. For a column, this pushes the items to the bottom of the container.</li><li><code>space-between</code>: aligns items to the center of the main axis, with extra space placed between the items. The first and last items are pushed to the very edge of the flex container. For example, in a row the first item is against the left side of the container, the last item is against the right side of the container, then the remaining space is distributed evenly.</li><li><code>space-around</code>: similar to <code>space-between</code> but the first and last items are not locked to the edges of the container, the space is distributed around all the items with a half space on either end of the flex container.</li><li><code>space-evenly</code>: Distributes space evenly between the flex items with a full space at either end of the flex container</li></ul>
<add>
<add><ul><li><code>flex-start</code>: aligns items to the start of the flex container. For a row, this pushes the items to the left of the container. For a column, this pushes the items to the top of the container. This is the default alignment if no <code>justify-content</code> is specified.</li><li><code>flex-end</code>: aligns items to the end of the flex container. For a row, this pushes the items to the right of the container. For a column, this pushes the items to the bottom of the container.</li><li><code>space-between</code>: aligns items to the center of the main axis, with extra space placed between the items. The first and last items are pushed to the very edge of the flex container. For example, in a row the first item is against the left side of the container, the last item is against the right side of the container, then the remaining space is distributed evenly among the other items.</li><li><code>space-around</code>: similar to <code>space-between</code> but the first and last items are not locked to the edges of the container, the space is distributed around all the items with a half space on either end of the flex container.</li><li><code>space-evenly</code>: Distributes space evenly between the flex items with a full space at either end of the flex container</li></ul>
<ide> </section>
<ide>
<ide> ## Instructions
| 1
|
Text
|
Text
|
remove dead link
|
4ab62a6bd2b38464e129ad95360d9de75c8133b5
|
<ide><path>docs/docs/tutorial.md
<ide> var CommentForm = React.createClass({
<ide>
<ide> React attaches event handlers to components using a camelCase naming convention. We attach an `onSubmit` handler to the form that clears the form fields when the form is submitted with valid input.
<ide>
<del>We always return `false` from the event handler to prevent the browser's default action of submitting the form. (If you prefer, you can instead take the event as an argument and call `preventDefault()` on it – read more about [event handling](event-handling.html).)
<add>We always return `false` from the event handler to prevent the browser's default action of submitting the form. (If you prefer, you can instead take the event as an argument and call `preventDefault()` on it.)
<ide>
<ide> ##### Refs
<ide>
| 1
|
Ruby
|
Ruby
|
skip outdated check when offline
|
a8a16928e4e7f77a8f2d73d9dd2fef9d8383fc2a
|
<ide><path>Library/Homebrew/cmd/doctor.rb
<ide> def check_for_outdated_homebrew
<ide> HOMEBREW_REPOSITORY.cd do
<ide> if File.directory? ".git"
<ide> local = `git rev-parse -q --verify refs/remotes/origin/master`.chomp
<del> remote = /^([a-f0-9]{40})/.match(`git ls-remote origin refs/heads/master`)[0]
<del> return if local == remote
<add> remote = /^([a-f0-9]{40})/.match(`git ls-remote origin refs/heads/master 2>/dev/null`)
<add> if remote.nil? || local == remote[0]
<add> return
<add> end
<ide> end
<ide>
<ide> timestamp = if File.directory? ".git"
| 1
|
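The doctor fix above tolerates being offline: the `git ls-remote` output is matched defensively, and a failed match is treated as "nothing to report" rather than crashing on a `nil` regex result. The same defensive shape in Python (a sketch, not Homebrew's code):

```python
import re
import subprocess

def outdated(repo_dir="."):
    local = subprocess.run(
        ["git", "rev-parse", "-q", "--verify", "refs/remotes/origin/master"],
        cwd=repo_dir, capture_output=True, text=True,
    ).stdout.strip()
    remote_out = subprocess.run(
        ["git", "ls-remote", "origin", "refs/heads/master"],
        cwd=repo_dir, capture_output=True, text=True,  # stderr captured, like 2>/dev/null
    ).stdout
    match = re.match(r"^([a-f0-9]{40})", remote_out)
    if match is None:
        # Offline or no remote: skip the check instead of raising.
        return False
    return local != match.group(1)

# outdated() returns False when the network is unavailable.
```

|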
Go
|
Go
|
change testgraphdriver signature to fix linting
|
dac5710b689fc6e0614c6b20a11017ad30e907f8
|
<ide><path>integration/plugin/graphdriver/external_test.go
<ide> func testExternalGraphDriver(ext string, ec map[string]*graphEventsCounter) func
<ide>
<ide> ctx := context.Background()
<ide>
<del> testGraphDriver(t, c, ctx, driverName, func(t *testing.T) {
<add> testGraphDriver(ctx, t, c, driverName, func(t *testing.T) {
<ide> d.Restart(t, "-s", driverName)
<ide> })
<ide>
<ide> func TestGraphdriverPluginV2(t *testing.T) {
<ide> d.Stop(t)
<ide> d.StartWithBusybox(t, "-s", plugin, "--storage-opt", "overlay2.override_kernel_check=1")
<ide>
<del> testGraphDriver(t, client, ctx, plugin, nil)
<add> testGraphDriver(ctx, t, client, plugin, nil)
<ide> }
<ide>
<del>// nolint: golint
<del>func testGraphDriver(t *testing.T, c client.APIClient, ctx context.Context, driverName string, afterContainerRunFn func(*testing.T)) { //nolint: golint
<add>func testGraphDriver(ctx context.Context, t *testing.T, c client.APIClient, driverName string, afterContainerRunFn func(*testing.T)) {
<ide> id := container.Run(ctx, t, c, container.WithCmd("sh", "-c", "echo hello > /hello"))
<ide>
<ide> if afterContainerRunFn != nil {
| 1
|
Text
|
Text
|
fix comment about http2.createsecureserver
|
4218afce9aabb2a4df9b6156bfb839b030190ba1
|
<ide><path>doc/api/http2.md
<ide> const options = {
<ide> cert: fs.readFileSync('server-cert.pem')
<ide> };
<ide>
<del>// Create a plain-text HTTP/2 server
<add>// Create a secure HTTP/2 server
<ide> const server = http2.createSecureServer(options);
<ide>
<ide> server.on('stream', (stream, headers) => {
| 1
|
Ruby
|
Ruby
|
improve gnu url check
|
f4c301a82765a740f0d880504f549abdbce64929
|
<ide><path>Library/Homebrew/cmd/audit.rb
<ide> def audit_formula_urls f
<ide>
<ide> # Check GNU urls
<ide> urls.each do |p|
<del> if p =~ %r[ftp\.gnu\.org]
<del> problems << " * ftpmirror.gnu.org is preferred for GNU software."
<add> if p =~ %r[^(https?|ftp)://(.+)/gnu/]
<add> problems << " * \"ftpmirror.gnu.org\" is preferred for GNU software."
<ide> end
<ide> end
<ide>
| 1
|
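The audit improvement above flags any GNU mirror path, not just the `ftp.gnu.org` host. A quick Python check using the same regular expression:

```python
import re

# Same pattern as the Ruby patch: any http/https/ftp URL serving a /gnu/ path.
GNU_MIRROR = re.compile(r"^(https?|ftp)://(.+)/gnu/")

for url in (
    "http://ftp.gnu.org/gnu/hello/hello-2.10.tar.gz",
    "https://mirrors.kernel.org/gnu/hello/hello-2.10.tar.gz",
    "https://example.com/hello.tar.gz",
):
    if GNU_MIRROR.match(url):
        print('* "ftpmirror.gnu.org" is preferred for GNU software:', url)
```

|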
Ruby
|
Ruby
|
remove doubled period
|
6e78fdbef06e0fc7135dac5bc71cdcb4b81911c6
|
<ide><path>railties/lib/rails/plugin.rb
<ide> def load_tasks
<ide> extra_tasks = Dir["#{root}/{tasks,rails/tasks}/**/*.rake"]
<ide>
<ide> unless extra_tasks.empty?
<del> ActiveSupport::Deprecation.warn "Rake tasks in #{extra_tasks.to_sentence} are deprecated. Use lib/tasks instead."
<add> ActiveSupport::Deprecation.warn "Rake tasks in #{extra_tasks.to_sentence} are deprecated. Use lib/tasks instead"
<ide> extra_tasks.sort.each { |ext| load(ext) }
<ide> end
<ide> end
| 1
|
Javascript
|
Javascript
|
increase timeout for configtestcases
|
378f31b4f96b9db85202167265a71d90eb85439c
|
<ide><path>test/ConfigTestCases.test.js
<ide> describe("ConfigTestCases", () => {
<ide> const casesPath = path.join(__dirname, "configCases");
<ide> let categories = fs.readdirSync(casesPath);
<ide>
<del> jest.setTimeout(10000);
<add> jest.setTimeout(20000);
<ide>
<ide> categories = categories.map(cat => {
<ide> return {
| 1
|
Ruby
|
Ruby
|
parallelize activesupport test suite
|
5ff5e5f63863949f2c59ae5ab02b6499031903b2
|
<ide><path>activesupport/test/abstract_unit.rb
<ide> I18n.enforce_available_locales = false
<ide>
<ide> class ActiveSupport::TestCase
<add> parallelize
<add>
<ide> include ActiveSupport::Testing::MethodCallAssertions
<ide>
<ide> private
| 1
|
Mixed
|
Ruby
|
adjust docs and more internal code for python 3
|
70253f0009ee8095a5d10ee7bdd891f1fe5cc35c
|
<ide><path>Library/Homebrew/compat/dependency_collector.rb
<ide> def parse_symbol_spec(spec, tags)
<ide> output_deprecation(spec, "open-mpi")
<ide> Dependency.new("open-mpi", tags)
<ide> when :python, :python2
<add> output_deprecation(spec, "python@2")
<add> Dependency.new("python@2", tags)
<add> when :python3
<ide> output_deprecation(spec, "python")
<ide> Dependency.new("python", tags)
<del> when :python3
<del> output_deprecation(spec, "python3")
<del> Dependency.new("python3", tags)
<ide> when :emacs, :mysql, :perl, :postgresql, :rbenv, :ruby
<ide> output_deprecation(spec)
<ide> Dependency.new(spec.to_s, tags)
<ide><path>Library/Homebrew/compat/requirements.rb
<ide> class PostgresqlRequirement < Requirement
<ide> class PythonRequirement < Requirement
<ide> fatal true
<ide> satisfy do
<del> odeprecated("PythonRequirement", "'depends_on \"python\"'")
<del> which "python"
<add> odeprecated("PythonRequirement", "'depends_on \"python@2\"'")
<add> which "python2"
<ide> end
<ide> end
<ide>
<ide> class Python3Requirement < Requirement
<ide> fatal true
<ide> satisfy do
<del> odeprecated("Python3Requirement", "'depends_on \"python3\"'")
<del> which "python3"
<add> odeprecated("Python3Requirement", "'depends_on \"python\"'")
<add> which "python"
<ide> end
<ide> end
<ide>
<ide><path>Library/Homebrew/compat/requirements/language_module_requirement.rb
<ide> def the_test
<ide> when :perl
<ide> ["/usr/bin/env", "perl", "-e", "use #{@import_name}"]
<ide> when :python
<del> ["/usr/bin/env", "python", "-c", "import #{@import_name}"]
<add> ["/usr/bin/env", "python2", "-c", "import #{@import_name}"]
<ide> when :python3
<del> ["/usr/bin/env", "python3", "-c", "import #{@import_name}"]
<add> ["/usr/bin/env", "python", "-c", "import #{@import_name}"]
<ide> when :ruby
<ide> ["/usr/bin/env", "ruby", "-rubygems", "-e", "require '#{@import_name}'"]
<ide> end
<ide> def command_line
<ide> when :lua then "luarocks-5.2 install"
<ide> when :lua51 then "luarocks-5.1 install"
<ide> when :perl then "cpan -i"
<del> when :python then "pip install"
<del> when :python3 then "pip3 install"
<add> when :python then "pip3 install"
<add> when :python3 then "pip install"
<ide> when :ruby then "gem install"
<ide> end
<ide> end
<ide><path>Library/Homebrew/diagnostic.rb
<ide> def check_for_old_homebrew_share_python_in_path
<ide> from your PATH variable.
<ide> Python scripts will now install into #{HOMEBREW_PREFIX}/bin.
<ide> You can delete anything, except 'Extras', from the #{HOMEBREW_PREFIX}/share/python
<del> (and #{HOMEBREW_PREFIX}/share/python3) dir and install affected Python packages
<add> (and #{HOMEBREW_PREFIX}/share/python@2) dir and install affected Python packages
<ide> anew with `pip install --upgrade`.
<ide> EOS
<ide> end
<ide><path>Library/Homebrew/exceptions.rb
<ide> class FormulaAmbiguousPythonError < RuntimeError
<ide> def initialize(formula)
<ide> super <<~EOS
<ide> The version of python to use with the virtualenv in the `#{formula.full_name}` formula
<del> cannot be guessed automatically. If the simultaneous use of python and python3
<del> is intentional, please add `:using => "python"` or `:using => "python3"` to
<add> cannot be guessed automatically. If the simultaneous use of python and python@2
<add> is intentional, please add `:using => "python"` or `:using => "python@2"` to
<ide> `virtualenv_install_with_resources` to resolve the ambiguity manually.
<ide> EOS
<ide> end
<ide><path>Library/Homebrew/formula.rb
<ide> def go_resource(name, &block)
<ide> # # `build.with?` or `build.without? "another_formula"`:
<ide> # depends_on "postgresql" if build.without? "sqlite"
<ide> #
<add> # <pre># Python 3.x if the `--with-python` is given to `brew install example`
<add> # depends_on "python3" => :optional</pre>
<ide> # <pre># Python 2.7:
<del> # depends_on "python"</pre>
<add> # depends_on "python@2"</pre>
<ide> # <pre># Python 2.7 but use system Python where possible
<del> # depends_on "python" if MacOS.version <= :snow_leopard</pre>
<del> # <pre># Python 3.x if the `--with-python3` is given to `brew install example`
<del> # depends_on "python3" => :optional</pre>
<add> # depends_on "python@2" if MacOS.version <= :snow_leopard</pre>
<ide> def depends_on(dep)
<ide> specs.each { |spec| spec.depends_on(dep) }
<ide> end
<ide><path>Library/Homebrew/language/python.rb
<ide> def self.included(base)
<ide> # @param venv_root [Pathname, String] the path to the root of the virtualenv
<ide> # (often `libexec/"venv"`)
<ide> # @param python [String] which interpreter to use (e.g. "python"
<del> # or "python3")
<add> # or "python2")
<ide> # @param formula [Formula] the active Formula
<ide> # @return [Virtualenv] a {Virtualenv} instance
<ide> def virtualenv_create(venv_root, python = "python", formula = self)
<ide> def virtualenv_create(venv_root, python = "python", formula = self)
<ide>
<ide> # Returns true if a formula option for the specified python is currently
<ide> # active or if the specified python is required by the formula. Valid
<del> # inputs are "python", "python3", :python, and :python3. Note that
<del> # "with-python", "without-python", "with-python3", and "without-python3"
<add> # inputs are "python", "python2", :python, and :python2. Note that
<add> # "with-python", "without-python", "with-python@2", and "without-python@2"
<ide> # formula options are handled correctly even if not associated with any
<ide> # corresponding depends_on statement.
<ide> # @api private
<ide> def needs_python?(python)
<ide> # Helper method for the common case of installing a Python application.
<ide> # Creates a virtualenv in `libexec`, installs all `resource`s defined
<ide> # on the formula, and then installs the formula. An options hash may be
<del> # passed (e.g., :using => "python3") to override the default, guessed
<del> # formula preference for python or python3, or to resolve an ambiguous
<del> # case where it's not clear whether python or python3 should be the
<add> # passed (e.g., :using => "python") to override the default, guessed
<add> # formula preference for python or python2, or to resolve an ambiguous
<add> # case where it's not clear whether python or python2 should be the
<ide> # default guess.
<ide> def virtualenv_install_with_resources(options = {})
<ide> python = options[:using]
<ide> if python.nil?
<del> wanted = %w[python python@2 python@3 python3].select { |py| needs_python?(py) }
<add> wanted = %w[python python@2 python2 python3].select { |py| needs_python?(py) }
<ide> raise FormulaAmbiguousPythonError, self if wanted.size > 1
<del> python = wanted.first || "python2.7"
<add> python = wanted.first || "python"
<ide> end
<ide> venv = virtualenv_create(libexec, python.delete("@"))
<ide> venv.pip_install resources
<ide> class Virtualenv
<ide> # @param venv_root [Pathname, String] the path to the root of the
<ide> # virtualenv
<ide> # @param python [String] which interpreter to use; i.e. "python" or
<del> # "python3"
<add> # "python2"
<ide> def initialize(formula, venv_root, python)
<ide> @formula = formula
<ide> @venv_root = Pathname.new(venv_root)
<ide> def create
<ide> end
<ide> end
<ide>
<del> # Robustify symlinks to survive python3 patch upgrades
<add> # Robustify symlinks to survive python patch upgrades
<ide> @venv_root.find do |f|
<ide> next unless f.symlink?
<ide> next unless (rp = f.realpath.to_s).start_with? HOMEBREW_CELLAR
<del> python = rp.include?("python3") ? "python3" : "python"
<add> python = rp.include?("python2") ? "python2" : "python"
<ide> new_target = rp.sub %r{#{HOMEBREW_CELLAR}/#{python}/[^/]+}, Formula[python].opt_prefix
<ide> f.unlink
<ide> f.make_symlink new_target
<ide> end
<ide>
<ide> Pathname.glob(@venv_root/"lib/python*/orig-prefix.txt").each do |prefix_file|
<ide> prefix_path = prefix_file.read
<del> python = prefix_path.include?("python3") ? "python3" : "python"
<add> python = prefix_path.include?("python2") ? "python2" : "python"
<ide> prefix_path.sub! %r{^#{HOMEBREW_CELLAR}/#{python}/[^/]+}, Formula[python].opt_prefix
<ide> prefix_file.atomic_write prefix_path
<ide> end
<ide><path>Library/Homebrew/test/language_module_requirement_spec.rb
<ide> it "does not satisfy invalid dependencies" do
<ide> expect(described_class.new(:python, "notapackage")).not_to be_satisfied
<ide> end
<del>
<del> it "satisfies valid dependencies" do
<del> expect(described_class.new(:python, "datetime")).to be_satisfied
<del> end
<ide> end
<ide>
<ide> context "when the language is Ruby" do
<ide><path>docs/Gems,-Eggs-and-Perl-Modules.md
<ide> Starting with OS X Lion (10.7), you need `sudo` to install to these like
<ide> so: `sudo gem install`, `sudo easy_install` or `sudo cpan -i`.
<ide>
<ide> An option to avoid sudo is to use an access control list:
<del>`chmod +a 'user:YOUR_NAME_HERE allow add_subdirectory,add_file,delete_child,directory_inherit' /Library/Python/2.7/site-packages`,
<del>for example, will let you add packages to Python 2.7 as yourself. That
<add>`chmod +a 'user:YOUR_NAME_HERE allow add_subdirectory,add_file,delete_child,directory_inherit' /Library/Python/3.6/site-packages`,
<add>for example, will let you add packages to Python 3.6 as yourself. That
<ide> is probably safer than changing the group ownership of the directory.
<ide>
<ide> ### So why was I using sudo?
<ide> Rather than changing the rights on `/Library/Python`, we recommend the
<ide> following options:
<ide>
<ide> ### With a brewed Python
<del>Note, `easy_install` is deprecated. We install `pip` (or `pip3` for
<del>Python 3) along with python/python3.
<add>Note: `easy_install` is deprecated. We install `pip` (or `pip2` for
<add>Python 2) along with python/python2.
<ide>
<ide> We set up distutils such that `pip install` will always put modules in
<ide> `$(brew --prefix)/lib/pythonX.Y/site-packages` and scripts in
<ide> `$(brew --prefix)/share/python`. Therefore, you won’t need sudo!
<ide>
<del>Do `brew info python` or `brew info python3` for precise information
<add>Do `brew info python` or `brew info python@2` for precise information
<ide> about the paths. Note, a brewed Python still searches for modules in
<ide> `/Library/Python/X.Y/site-packages` and also in
<ide> `~/Library/Python/X.Y/lib/python/site-packages`.
<ide><path>docs/Homebrew-and-Python.md
<ide> This page describes how Python is handled in Homebrew for users. See [Python for
<ide>
<ide> Homebrew should work with any [CPython](https://stackoverflow.com/questions/2324208/is-there-any-difference-between-cpython-and-python) and defaults to the macOS system Python.
<ide>
<del>Homebrew provides formulae to brew a more up-to-date Python 2.7.x and 3.x.
<add>Homebrew provides formulae to brew Python 3.x and a more up-to-date Python 2.7.x.
<ide>
<del>**Important:** If you choose to install a Python which isn't either of these two (system Python or brewed Python), the Homebrew team can only provide limited support.
<add>**Important:** If you choose to install a Python which isn't either of these two (system Python or brewed Python), the Homebrew team cannot support any breakage that may occur.
<ide>
<del>## Python 2.x or Python 3.x
<add>## Python 3.x or Python 2.x
<ide> Homebrew provides one formula for Python 2.7.x and another for Python 3.x. The executables are organized as follows so that Python 2 and Python 3 can both be installed without conflict:
<del>* `python` points to the macOS system Python (with no manual PATH modification)
<add>* `python` and `python3` point to Homebrew's Python 3.x (if installed), otherwise the macOS system Python
<ide> * `python2` points to Homebrew's Python 2.7.x (if installed)
<del>* `python3` points to Homebrew's Python 3.x (if installed)
<add>* `pip` and `pip3` point to Homebrew's Python 3.x's pip (if installed)
<ide> * `pip2` points to Homebrew's Python 2.7.x's pip (if installed)
<del>* `pip3` points to Homebrew's Python 3.x's pip (if installed)
<ide>
<ide> ([Wondering which one to choose?](https://wiki.python.org/moin/Python2orPython3))
<ide>
<ide> ## Setuptools, Pip, etc.
<del>The Python formulae install [pip](http://www.pip-installer.org) (as `pip2` or `pip3`) and [Setuptools](https://pypi.python.org/pypi/setuptools).
<add>The Python formulae install [pip](http://www.pip-installer.org) (as `pip` or `pip2`) and [Setuptools](https://pypi.python.org/pypi/setuptools).
<ide>
<ide> Setuptools can be updated via pip, without having to re-brew Python:
<ide>
<ide> ```sh
<del>python2 -m pip install --upgrade setuptools
<add>python -m pip install --upgrade setuptools
<ide> ```
<ide>
<ide> Similarly, pip can be used to upgrade itself via:
<ide>
<ide> ```sh
<del>python2 -m pip install --upgrade pip
<add>python -m pip install --upgrade pip
<ide> ```
<ide>
<ide> ### Note on `pip install --user`
<ide> The normal `pip install --user` is disabled for brewed Python. This is because o
<ide> A possible workaround (which puts executable scripts in `~/Library/Python/<X>.<Y>/bin`) is:
<ide>
<ide> ```sh
<del>python2 -m pip install --user --install-option="--prefix=" <package-name>
<add>python -m pip install --user --install-option="--prefix=" <package-name>
<ide> ```
<ide>
<ide> ## `site-packages` and the `PYTHONPATH`
<ide> The `site-packages` is a directory that contains Python modules (especially bind
<ide> $(brew --prefix)/lib/pythonX.Y/site-packages
<ide> ```
<ide>
<del>So, for Python 2.7.x, you'll find it at `/usr/local/lib/python2.7/site-packages`.
<add>So, for Python 3.6.x, you'll find it at `/usr/local/lib/python3.6/site-packages`.
<ide>
<del>Python 2.7 also searches for modules in:
<add>Python 3.6 also searches for modules in:
<ide>
<del>- `/Library/Python/2.7/site-packages`
<del>- `~/Library/Python/2.7/lib/python/site-packages`
<add>- `/Library/Python/3.6/site-packages`
<add>- `~/Library/Python/3.6/lib/python/site-packages`
<ide>
<ide> Homebrew's `site-packages` directory is first created if (1) any Homebrew formula with Python bindings are installed, or (2) upon `brew install python`.
<ide>
<ide> ### Why here?
<ide> The reasoning for this location is to preserve your modules between (minor) upgrades or re-installations of Python. Additionally, Homebrew has a strict policy never to write stuff outside of the `brew --prefix`, so we don't spam your system.
<ide>
<ide> ## Homebrew-provided Python bindings
<del>Some formulae provide Python bindings. Sometimes a `--with-python` or `--with-python3` option has to be passed to `brew install` in order to build the Python bindings. (Check with `brew options <formula>`.)
<del>
<del>Homebrew builds bindings against the first `python` (and `python-config`) in your `PATH`. (Check with `which python`).
<add>Some formulae provide Python bindings. Sometimes a `--with-python` or `--with-python@2` option has to be passed to `brew install` in order to build the Python bindings. (Check with `brew options <formula>`.)
<ide>
<ide> **Warning!** Python may crash (see [Common Issues](Common-Issues.md)) if you `import <module>` from a brewed Python if you ran `brew install <formula_with_python_bindings>` against the system Python. If you decide to switch to the brewed Python, then reinstall all formulae with Python bindings (e.g. `pyside`, `wxwidgets`, `pygtk`, `pygobject`, `opencv`, `vtk` and `boost-python`).
<ide>
<ide> Homebrew will still install Python modules into Homebrew's `site-packages` and *
<ide> Virtualenv has a `--system-site-packages` switch to allow "global" (i.e. Homebrew's) `site-packages` to be accessible from within the virtualenv.
<ide>
<ide> ## Why is Homebrew's Python being installed as a dependency?
<del>Formulae that declare an unconditional dependency on the `"python"` or `"python3"` formulae are bottled against Homebrew's Python 2.7.x or 3.x and require it to be installed.
<add>Formulae that declare an unconditional dependency on the `"python"` or `"python@2"` formulae are bottled against Homebrew's Python 3.x or 2.7.x and require it to be installed.
<ide><path>docs/Python-for-Formula-Authors.md
<ide> Applications should unconditionally bundle all of their Python-language dependen
<ide>
<ide> ### Python declarations
<ide>
<add>Formulae for apps that require Python 3 **should** declare an unconditional dependency on `"python"`. These apps **must** work with the current Homebrew Python 3.x formula.
<add>
<ide> Applications that are compatible with Python 2 **should** use the Apple-provided system Python in `/usr/bin` on systems that provide Python 2.7. To do this, declare:
<ide>
<ide> ```ruby
<del>depends_on "python" if MacOS.version <= :snow_leopard
<add>depends_on "python@2" if MacOS.version <= :snow_leopard
<ide> ```
<ide>
<ide> No explicit Python dependency is needed on recent OS versions since `/usr/bin` is always in `PATH` for Homebrew formulae; on Leopard and older, the `python` in `PATH` is used if it's at least version 2.7, or else Homebrew's Python 2.7.x is installed.
<ide>
<del>Formulae for apps that require Python 3 **should** declare an unconditional dependency on `"python3"`. These apps **must** work with the current Homebrew Python 3.x formula.
<del>
<ide> ### Installing
<ide>
<ide> Applications should be installed into a Python [virtualenv](https://virtualenv.pypa.io/en/stable/) environment rooted in `libexec`. This prevents the app's Python modules from contaminating the system site-packages and vice versa.
<ide> This is exactly the same as writing:
<ide> ```ruby
<ide> def install
<ide> # Create a virtualenv in `libexec`. If your app needs Python 3, make sure that
<del> # `depends_on "python3"` is declared, and use `virtualenv_create(libexec, "python3")`.
<add> # `depends_on "python"` is declared, and use `virtualenv_create(libexec, "python")`.
<ide> venv = virtualenv_create(libexec)
<ide> # Install all of the resources declared on the formula into the virtualenv.
<ide> venv.pip_install resources
<ide> in case you need to do different things for different resources.
<ide>
<ide> ## Bindings
<ide>
<del>Build bindings with the system Python by default (don't add an option) and they should be usable with any binary-compatible Python. If that isn't the case, it's an upstream bug; [here's some advice for resolving it](http://blog.tim-smith.us/2015/09/python-extension-modules-os-x/).
<add>To add bindings for Python 3, please add `depends_on "python"`.
<ide>
<del>To add bindings for Python 3, please add `depends_on "python3" => :optional` and make the bindings conditional on `build.with?("python3")`.
<add>Build Python 2 bindings with the system Python by default (don't add an option) and they should be usable with any binary-compatible Python. If that isn't the case, it's an upstream bug; [here's some advice for resolving it](http://blog.tim-smith.us/2015/09/python-extension-modules-os-x/).
<ide>
<ide> ### Dependencies
<ide>
<ide> Sometimes we have to `inreplace` a `Makefile` to use our prefix for the Python b
<ide>
<ide> ### Python declarations
<ide>
<del>Python 2 libraries do not need a `depends_on "python"` declaration; they will be built with the system Python, but should still be usable with any other Python 2.7. If this is not the case, it is an upstream bug; [here is some advice for resolving it](http://blog.tim-smith.us/2015/09/python-extension-modules-os-x/). Libraries built for Python 3 should include `depends_on "python3"`, which will bottle against Homebrew's Python 3.x. If a library supports both Python 2.x and Python 3.x, the `"python3"` dependency should be `:optional`. Python 2.x libraries must function when they are installed against either the system Python or brewed Python.
<add>Libraries built for Python 3 should include `depends_on "python"`, which will bottle against Homebrew's Python 3.x. Python 2.x libraries must function when they are installed against either the system Python or brewed Python.
<add>
<add>Python 2 libraries do not need a `depends_on "python@2"` declaration; they will be built with the system Python, but should still be usable with any other Python 2.7. If this is not the case, it is an upstream bug; [here is some advice for resolving it](http://blog.tim-smith.us/2015/09/python-extension-modules-os-x/).
<ide>
<ide> ### Installing
<ide>
| 11
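
The pip and `site-packages` discussion above can be verified from any interpreter with a standard-library call; a minimal sketch, not part of the patch:

```python
# Print this interpreter's site-packages directory using only the
# standard library; works for system, brewed, and virtualenv Pythons.
import sysconfig

print(sysconfig.get_paths()["purelib"])
# e.g. /usr/local/lib/python3.6/site-packages for a brewed Python 3.6
```
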
|
Javascript
|
Javascript
|
add @nolint to fb bundle headers
|
8d7535e540c036a40754acb53d51f1fdcc8dc6d8
|
<ide><path>scripts/rollup/wrappers.js
<ide> ${source}`;
<ide> ${license}
<ide> *
<ide> * @noflow
<add> * @nolint
<ide> * @preventMunge
<ide> * @preserve-invariant-messages
<ide> */
<ide> ${source}
<ide> ${license}
<ide> *
<ide> * @noflow
<add> * @nolint
<ide> * @preventMunge
<ide> * @preserve-invariant-messages
<ide> */
<ide> ${source}`;
<ide> ${license}
<ide> *
<ide> * @noflow
<add> * @nolint
<ide> * @preventMunge
<ide> * @preserve-invariant-messages
<ide> */
<ide> ${source}`;
<ide> ${license}
<ide> *
<ide> * @noflow
<add> * @nolint
<ide> * @providesModule ${globalName}-dev
<ide> * @preventMunge
<ide> * ${'@gen' + 'erated'}
<ide> ${source}
<ide> ${license}
<ide> *
<ide> * @noflow
<add> * @nolint
<ide> * @providesModule ${globalName}-prod
<ide> * @preventMunge
<ide> * ${'@gen' + 'erated'}
<ide> ${source}`;
<ide> ${license}
<ide> *
<ide> * @noflow
<add> * @nolint
<ide> * @providesModule ${globalName}-profiling
<ide> * @preventMunge
<ide> * ${'@gen' + 'erated'}
<ide> ${source}`;
<ide> ${license}
<ide> *
<ide> * @noflow
<add> * @nolint
<ide> * @preventMunge
<ide> * ${'@gen' + 'erated'}
<ide> */
<ide> ${source}
<ide> ${license}
<ide> *
<ide> * @noflow
<add> * @nolint
<ide> * @preventMunge
<ide> * ${'@gen' + 'erated'}
<ide> */
<ide> ${source}`;
<ide> ${license}
<ide> *
<ide> * @noflow
<add> * @nolint
<ide> * @preventMunge
<ide> * ${'@gen' + 'erated'}
<ide> */
| 1
|
Ruby
|
Ruby
|
show real path to x11 in --config output
|
f74e616724c51001d832bb19f7acdc7f5349d8f6
|
<ide><path>Library/Homebrew/cmd/--config.rb
<ide> def sha
<ide> if sha.empty? then "(none)" else sha end
<ide> end
<ide>
<add> def describe_x11
<add> return "N/A" unless x11_installed?
<add> return case x11_path = Pathname.new("/usr/x11").realpath.to_s
<add> when "/usr/x11" then "/usr/x11"
<add> else "/usr/x11 => #{x11_path}"
<add> end
<add> end
<add>
<ide> def describe_perl
<ide> perl = `which perl`.chomp
<ide> return "N/A" if perl.empty?
<ide> def config_s; <<-EOS.undent
<ide> LLVM: #{llvm ? "build #{llvm}" : "N/A"}
<ide> Clang: #{clang ? "#{clang} build #{clang_build}" : "N/A"}
<ide> MacPorts or Fink? #{macports_or_fink_installed?}
<del> X11 installed? #{x11_installed?}
<add> X11: #{describe_x11}
<ide> System Ruby: #{RUBY_VERSION}-#{RUBY_PATCHLEVEL}
<ide> /usr/bin/ruby => #{real_path("/usr/bin/ruby")}
<ide> Which Perl: #{describe_perl}
| 1
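
`describe_x11` prints an arrow only when the path resolves somewhere else; the same display logic as a standalone Python sketch (the function name is invented, and the example output assumes an XQuartz-style symlink):

```python
# Show "path => target" only when the path resolves elsewhere,
# mirroring describe_x11 in the patch above.
from pathlib import Path

def describe(path):
    p = Path(path)
    if not p.exists():
        return "N/A"
    real = str(p.resolve())
    return path if real == path else f"{path} => {real}"

print(describe("/usr/X11"))  # e.g. "/usr/X11 => /opt/X11" on macOS
```
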
|
Text
|
Text
|
add 1password for teams
|
5b1372e7587e5b1ea34301111d05685d773de728
|
<ide><path>README.md
<ide> Our bottles (binary packages) are hosted by Bintray.
<ide>
<ide> [](https://bintray.com/homebrew)
<ide>
<add>Secure password storage and syncing provided by [1Password for Teams](https://1password.com/teams/) by AgileBits
<add>
<add>[](https://agilebits.com)
<add>
<ide> Homebrew is a member of the [Software Freedom Conservancy](https://sfconservancy.org)
<ide>
<ide> [](https://sfconservancy.org)
| 1
|
Python
|
Python
|
drop references to `is_single_tower`
|
b860839fe9bc6304d8c5412e2ae1128c02bcf4a2
|
<ide><path>official/utils/misc/distribution_utils_test.py
<ide> class GetDistributionStrategyTest(tf.test.TestCase):
<ide> """Tests for get_distribution_strategy."""
<ide> def test_one_device_strategy_cpu(self):
<ide> ds = distribution_utils.get_distribution_strategy(0)
<del> self.assertTrue(ds.is_single_tower)
<ide> self.assertEquals(ds.num_towers, 1)
<ide> self.assertEquals(len(ds.worker_devices), 1)
<ide> self.assertIn('CPU', ds.worker_devices[0])
<ide>
<ide> def test_one_device_strategy_gpu(self):
<ide> ds = distribution_utils.get_distribution_strategy(1)
<del> self.assertTrue(ds.is_single_tower)
<ide> self.assertEquals(ds.num_towers, 1)
<ide> self.assertEquals(len(ds.worker_devices), 1)
<ide> self.assertIn('GPU', ds.worker_devices[0])
<ide>
<ide> def test_mirrored_strategy(self):
<ide> ds = distribution_utils.get_distribution_strategy(5)
<del> self.assertFalse(ds.is_single_tower)
<ide> self.assertEquals(ds.num_towers, 5)
<ide> self.assertEquals(len(ds.worker_devices), 5)
<ide> for device in ds.worker_devices:
| 1
|
Ruby
|
Ruby
|
handle the case `du` returns empty string
|
fd7f3b949659cedef62ac58258c652bea58ef1a7
|
<ide><path>Library/Homebrew/extend/pathname.rb
<ide> def abv
<ide> out = ""
<ide> n = Utils.popen_read("find", expand_path.to_s, "-type", "f", "!", "-name", ".DS_Store").split("\n").size
<ide> out << "#{n} files, " if n > 1
<del> out << Utils.popen_read("/usr/bin/du", "-hs", expand_path.to_s).split("\t")[0].strip
<add> size = Utils.popen_read("/usr/bin/du", "-hs", expand_path.to_s).split("\t")[0]
<add> size ||= "0B"
<add> out << size.strip
<ide> out
<ide> end
<ide>
| 1
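
The guard exists because Ruby's `"".split("\t")[0]` is `nil`, so appending it to a string raised. A Python rendering of the same defensive parse (the command mirrors the patch; everything else is illustrative):

```python
# Run `du -hs` and fall back to "0B" when it prints nothing, the same
# intent as the `size ||= "0B"` guard above.
import subprocess

out = subprocess.run(["du", "-hs", "."], capture_output=True, text=True).stdout
size = out.split("\t")[0].strip() if out else ""
print(size or "0B")
```
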
|
Javascript
|
Javascript
|
add long stacktrace debugging facility
|
1f76a2eddc3561ff2c434d491e0d2b893c374cfd
|
<ide><path>lib/fs.js
<ide> var O_WRONLY = constants.O_WRONLY || 0;
<ide>
<ide> var isWindows = process.platform === 'win32';
<ide>
<del>function rethrow(err) {
<del> if (err) throw err;
<add>var DEBUG = process.env.NODE_DEBUG && /fs/.test(process.env.NODE_DEBUG);
<add>
<add>function rethrow() {
<add> // Only enable in debug mode. A backtrace uses ~1000 bytes of heap space and
<add> // is fairly slow to generate.
<add> if (DEBUG) {
<add> var backtrace = new Error;
<add> return function(err) {
<add> if (err) {
<add> backtrace.message = err.message;
<add> err = backtrace;
<add> throw err;
<add> }
<add> };
<add> }
<add>
<add> return function(err) {
<add> if (err) {
<add> throw err; // Forgot a callback but don't know where? Use NODE_DEBUG=fs
<add> }
<add> };
<ide> }
<ide>
<ide> function maybeCallback(cb) {
<del> return typeof cb === 'function' ? cb : rethrow;
<add> return typeof cb === 'function' ? cb : rethrow();
<ide> }
<ide>
<ide> // Ensure that callbacks run in the global context. Only use this function
<ide> // for callbacks that are passed to the binding layer, callbacks that are
<ide> // invoked from JS already run in the proper scope.
<ide> function makeCallback(cb) {
<ide> if (typeof cb !== 'function') {
<del> return rethrow;
<add> return rethrow();
<ide> }
<ide>
<ide> return function() {
<ide><path>test/fixtures/test-fs-readfile-error.js
<add>// Copyright Joyent, Inc. and other Node contributors.
<add>//
<add>// Permission is hereby granted, free of charge, to any person obtaining a
<add>// copy of this software and associated documentation files (the
<add>// "Software"), to deal in the Software without restriction, including
<add>// without limitation the rights to use, copy, modify, merge, publish,
<add>// distribute, sublicense, and/or sell copies of the Software, and to permit
<add>// persons to whom the Software is furnished to do so, subject to the
<add>// following conditions:
<add>//
<add>// The above copyright notice and this permission notice shall be included
<add>// in all copies or substantial portions of the Software.
<add>//
<add>// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
<add>// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
<add>// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
<add>// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
<add>// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
<add>// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
<add>// USE OR OTHER DEALINGS IN THE SOFTWARE.
<add>
<add>require('fs').readFile('/'); // throws EISDIR
<ide><path>test/simple/test-fs-readfile-error.js
<add>// Copyright Joyent, Inc. and other Node contributors.
<add>//
<add>// Permission is hereby granted, free of charge, to any person obtaining a
<add>// copy of this software and associated documentation files (the
<add>// "Software"), to deal in the Software without restriction, including
<add>// without limitation the rights to use, copy, modify, merge, publish,
<add>// distribute, sublicense, and/or sell copies of the Software, and to permit
<add>// persons to whom the Software is furnished to do so, subject to the
<add>// following conditions:
<add>//
<add>// The above copyright notice and this permission notice shall be included
<add>// in all copies or substantial portions of the Software.
<add>//
<add>// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
<add>// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
<add>// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
<add>// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
<add>// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
<add>// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
<add>// USE OR OTHER DEALINGS IN THE SOFTWARE.
<add>
<add>var common = require('../common');
<add>var assert = require('assert');
<add>var exec = require('child_process').exec;
<add>var path = require('path');
<add>
<add>var callbacks = 0;
<add>
<add>function test(env, cb) {
<add> var filename = path.join(common.fixturesDir, 'test-fs-readfile-error.js');
<add> var execPath = process.execPath + ' ' + filename;
<add> var options = { env: env || {} };
<add> exec(execPath, options, function(err, stdout, stderr) {
<add> assert(err);
<add> assert.equal(stdout, '');
<add> assert.notEqual(stderr, '');
<add> cb('' + stderr);
<add> });
<add>}
<add>
<add>test({ NODE_DEBUG: '' }, function(data) {
<add> assert(/EISDIR/.test(data));
<add> assert(!/test-fs-readfile-error/.test(data));
<add> callbacks++;
<add>});
<add>
<add>test({ NODE_DEBUG: 'fs' }, function(data) {
<add> assert(/EISDIR/.test(data));
<add> assert(/test-fs-readfile-error/.test(data));
<add> callbacks++;
<add>});
<add>
<add>process.on('exit', function() {
<add> assert.equal(callbacks, 2);
<add>});
| 3
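
The trick in this patch is to pay for a stack trace only when debugging: an `Error` allocated at call time remembers the user's call site, and a missing-callback throw later reuses it. The same idea in Python, purely for illustration (the `DEBUG` flag and helper names are invented here):

```python
# Capture a stack when the async operation is scheduled, and surface it
# if the callback is missing and the operation later fails.
import os
import traceback

DEBUG = "fs" in os.environ.get("NODE_DEBUG", "")

def maybe_callback(cb=None):
    # Only build the backtrace in debug mode: it is slow to generate and
    # costs memory, exactly the trade-off the fs.js comment describes.
    scheduled_at = traceback.format_stack() if DEBUG else None

    def rethrow(err=None, *results):
        if err is not None and cb is None:
            if scheduled_at is not None:
                raise RuntimeError(
                    "%s\nscheduled at:\n%s" % (err, "".join(scheduled_at)))
            raise err
        if cb is not None:
            cb(err, *results)
    return rethrow
```
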
|
Javascript
|
Javascript
|
improve work-around for importscripts bug
|
097e273ca444804c0c3efd66d05096e14521eee8
|
<ide><path>src/display/api.js
<ide> var PDFWorker = (function PDFWorkerClosure() {
<ide> // https://bugzilla.mozilla.org/show_bug.cgi?id=683280
<ide> var worker = new Worker(workerSrc);
<ide> var messageHandler = new MessageHandler('main', 'worker', worker);
<del>//#if !PRODUCTION
<del> // Don't allow worker to be destroyed by Chrome, see:
<del> // https://code.google.com/p/chromium/issues/detail?id=572225
<del> var jsWorkerId = '_workerKungfuGrip_' + Math.random();
<del> window[jsWorkerId] = worker;
<del>//#endif
<ide> messageHandler.on('test', function PDFWorker_test(data) {
<del>//#if !PRODUCTION
<del> delete window[jsWorkerId];
<del>//#endif
<ide> if (this.destroyed) {
<ide> this._readyCapability.reject(new Error('Worker was destroyed'));
<ide> messageHandler.destroy();
<ide><path>src/worker_loader.js
<ide>
<ide> 'use strict';
<ide>
<add>//#if !PRODUCTION
<add>//// Patch importScripts to work around a bug in WebKit and Chrome 48-.
<add>//// See https://crbug.com/572225 and https://webkit.org/b/153317.
<add>self.importScripts = (function (importScripts) {
<add> return function() {
<add> setTimeout(function () {}, 0);
<add> return importScripts.apply(this, arguments);
<add> };
<add>})(importScripts);
<add>//#endif
<add>
<ide> importScripts('../node_modules/requirejs/require.js');
<ide>
<ide> require.config({paths: {'pdfjs': '.'}});
| 2
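
The workaround wraps a global in place and delegates to the original. That wrap-and-delegate shape, sketched in Python (the wrapped callable is an arbitrary stand-in):

```python
# Run a bug-specific side effect before every call, then delegate
# unchanged, the same shape as the importScripts patch above.
import functools

def with_workaround(fn):
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        # the side effect would go here (cf. the no-op setTimeout)
        return fn(*args, **kwargs)
    return wrapper

load = with_workaround(print)  # print stands in for importScripts
load("module-a", "module-b")
```
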
|
Text
|
Text
|
fix code url & minor issues
|
38eab96ecbb16c2ecf8c1bbb4d61459eb2502a7e
|
<ide><path>guide/english/algorithms/search-algorithms/jump-search/index.md
<ide> title: Jump Search
<ide> ---
<ide>
<ide> ## Jump Search
<add>
<ide> A jump search locates an item in a sorted array by jumping ahead k items at a time and then checking whether the wanted item lies between the previous jump and the current jump.
<ide>
<ide> # Worst Case Complexity
<ide> O(√N)
<ide> # Code
<ide> To view code implementations of this method, follow the link below:
<ide>
<del>[Jump Search - OpenGenus/cosmos](https://github.com/OpenGenus/cosmos/tree/master/code/search/jump_search)
<add>[Jump Search - OpenGenus/cosmos](https://github.com/OpenGenus/cosmos/tree/master/code/search/src/jump_search)
<ide>
<ide> # Credits
<ide>
| 1
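
Since the article links out for code, here is a short jump search sketch (not taken from the linked repository): jump ahead √N items at a time, then scan linearly inside the one block that can contain the key.

```python
# Jump search over a sorted list; returns the index or -1.
import math

def jump_search(arr, target):
    n = len(arr)
    if n == 0:
        return -1
    step = int(math.sqrt(n))
    prev = 0
    # Jump forward while the last element of the block is too small.
    while prev < n and arr[min(prev + step, n) - 1] < target:
        prev += step
    # Linear scan inside the identified block.
    for i in range(prev, min(prev + step, n)):
        if arr[i] == target:
            return i
    return -1

assert jump_search([1, 3, 5, 7, 9, 11], 7) == 3
assert jump_search([1, 3, 5, 7, 9, 11], 4) == -1
```
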
|
Text
|
Text
|
update example path
|
2e8bb9e41fccbbf00d3eae080c76e9631333af96
|
<ide><path>docs/build-instructions/os-x.md
<ide> ```
<ide>
<ide> ### `script/build` Options
<del> * `--install-dir` - The full path to the final built application (must include `.app` in the path), e.g. `script/build --install-dir full/path/to/Atom.app`
<add> * `--install-dir` - The full path to the final built application (must include `.app` in the path), e.g. `script/build --install-dir /Users/username/full/path/to/Atom.app`
<ide> * `--build-dir` - Build the application in this directory.
<ide> * `--verbose` - Verbose mode. A lot more information output.
<ide>
| 1
|
Python
|
Python
|
add kubeletversion assignment to _to_node()
|
8580a95ead328a77db60b88349a49ba95e69f29c
|
<ide><path>libcloud/container/drivers/kubernetes.py
<ide> def _to_node(self, data):
<ide> )
<ide> extra = {"memory": memory, "cpu": cpu}
<ide> extra["os"] = data["status"]["nodeInfo"]["operatingSystem"]
<add> extra["kubeletVersion"] = data["status"]["nodeInfo"]["kubeletVersion"]
<ide> # TODO: Find state
<ide> state = NodeState.UNKNOWN
<ide> public_ips, private_ips = [], []
| 1
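
For context, this is the shape of the payload `_to_node()` reads. The values below are made up, but the nesting follows the Kubernetes Node API:

```python
# A trimmed, fabricated Node object; _to_node() pulls both fields
# from data["status"]["nodeInfo"].
data = {
    "status": {
        "nodeInfo": {
            "operatingSystem": "linux",
            "kubeletVersion": "v1.21.0",
        }
    }
}

extra = {
    "os": data["status"]["nodeInfo"]["operatingSystem"],
    "kubeletVersion": data["status"]["nodeInfo"]["kubeletVersion"],
}
assert extra["kubeletVersion"] == "v1.21.0"
```
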
|
Javascript
|
Javascript
|
add async chunk to test case
|
295f751e93e53a18216937e5d660e3b44f55c711
|
<ide><path>test/configCases/output/inner-dirs-entries/inner-dir/b.js
<ide> import dummy from "dummy_module";
<ide>
<del>it("should load", done => {
<add>it("should load", () => {
<ide> expect(dummy()).toBe("this is just a dummy function");
<del> done();
<add> return import("./some-module").then(importedModule => {
<add> expect(importedModule.dummy()).toBe("this is just a dummy function");
<add> });
<ide> });
<ide>
<ide> export { dummy };
<ide><path>test/configCases/output/inner-dirs-entries/inner-dir/some-module.js
<del>export dummy from "dummy-module";
<add>import dummy from "dummy_module";
<add>export { dummy };
| 2
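
The fixed test awaits a dynamic `import()` that produces the async chunk. Python's runtime counterpart, for comparison only (the module name is arbitrary):

```python
# Import a module by name at runtime, the rough analogue of the
# promise-returning import() used in the test above.
import importlib

mod = importlib.import_module("json")
assert hasattr(mod, "loads")
```
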
|
Python
|
Python
|
convert cudnn weights in nested model.
|
aac81c29a762fdaef6718bbc4883f208283aac9c
|
<ide><path>keras/engine/saving.py
<ide> def load_attributes_from_hdf5_group(group, name):
<ide> chunk_id = 0
<ide> while ('%s%d' % (name, chunk_id)) in group.attrs:
<ide> data.extend([n.decode('utf8')
<del> for n in group.attrs['%s%d' % (name, chunk_id)]])
<add> for n in group.attrs['%s%d' % (name, chunk_id)]])
<ide> chunk_id += 1
<ide> return data
<ide>
<ide> def preprocess_weights_for_loading(layer, weights,
<ide> original_keras_version=None,
<ide> original_backend=None,
<ide> reshape=False):
<del> """Converts layers weights from Keras 1 format to Keras 2.
<add> """Converts layers weights from Keras 1 format to Keras 2 and also weights of CuDNN layers in Keras 2.
<ide>
<ide> # Arguments
<ide> layer: Layer instance.
<ide> def preprocess_weights_for_loading(layer, weights,
<ide> # Returns
<ide> A list of weights values (Numpy arrays).
<ide> """
<del> if layer.__class__.__name__ == 'Bidirectional':
<add> def convert_nested_bidirectional(weights):
<add> """Converts layers nested in `Bidirectional` wrapper by `preprocess_weights_for_loading()`.
<add>
<add> # Arguments
<add> weights: List of weights values (Numpy arrays).
<add> # Returns
<add> A list of weights values (Numpy arrays).
<add> """
<ide> num_weights_per_layer = len(weights) // 2
<ide> forward_weights = preprocess_weights_for_loading(layer.forward_layer,
<ide> weights[:num_weights_per_layer],
<ide> def preprocess_weights_for_loading(layer, weights,
<ide> weights[num_weights_per_layer:],
<ide> original_keras_version,
<ide> original_backend)
<del> weights = forward_weights + backward_weights
<add> return forward_weights + backward_weights
<add>
<add> def convert_nested_model(weights):
<add> """Converts layers nested in `Model` or `Sequential` by `preprocess_weights_for_loading()`.
<add>
<add> # Arguments
<add> weights: List of weights values (Numpy arrays).
<add> # Returns
<add> A list of weights values (Numpy arrays).
<add> """
<add> new_weights = []
<add> # trainable weights
<add> for sublayer in layer.layers:
<add> num_weights = len(sublayer.trainable_weights)
<add> if num_weights > 0:
<add> new_weights.extend(preprocess_weights_for_loading(
<add> layer=sublayer,
<add> weights=weights[:num_weights],
<add> original_keras_version=original_keras_version,
<add> original_backend=original_backend))
<add> weights = weights[num_weights:]
<add>
<add> # non-trainable weights
<add> for sublayer in layer.layers:
<add> num_weights = len([l for l in sublayer.weights
<add> if l not in sublayer.trainable_weights])
<add> if num_weights > 0:
<add> new_weights.extend(preprocess_weights_for_loading(
<add> layer=sublayer,
<add> weights=weights[:num_weights],
<add> original_keras_version=original_keras_version,
<add> original_backend=original_backend))
<add> weights = weights[num_weights:]
<add> return new_weights
<add>
<add> # Convert layers nested in Bidirectional/Model/Sequential.
<add> # Both transformations should be run for both Keras 1->2 conversion
<add> # and for conversion of CuDNN layers.
<add> if layer.__class__.__name__ == 'Bidirectional':
<add> weights = convert_nested_bidirectional(weights)
<add> elif layer.__class__.__name__ in ['Model', 'Sequential']:
<add> weights = convert_nested_model(weights)
<ide>
<ide> if original_keras_version == '1':
<ide> if layer.__class__.__name__ == 'TimeDistributed':
<ide> def preprocess_weights_for_loading(layer, weights,
<ide> (2, 3, 1, 0))
<ide> weights = [kernel, recurrent_kernel, bias]
<ide>
<del> if layer.__class__.__name__ in ['Model', 'Sequential']:
<del> new_weights = []
<del> # trainable weights
<del> for sublayer in layer.layers:
<del> num_weights = len(sublayer.trainable_weights)
<del> if num_weights > 0:
<del> new_weights.extend(preprocess_weights_for_loading(
<del> layer=sublayer,
<del> weights=weights[:num_weights],
<del> original_keras_version=original_keras_version,
<del> original_backend=original_backend))
<del> weights = weights[num_weights:]
<del>
<del> # non-trainable weights
<del> for sublayer in layer.layers:
<del> num_weights = len([l for l in sublayer.weights
<del> if l not in sublayer.trainable_weights])
<del> if num_weights > 0:
<del> new_weights.extend(preprocess_weights_for_loading(
<del> layer=sublayer,
<del> weights=weights[:num_weights],
<del> original_keras_version=original_keras_version,
<del> original_backend=original_backend))
<del> weights = weights[num_weights:]
<del> weights = new_weights
<del>
<ide> conv_layers = ['Conv1D',
<ide> 'Conv2D',
<ide> 'Conv3D',
<ide> def preprocess_weights_for_loading(layer, weights,
<ide> if layer.__class__.__name__ == 'ConvLSTM2D':
<ide> weights[1] = np.transpose(weights[1], (3, 2, 0, 1))
<ide>
<add> # convert CuDNN layers
<ide> weights = _convert_rnn_weights(layer, weights)
<ide>
<ide> return weights
<ide> def convert_weights(weights, from_cudnn=True):
<ide> def convert_weights(weights, from_cudnn=True):
<ide> kernels = transform_kernels(weights[0], transpose_input(from_cudnn), n_gates)
<ide> recurrent_kernels = transform_kernels(weights[1], lambda k: k.T, n_gates)
<del> biases = weights[2].reshape((2, -1) if from_cudnn else -1)
<add> biases = np.array(weights[2]).reshape((2, -1) if from_cudnn else -1)
<ide> return [kernels, recurrent_kernels, biases]
<ide>
<ide> if bias_shape == (2 * units * n_gates,):
<ide><path>tests/keras/engine/test_topology.py
<ide> from keras.utils.test_utils import keras_test
<ide>
<ide>
<add>skipif_no_tf_gpu = pytest.mark.skipif(
<add> (K.backend() != 'tensorflow') or (not K.tensorflow_backend._get_available_gpus()),
<add> reason='Requires TensorFlow backend and a GPU')
<add>
<add>
<ide> @keras_test
<ide> def test_get_updates_for():
<ide> a = Input(shape=(2,))
<ide> def test_preprocess_weights_for_loading_rnn_should_be_idempotent(layer_class, la
<ide> (layers.CuDNNGRU, {'units': 2, 'input_shape': [3, 5]}),
<ide> (layers.CuDNNLSTM, {'units': 2, 'input_shape': [3, 5]}),
<ide> ])
<del>@pytest.mark.skipif((K.backend() != 'tensorflow'), reason='Requires TensorFlow backend')
<del>@pytest.mark.skipif(not K.tensorflow_backend._get_available_gpus(), reason='Requires GPU')
<add>@skipif_no_tf_gpu
<ide> def test_preprocess_weights_for_loading_cudnn_rnn_should_be_idempotent(layer_class, layer_args):
<ide> test_preprocess_weights_for_loading_rnn_should_be_idempotent(layer_class, layer_args)
<ide>
<ide><path>tests/keras/layers/cudnn_recurrent_test.py
<ide> import numpy as np
<ide> from numpy.testing import assert_allclose
<ide> import keras
<add>import keras.backend as K
<ide> from keras.utils.test_utils import layer_test
<ide> from keras.utils.test_utils import keras_test
<ide> import time
<ide>
<ide>
<add>skipif_no_tf_gpu = pytest.mark.skipif(
<add> (K.backend() != 'tensorflow') or (not K.tensorflow_backend._get_available_gpus()),
<add> reason='Requires TensorFlow backend and a GPU')
<add>
<add>
<ide> @keras_test
<del>@pytest.mark.skipif((keras.backend.backend() != 'tensorflow'),
<del> reason='Requires TensorFlow backend')
<del>@pytest.mark.skipif(not keras.backend.tensorflow_backend._get_available_gpus(),
<del> reason='Requires GPU')
<add>@skipif_no_tf_gpu
<ide> def test_cudnn_rnn_canonical_to_params_lstm():
<ide> units = 1
<ide> input_size = 1
<ide> def test_cudnn_rnn_canonical_to_params_lstm():
<ide>
<ide>
<ide> @keras_test
<del>@pytest.mark.skipif((keras.backend.backend() != 'tensorflow'),
<del> reason='Requires TensorFlow backend')
<del>@pytest.mark.skipif(not keras.backend.tensorflow_backend._get_available_gpus(),
<del> reason='Requires GPU')
<add>@skipif_no_tf_gpu
<ide> def test_cudnn_rnn_canonical_to_params_gru():
<ide> units = 7
<ide> input_size = 9
<ide> def test_cudnn_rnn_canonical_to_params_gru():
<ide>
<ide> @keras_test
<ide> @pytest.mark.parametrize('rnn_type', ['lstm', 'gru'], ids=['LSTM', 'GRU'])
<del>@pytest.mark.skipif((keras.backend.backend() != 'tensorflow'),
<del> reason='Requires TensorFlow backend')
<del>@pytest.mark.skipif(not keras.backend.tensorflow_backend._get_available_gpus(),
<del> reason='Requires GPU')
<add>@skipif_no_tf_gpu
<ide> def test_cudnn_rnn_timing(rnn_type):
<ide> input_size = 1000
<ide> timesteps = 60
<ide> def test_cudnn_rnn_timing(rnn_type):
<ide>
<ide>
<ide> @keras_test
<del>@pytest.mark.skipif((keras.backend.backend() != 'tensorflow'),
<del> reason='Requires TensorFlow backend')
<del>@pytest.mark.skipif(not keras.backend.tensorflow_backend._get_available_gpus(),
<del> reason='Requires GPU')
<add>@skipif_no_tf_gpu
<ide> def test_cudnn_rnn_basics():
<ide> input_size = 10
<ide> timesteps = 6
<ide> def test_cudnn_rnn_basics():
<ide>
<ide>
<ide> @keras_test
<del>@pytest.mark.skipif((keras.backend.backend() != 'tensorflow'),
<del> reason='Requires TensorFlow backend')
<del>@pytest.mark.skipif(not keras.backend.tensorflow_backend._get_available_gpus(),
<del> reason='Requires GPU')
<add>@skipif_no_tf_gpu
<ide> def test_trainability():
<ide> input_size = 10
<ide> units = 2
<ide> def test_trainability():
<ide>
<ide>
<ide> @keras_test
<del>@pytest.mark.skipif((keras.backend.backend() != 'tensorflow'),
<del> reason='Requires TensorFlow backend')
<del>@pytest.mark.skipif(not keras.backend.tensorflow_backend._get_available_gpus(),
<del> reason='Requires GPU')
<add>@skipif_no_tf_gpu
<ide> def test_regularizer():
<ide> input_size = 10
<ide> timesteps = 6
<ide> def test_regularizer():
<ide>
<ide>
<ide> @keras_test
<del>@pytest.mark.skipif((keras.backend.backend() != 'tensorflow'),
<del> reason='Requires TensorFlow backend')
<del>@pytest.mark.skipif(not keras.backend.tensorflow_backend._get_available_gpus(),
<del> reason='Requires GPU')
<add>@skipif_no_tf_gpu
<ide> def test_return_state():
<ide> input_size = 10
<ide> timesteps = 6
<ide> def test_return_state():
<ide>
<ide>
<ide> @keras_test
<del>@pytest.mark.skipif((keras.backend.backend() != 'tensorflow'),
<del> reason='Requires TensorFlow backend')
<del>@pytest.mark.skipif(not keras.backend.tensorflow_backend._get_available_gpus(),
<del> reason='Requires GPU')
<add>@skipif_no_tf_gpu
<ide> def test_specify_initial_state_keras_tensor():
<ide> input_size = 10
<ide> timesteps = 6
<ide> def test_specify_initial_state_keras_tensor():
<ide>
<ide>
<ide> @keras_test
<del>@pytest.mark.skipif((keras.backend.backend() != 'tensorflow'),
<del> reason='Requires TensorFlow backend')
<del>@pytest.mark.skipif(not keras.backend.tensorflow_backend._get_available_gpus(),
<del> reason='Requires GPU')
<add>@skipif_no_tf_gpu
<ide> def test_statefulness():
<ide> input_size = 10
<ide> timesteps = 6
<ide> def test_statefulness():
<ide>
<ide>
<ide> @keras_test
<del>@pytest.mark.parametrize('implementation', [1, 2], ids=['impl1', 'impl2'])
<del>@pytest.mark.parametrize('bidirectional', [False, True], ids=['single', 'bidirectional'])
<del>@pytest.mark.parametrize('to_cudnn', [False, True], ids=['from_cudnn', 'to_cudnn'])
<del>@pytest.mark.parametrize('rnn_type', ['LSTM', 'GRU'], ids=['LSTM', 'GRU'])
<del>@pytest.mark.skipif((keras.backend.backend() != 'tensorflow'),
<del> reason='Requires TensorFlow backend')
<del>@pytest.mark.skipif(not keras.backend.tensorflow_backend._get_available_gpus(),
<del> reason='Requires GPU')
<del>def test_load_weights_between_noncudnn_rnn(rnn_type, to_cudnn, bidirectional, implementation):
<del> input_size = 10
<del> timesteps = 6
<del> input_shape = (timesteps, input_size)
<del> units = 2
<del> num_samples = 32
<del> inputs = np.random.random((num_samples, timesteps, input_size))
<del>
<del> rnn_layer_kwargs = {
<del> 'recurrent_activation': 'sigmoid',
<del> # ensure biases are non-zero and properly converted
<del> 'bias_initializer': 'random_uniform',
<del> 'implementation': implementation
<del> }
<del> if rnn_type == 'LSTM':
<del> rnn_layer_class = keras.layers.LSTM
<del> cudnn_rnn_layer_class = keras.layers.CuDNNLSTM
<del> else:
<del> rnn_layer_class = keras.layers.GRU
<del> cudnn_rnn_layer_class = keras.layers.CuDNNGRU
<del> rnn_layer_kwargs['reset_after'] = True
<del>
<del> def convert_weights(source_layer, target_layer):
<del> weights = source_layer.get_weights()
<del> weights = keras.engine.saving.preprocess_weights_for_loading(target_layer, weights)
<del> target_layer.set_weights(weights)
<del>
<del> input_layer = keras.layers.InputLayer(input_shape)
<del>
<del> layer = rnn_layer_class(units, **rnn_layer_kwargs)
<del> if bidirectional:
<del> layer = keras.layers.Bidirectional(layer)
<del>
<del> cudnn_layer = cudnn_rnn_layer_class(units)
<del> if bidirectional:
<del> cudnn_layer = keras.layers.Bidirectional(cudnn_layer)
<del>
<del> model = keras.models.Sequential([input_layer, layer])
<del> cudnn_model = keras.models.Sequential([input_layer, cudnn_layer])
<del>
<del> if to_cudnn:
<del> convert_weights(layer, cudnn_layer)
<del> else:
<del> convert_weights(cudnn_layer, layer)
<del>
<del> assert_allclose(model.predict(inputs), cudnn_model.predict(inputs), atol=1e-4)
<del>
<del>
<del>@keras_test
<del>@pytest.mark.skipif((keras.backend.backend() != 'tensorflow'),
<del> reason='Requires TensorFlow backend')
<del>@pytest.mark.skipif(not keras.backend.tensorflow_backend._get_available_gpus(),
<del> reason='Requires GPU')
<add>@skipif_no_tf_gpu
<ide> def test_cudnnrnn_bidirectional():
<ide> rnn = keras.layers.CuDNNGRU
<ide> samples = 2
<ide> def test_cudnnrnn_bidirectional():
<ide> model.fit(x, y, epochs=1, batch_size=1)
<ide>
<ide>
<del>@pytest.mark.skipif((keras.backend.backend() != 'tensorflow'),
<del> reason='Requires TensorFlow backend')
<del>@pytest.mark.skipif(not keras.backend.tensorflow_backend._get_available_gpus(),
<del> reason='Requires GPU')
<del>def test_preprocess_weights_for_loading_gru_incompatible():
<del> """
<del> Loading weights between incompatible layers should fail fast with an exception.
<del> """
<del> def gru(cudnn=False, **kwargs):
<del> layer_class = keras.layers.CuDNNGRU if cudnn else keras.layers.GRU
<del> return layer_class(2, input_shape=[3, 5], **kwargs)
<del>
<del> def initialize_weights(layer):
<del> # A model is needed to initialize weights.
<del> _ = keras.models.Sequential([layer])
<del> return layer
<del>
<del> def assert_not_compatible(src, dest, message):
<del> with pytest.raises(ValueError) as ex:
<del> keras.engine.saving.preprocess_weights_for_loading(
<del> dest, initialize_weights(src).get_weights())
<del> assert message in ex.value.message
<del>
<del> assert_not_compatible(gru(), gru(cudnn=True),
<del> 'GRU(reset_after=False) is not compatible with CuDNNGRU')
<del> assert_not_compatible(gru(cudnn=True), gru(),
<del> 'CuDNNGRU is not compatible with GRU(reset_after=False)')
<del> assert_not_compatible(gru(), gru(reset_after=True),
<del> 'GRU(reset_after=False) is not compatible with GRU(reset_after=True)')
<del> assert_not_compatible(gru(reset_after=True), gru(),
<del> 'GRU(reset_after=True) is not compatible with GRU(reset_after=False)')
<del>
<del>
<ide> if __name__ == '__main__':
<ide> pytest.main([__file__])
<ide><path>tests/test_model_saving.py
<ide> from numpy.testing import assert_raises
<ide>
<ide> from keras import backend as K
<add>from keras.engine.saving import preprocess_weights_for_loading
<ide> from keras.models import Model, Sequential
<del>from keras.layers import Dense, Lambda, RepeatVector, TimeDistributed, LSTM
<add>from keras.layers import Dense, Lambda, RepeatVector, TimeDistributed, Bidirectional, GRU, LSTM, CuDNNGRU, CuDNNLSTM
<ide> from keras.layers import Conv2D, Flatten
<del>from keras.layers import Input
<add>from keras.layers import Input, InputLayer
<ide> from keras import optimizers
<ide> from keras import losses
<ide> from keras import metrics
<ide> from keras.utils.test_utils import keras_test
<ide> from keras.models import save_model, load_model
<ide>
<ide>
<add>skipif_no_tf_gpu = pytest.mark.skipif(
<add> (K.backend() != 'tensorflow') or (not K.tensorflow_backend._get_available_gpus()),
<add> reason='Requires TensorFlow backend and a GPU')
<add>
<add>
<ide> @keras_test
<ide> def test_sequential_model_saving():
<ide> model = Sequential()
<ide> def test_saving_recurrent_layer_without_bias():
<ide> os.remove(fname)
<ide>
<ide>
<add>@keras_test
<add>@pytest.mark.parametrize('implementation', [1, 2], ids=['impl1', 'impl2'])
<add>@pytest.mark.parametrize('bidirectional', [False, True], ids=['single', 'bidirectional'])
<add>@pytest.mark.parametrize('to_cudnn', [False, True], ids=['from_cudnn', 'to_cudnn'])
<add>@pytest.mark.parametrize('rnn_type', ['LSTM', 'GRU'], ids=['LSTM', 'GRU'])
<add>@pytest.mark.parametrize('model_nest_level', [1, 2], ids=['model_plain', 'model_nested'])
<add>@pytest.mark.parametrize('model_type', ['func', 'seq'], ids=['model_func', 'model_seq'])
<add>@skipif_no_tf_gpu
<add>def test_load_weights_between_noncudnn_rnn(rnn_type, to_cudnn, bidirectional, implementation,
<add> model_nest_level, model_type):
<add> input_size = 10
<add> timesteps = 6
<add> input_shape = (timesteps, input_size)
<add> units = 2
<add> num_samples = 32
<add> inputs = np.random.random((num_samples, timesteps, input_size))
<add>
<add> rnn_layer_kwargs = {
<add> 'recurrent_activation': 'sigmoid',
<add> # ensure biases are non-zero and properly converted
<add> 'bias_initializer': 'random_uniform',
<add> 'implementation': implementation
<add> }
<add> if rnn_type == 'LSTM':
<add> rnn_layer_class = LSTM
<add> cudnn_rnn_layer_class = CuDNNLSTM
<add> else:
<add> rnn_layer_class = GRU
<add> cudnn_rnn_layer_class = CuDNNGRU
<add> rnn_layer_kwargs['reset_after'] = True
<add>
<add> def convert_model(source_model, target_model):
<add> _, fname = tempfile.mkstemp('.h5')
<add> source_model.save_weights(fname)
<add> target_model.load_weights(fname)
<add> os.remove(fname)
<add>
<add> layer = rnn_layer_class(units, **rnn_layer_kwargs)
<add> if bidirectional:
<add> layer = Bidirectional(layer)
<add>
<add> cudnn_layer = cudnn_rnn_layer_class(units)
<add> if bidirectional:
<add> cudnn_layer = Bidirectional(cudnn_layer)
<add>
<add> model = _make_nested_model(input_shape, layer, model_nest_level, model_type)
<add> cudnn_model = _make_nested_model(input_shape, cudnn_layer, model_nest_level, model_type)
<add>
<add> if to_cudnn:
<add> convert_model(model, cudnn_model)
<add> else:
<add> convert_model(cudnn_model, model)
<add>
<add> assert_allclose(model.predict(inputs), cudnn_model.predict(inputs), atol=1e-4)
<add>
<add>
<add>def _make_nested_model(input_shape, layer, level=1, model_type='func'):
<add> # example: make_nested_seq_model((1,), Dense(10), level=2).summary()
<add> def make_nested_seq_model(input_shape, layer, level=1):
<add> model = layer
<add> for i in range(1, level + 1):
<add> layers = [InputLayer(input_shape), model] if (i == 1) else [model]
<add> model = Sequential(layers)
<add> return model
<add>
<add> # example: make_nested_func_model((1,), Dense(10), level=2).summary()
<add> def make_nested_func_model(input_shape, layer, level=1):
<add> input = Input(input_shape)
<add> model = layer
<add> for i in range(level):
<add> model = Model(input, model(input))
<add> return model
<add>
<add> if model_type == 'func':
<add> return make_nested_func_model(input_shape, layer, level)
<add> elif model_type == 'seq':
<add> return make_nested_seq_model(input_shape, layer, level)
<add>
<add>
<add>@skipif_no_tf_gpu
<add>def test_preprocess_weights_for_loading_gru_incompatible():
<add> """
<add> Loading weights between incompatible layers should fail fast with an exception.
<add> """
<add> def gru(cudnn=False, **kwargs):
<add> layer_class = CuDNNGRU if cudnn else GRU
<add> return layer_class(2, input_shape=[3, 5], **kwargs)
<add>
<add> def initialize_weights(layer):
<add> # A model is needed to initialize weights.
<add> _ = Sequential([layer])
<add> return layer
<add>
<add> def assert_not_compatible(src, dest, message):
<add> with pytest.raises(ValueError) as ex:
<add> preprocess_weights_for_loading(dest, initialize_weights(src).get_weights())
<add> assert message in ex.value.message
<add>
<add> assert_not_compatible(gru(), gru(cudnn=True),
<add> 'GRU(reset_after=False) is not compatible with CuDNNGRU')
<add> assert_not_compatible(gru(cudnn=True), gru(),
<add> 'CuDNNGRU is not compatible with GRU(reset_after=False)')
<add> assert_not_compatible(gru(), gru(reset_after=True),
<add> 'GRU(reset_after=False) is not compatible with GRU(reset_after=True)')
<add> assert_not_compatible(gru(reset_after=True), gru(),
<add> 'GRU(reset_after=True) is not compatible with GRU(reset_after=False)')
<add>
<add>
<ide> if __name__ == '__main__':
<ide> pytest.main([__file__])
| 4
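
The heart of `convert_nested_model()` is bookkeeping: a nested model's weights arrive as one flat list, and each sublayer consumes its own count of arrays off the front. A toy version of that slicing, with strings standing in for weight arrays and an invented helper name:

```python
# Split one flat weight list into per-sublayer chunks, consuming
# `count` items per sublayer, as convert_nested_model() does.
def split_flat_weights(sublayer_counts, flat_weights):
    out, rest = [], list(flat_weights)
    for count in sublayer_counts:
        chunk, rest = rest[:count], rest[count:]
        out.append(chunk)
    return out

# e.g. sublayers owning 2, 3 and 1 weight arrays respectively
chunks = split_flat_weights([2, 3, 1], ["w0", "w1", "w2", "w3", "w4", "w5"])
assert chunks == [["w0", "w1"], ["w2", "w3", "w4"], ["w5"]]
```
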
|
Javascript
|
Javascript
|
support optional headers with wrk
|
f8e75512951b5060409b0c80e246ffc81ee9e72e
|
<ide><path>benchmark/_http-benchmarkers.js
<ide> class WrkBenchmarker {
<ide> '-t', 8,
<ide> `http://127.0.0.1:${options.port}${options.path}`,
<ide> ];
<add> for (const field in options.headers) {
<add> args.push('-H', `${field}: ${options.headers[field]}`);
<add> }
<ide> const child = child_process.spawn(this.executable, args);
<ide> return child;
<ide> }
| 1
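
The patch folds an optional headers mapping into repeated `-H` flags. The same construction sketched in Python (all values are illustrative):

```python
# Build a wrk argument vector with one `-H "Name: value"` pair per
# header, as the benchmarker patch does.
options = {"port": 12346, "path": "/", "headers": {"Connection": "keep-alive"}}

args = ["-d", "10s", "-c", "100", "-t", "8",
        "http://127.0.0.1:%d%s" % (options["port"], options["path"])]
for field, value in options.get("headers", {}).items():
    args.extend(["-H", "%s: %s" % (field, value)])

assert args[-2:] == ["-H", "Connection: keep-alive"]
```
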
|
Ruby
|
Ruby
|
use #remove_possible_method instead here
|
9be7911e873e1ba0d0b0bac197b426693b4f3885
|
<ide><path>actionpack/lib/action_dispatch/routing/route_set.rb
<ide> require 'active_support/core_ext/object/blank'
<ide> require 'active_support/core_ext/object/to_query'
<ide> require 'active_support/core_ext/hash/slice'
<add>require 'active_support/core_ext/module/remove_method'
<ide>
<ide> module ActionDispatch
<ide> module Routing
<ide> def define_hash_access(route, name, kind, options)
<ide>
<ide> # We use module_eval to avoid leaks
<ide> @module.module_eval <<-END_EVAL, __FILE__, __LINE__ + 1
<del> remove_method :#{selector} if method_defined?(:#{selector})
<add> remove_possible_method :#{selector}
<ide> def #{selector}(*args)
<ide> options = args.extract_options!
<ide>
<ide> def define_url_helper(route, name, kind, options)
<ide> hash_access_method = hash_access_name(name, kind)
<ide>
<ide> @module.module_eval <<-END_EVAL, __FILE__, __LINE__ + 1
<del> remove_method :#{selector} if method_defined?(:#{selector})
<add> remove_possible_method :#{selector}
<ide> def #{selector}(*args)
<ide> url_for(#{hash_access_method}(*args))
<ide> end
| 1
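
`remove_possible_method` is essentially "remove it if defined, otherwise do nothing", which lets the eval'd helper be redefined repeatedly without raising. An analogous guarded removal in Python (class and method names are invented):

```python
# Remove a method only when the class actually defines it, so repeated
# redefinition, like the route helpers above, never raises.
class Helpers:
    def users_path(self):
        return "/users"

def remove_possible_method(cls, name):
    if name in cls.__dict__:
        delattr(cls, name)

remove_possible_method(Helpers, "users_path")  # removed
remove_possible_method(Helpers, "users_path")  # safe no-op
```
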
|
Ruby
|
Ruby
|
remove new method and reset without todo
|
7a95219d2b60253103f04424e743501e35fe18cb
|
<ide><path>Library/Homebrew/formula_cellar_checks.rb
<ide> def check_cpuid_instruction(formula)
<ide> dot_brew_formula = formula.prefix/".brew/#{formula.name}.rb"
<ide> return unless dot_brew_formula.exist?
<ide>
<del> require "utils/ast"
<del> return unless Utils::AST::FormulaAST.new(dot_brew_formula.read).include_runtime_cpu_detection?
<add> return unless dot_brew_formula.read.include? "ENV.runtime_cpu_detection"
<ide>
<ide> # macOS `objdump` is a bit slow, so we prioritise llvm's `llvm-objdump` (~5.7x faster)
<ide> # or binutils' `objdump` (~1.8x faster) if they are installed.
<ide><path>Library/Homebrew/utils/ast.rb
<ide> def add_stanza(name, value, type: nil)
<ide> tree_rewriter.insert_after(preceding_expr, "\n#{stanza_text(name, value, indent: 2)}")
<ide> end
<ide>
<del> sig { returns(T::Boolean) }
<del> def include_runtime_cpu_detection?
<del> install_node = children.find do |child|
<del> (child.is_a? RuboCop::AST::DefNode) && child.method_name == :install
<del> end
<del>
<del> return false if install_node.blank?
<del>
<del> install_node.each_node.any? do |node|
<del> node.send_type? && node.receiver&.const_name == "ENV" && node.method_name == :runtime_cpu_detection
<del> end
<del> end
<del>
<ide> private
<ide>
<ide> sig { returns(String) }
| 2
|
Javascript
|
Javascript
|
log it all
|
027b433953a6e5234b7236758a3bc75575dc8092
|
<ide><path>spec/integration/helpers/start-atom.js
<ide> module.exports = function(args, env, fn) {
<ide> errorCode = code
<ide> }
<ide> })
<del> chromedriver.stderr.on('data', log => chromedriverLogs.push(log.toString()))
<add> chromedriver.stdout.on('data', log => console.log(log.toString()))
<add> chromedriver.stderr.on('data', log => console.log(log.toString()))
<add> // chromedriver.stderr.on('data', log => chromedriverLogs.push(log.toString()))
<ide> chromedriver.stderr.on('close', () => resolve(errorCode))
<ide> })
<ide> })
| 1
|
PHP
|
PHP
|
add check for simple category elements
|
3a8c49e31999f69dc184cd020f4f7879d71c495b
|
<ide><path>lib/Cake/Test/Case/View/Helper/RssHelperTest.php
<ide> public function testItem() {
<ide> '/item'
<ide> );
<ide> $this->assertTags($result, $expected);
<add>
<add> $item = array(
<add> 'title' => 'My title',
<add> 'description' => 'My description',
<add> 'link' => 'http://www.google.com/',
<add> 'category' => array('Category One', 'Category Two')
<add> );
<add> $result = $this->Rss->item(null, $item);
<add> $expected = array(
<add> '<item',
<add> '<title',
<add> 'My title',
<add> '/title',
<add> '<description',
<add> 'My description',
<add> '/description',
<add> '<link',
<add> 'http://www.google.com/',
<add> '/link',
<add> '<category',
<add> 'Category One',
<add> '/category',
<add> '<category',
<add> 'Category Two',
<add> '/category',
<add> '<guid',
<add> 'http://www.google.com/',
<add> '/guid',
<add> '/item'
<add> );
<add> $this->assertTags($result, $expected);
<ide> }
<ide>
<ide> /**
<ide><path>lib/Cake/View/Helper/RssHelper.php
<ide> public function item($att = array(), $elements = array()) {
<ide> if (is_array($val) && !empty($val[0])) {
<ide> foreach ($val as $category) {
<ide> $attrib = array();
<del> if (isset($category['domain'])) {
<add> if (is_array($category) && isset($category['domain'])) {
<ide> $attrib['domain'] = $category['domain'];
<ide> unset($category['domain']);
<ide> }
| 2
|
Mixed
|
Python
|
add fp16 support to official resnet.
|
fbb27cf31f09c3b4b10c1e237fd283f06db301d2
|
<ide><path>official/resnet/README.md
<ide> You can download 190 MB pre-trained versions of ResNet-50 achieving 76.3% and 75
<ide>
<ide> Other versions and formats:
<ide>
<del>* [ResNet-v2-ImageNet Checkpoint](http://download.tensorflow.org/models/official/resnetv2_imagenet_checkpoint.tar.gz)
<del>* [ResNet-v2-ImageNet SavedModel](http://download.tensorflow.org/models/official/resnetv2_imagenet_savedmodel.tar.gz)
<del>* [ResNet-v2-ImageNet Frozen Graph](http://download.tensorflow.org/models/official/resnetv2_imagenet_frozen_graph.pb)
<del>* [ResNet-v1-ImageNet Checkpoint](http://download.tensorflow.org/models/official/resnetv1_imagenet_checkpoint.tar.gz)
<del>* [ResNet-v1-ImageNet SavedModel](http://download.tensorflow.org/models/official/resnetv1_imagenet_savedmodel.tar.gz)
<del>* [ResNet-v1-ImageNet Frozen Graph](http://download.tensorflow.org/models/official/resnetv1_imagenet_frozen_graph.pb)
<add>* [ResNet-v2-ImageNet Checkpoint](http://download.tensorflow.org/models/official/resnet_v2_imagenet_checkpoint.tar.gz)
<add>* [ResNet-v2-ImageNet SavedModel](http://download.tensorflow.org/models/official/resnet_v2_imagenet_savedmodel.tar.gz)
<add>* [ResNet-v1-ImageNet Checkpoint](http://download.tensorflow.org/models/official/resnet_v1_imagenet_checkpoint.tar.gz)
<add>* [ResNet-v1-ImageNet SavedModel](http://download.tensorflow.org/models/official/resnet_v1_imagenet_savedmodel.tar.gz)
<ide><path>official/resnet/cifar10_main.py
<ide> class Cifar10Model(resnet_model.Model):
<ide> """Model class with appropriate defaults for CIFAR-10 data."""
<ide>
<ide> def __init__(self, resnet_size, data_format=None, num_classes=_NUM_CLASSES,
<del> version=resnet_model.DEFAULT_VERSION):
<add> version=resnet_model.DEFAULT_VERSION,
<add> dtype=resnet_model.DEFAULT_DTYPE):
<ide> """These are the parameters that work for CIFAR-10 data.
<ide>
<ide> Args:
<ide> def __init__(self, resnet_size, data_format=None, num_classes=_NUM_CLASSES,
<ide> enables users to extend the same model to their own datasets.
<ide> version: Integer representing which version of the ResNet network to use.
<ide> See README for details. Valid values: [1, 2]
<add> dtype: The TensorFlow dtype to use for calculations.
<ide>
<ide> Raises:
<ide> ValueError: if invalid resnet_size is chosen
<ide> def __init__(self, resnet_size, data_format=None, num_classes=_NUM_CLASSES,
<ide> block_strides=[1, 2, 2],
<ide> final_size=64,
<ide> version=version,
<del> data_format=data_format)
<add> data_format=data_format,
<add> dtype=dtype
<add> )
<ide>
<ide>
<ide> def cifar10_model_fn(features, labels, mode, params):
<ide> def cifar10_model_fn(features, labels, mode, params):
<ide> def loss_filter_fn(_):
<ide> return True
<ide>
<del> return resnet_run_loop.resnet_model_fn(features, labels, mode, Cifar10Model,
<del> resnet_size=params['resnet_size'],
<del> weight_decay=weight_decay,
<del> learning_rate_fn=learning_rate_fn,
<del> momentum=0.9,
<del> data_format=params['data_format'],
<del> version=params['version'],
<del> loss_filter_fn=loss_filter_fn,
<del> multi_gpu=params['multi_gpu'])
<add> return resnet_run_loop.resnet_model_fn(
<add> features=features,
<add> labels=labels,
<add> mode=mode,
<add> model_class=Cifar10Model,
<add> resnet_size=params['resnet_size'],
<add> weight_decay=weight_decay,
<add> learning_rate_fn=learning_rate_fn,
<add> momentum=0.9,
<add> data_format=params['data_format'],
<add> version=params['version'],
<add> loss_scale=params['loss_scale'],
<add> loss_filter_fn=loss_filter_fn,
<add> multi_gpu=params['multi_gpu'],
<add> dtype=params['dtype']
<add> )
<ide>
<ide>
<ide> def main(argv):
<ide><path>official/resnet/cifar10_test.py
<ide> def test_dataset_input_fn(self):
<ide> for pixel in row:
<ide> self.assertAllClose(pixel, np.array([-1.225, 0., 1.225]), rtol=1e-3)
<ide>
<add> def _cifar10_model_fn_helper(self, mode, version, dtype, multi_gpu=False):
<add> with tf.Graph().as_default() as g:
<add> input_fn = cifar10_main.get_synth_input_fn()
<add> dataset = input_fn(True, '', _BATCH_SIZE)
<add> iterator = dataset.make_one_shot_iterator()
<add> features, labels = iterator.get_next()
<add> spec = cifar10_main.cifar10_model_fn(
<add> features, labels, mode, {
<add> 'dtype': dtype,
<add> 'resnet_size': 32,
<add> 'data_format': 'channels_last',
<add> 'batch_size': _BATCH_SIZE,
<add> 'version': version,
<add> 'loss_scale': 128 if dtype == tf.float16 else 1,
<add> 'multi_gpu': multi_gpu
<add> })
<add>
<add> predictions = spec.predictions
<add> self.assertAllEqual(predictions['probabilities'].shape,
<add> (_BATCH_SIZE, 10))
<add> self.assertEqual(predictions['probabilities'].dtype, tf.float32)
<add> self.assertAllEqual(predictions['classes'].shape, (_BATCH_SIZE,))
<add> self.assertEqual(predictions['classes'].dtype, tf.int64)
<add>
<add> if mode != tf.estimator.ModeKeys.PREDICT:
<add> loss = spec.loss
<add> self.assertAllEqual(loss.shape, ())
<add> self.assertEqual(loss.dtype, tf.float32)
<add>
<add> if mode == tf.estimator.ModeKeys.EVAL:
<add> eval_metric_ops = spec.eval_metric_ops
<add> self.assertAllEqual(eval_metric_ops['accuracy'][0].shape, ())
<add> self.assertAllEqual(eval_metric_ops['accuracy'][1].shape, ())
<add> self.assertEqual(eval_metric_ops['accuracy'][0].dtype, tf.float32)
<add> self.assertEqual(eval_metric_ops['accuracy'][1].dtype, tf.float32)
<add>
<add> for v in tf.trainable_variables():
<add> self.assertEqual(v.dtype.base_dtype, tf.float32)
<add>
<add> tensors_to_check = ('initial_conv:0', 'block_layer1:0', 'block_layer2:0',
<add> 'block_layer3:0', 'final_reduce_mean:0',
<add> 'final_dense:0')
<add>
<add> for tensor_name in tensors_to_check:
<add> tensor = g.get_tensor_by_name('resnet_model/' + tensor_name)
<add> self.assertEqual(tensor.dtype, dtype,
<add> 'Tensor {} has dtype {}, while dtype {} was '
<add> 'expected'.format(tensor, tensor.dtype,
<add> dtype))
<add>
<ide> def cifar10_model_fn_helper(self, mode, version, multi_gpu=False):
<del> input_fn = cifar10_main.get_synth_input_fn()
<del> dataset = input_fn(True, '', _BATCH_SIZE)
<del> iterator = dataset.make_one_shot_iterator()
<del> features, labels = iterator.get_next()
<del> spec = cifar10_main.cifar10_model_fn(
<del> features, labels, mode, {
<del> 'resnet_size': 32,
<del> 'data_format': 'channels_last',
<del> 'batch_size': _BATCH_SIZE,
<del> 'version': version,
<del> 'multi_gpu': multi_gpu
<del> })
<del>
<del> predictions = spec.predictions
<del> self.assertAllEqual(predictions['probabilities'].shape,
<del> (_BATCH_SIZE, 10))
<del> self.assertEqual(predictions['probabilities'].dtype, tf.float32)
<del> self.assertAllEqual(predictions['classes'].shape, (_BATCH_SIZE,))
<del> self.assertEqual(predictions['classes'].dtype, tf.int64)
<del>
<del> if mode != tf.estimator.ModeKeys.PREDICT:
<del> loss = spec.loss
<del> self.assertAllEqual(loss.shape, ())
<del> self.assertEqual(loss.dtype, tf.float32)
<del>
<del> if mode == tf.estimator.ModeKeys.EVAL:
<del> eval_metric_ops = spec.eval_metric_ops
<del> self.assertAllEqual(eval_metric_ops['accuracy'][0].shape, ())
<del> self.assertAllEqual(eval_metric_ops['accuracy'][1].shape, ())
<del> self.assertEqual(eval_metric_ops['accuracy'][0].dtype, tf.float32)
<del> self.assertEqual(eval_metric_ops['accuracy'][1].dtype, tf.float32)
<add> self._cifar10_model_fn_helper(mode=mode, version=version, dtype=tf.float32,
<add> multi_gpu=multi_gpu)
<add> self._cifar10_model_fn_helper(mode=mode, version=version, dtype=tf.float16,
<add> multi_gpu=multi_gpu)
<ide>
<ide> def test_cifar10_model_fn_train_mode_v1(self):
<ide> self.cifar10_model_fn_helper(tf.estimator.ModeKeys.TRAIN, version=1)
<ide> def test_cifar10_model_fn_predict_mode_v1(self):
<ide> def test_cifar10_model_fn_predict_mode_v2(self):
<ide> self.cifar10_model_fn_helper(tf.estimator.ModeKeys.PREDICT, version=2)
<ide>
<del> def test_cifar10model_shape(self):
<add> def _test_cifar10model_shape(self, version):
<ide> batch_size = 135
<ide> num_classes = 246
<ide>
<del> for version in (1, 2):
<del> model = cifar10_main.Cifar10Model(
<del> 32, data_format='channels_last', num_classes=num_classes,
<del> version=version)
<del> fake_input = tf.random_uniform(
<del> [batch_size, _HEIGHT, _WIDTH, _NUM_CHANNELS])
<del> output = model(fake_input, training=True)
<add> model = cifar10_main.Cifar10Model(32, data_format='channels_last',
<add> num_classes=num_classes, version=version)
<add> fake_input = tf.random_uniform([batch_size, _HEIGHT, _WIDTH, _NUM_CHANNELS])
<add> output = model(fake_input, training=True)
<add>
<add> self.assertAllEqual(output.shape, (batch_size, num_classes))
<add>
<add> def test_cifar10model_shape_v1(self):
<add> self._test_cifar10model_shape(version=1)
<ide>
<del> self.assertAllEqual(output.shape, (batch_size, num_classes))
<add> def test_cifar10model_shape_v2(self):
<add> self._test_cifar10model_shape(version=2)
<ide>
<ide> def test_cifar10_end_to_end_synthetic_v1(self):
<ide> integration.run_synthetic(
<ide><path>official/resnet/imagenet_main.py
<ide> class ImagenetModel(resnet_model.Model):
<ide> """Model class with appropriate defaults for Imagenet data."""
<ide>
<ide> def __init__(self, resnet_size, data_format=None, num_classes=_NUM_CLASSES,
<del> version=resnet_model.DEFAULT_VERSION):
<add> version=resnet_model.DEFAULT_VERSION,
<add> dtype=resnet_model.DEFAULT_DTYPE):
<ide> """These are the parameters that work for Imagenet data.
<ide>
<ide> Args:
<ide> def __init__(self, resnet_size, data_format=None, num_classes=_NUM_CLASSES,
<ide> enables users to extend the same model to their own datasets.
<ide> version: Integer representing which version of the ResNet network to use.
<ide> See README for details. Valid values: [1, 2]
<add> dtype: The TensorFlow dtype to use for calculations.
<ide> """
<ide>
<ide> # For bigger models, we want to use "bottleneck" layers
<ide> def __init__(self, resnet_size, data_format=None, num_classes=_NUM_CLASSES,
<ide> block_strides=[1, 2, 2, 2],
<ide> final_size=final_size,
<ide> version=version,
<del> data_format=data_format)
<add> data_format=data_format,
<add> dtype=dtype
<add> )
<ide>
<ide>
<ide> def _get_block_sizes(resnet_size):
<ide> def imagenet_model_fn(features, labels, mode, params):
<ide> num_images=_NUM_IMAGES['train'], boundary_epochs=[30, 60, 80, 90],
<ide> decay_rates=[1, 0.1, 0.01, 0.001, 1e-4])
<ide>
<del> return resnet_run_loop.resnet_model_fn(features, labels, mode, ImagenetModel,
<del> resnet_size=params['resnet_size'],
<del> weight_decay=1e-4,
<del> learning_rate_fn=learning_rate_fn,
<del> momentum=0.9,
<del> data_format=params['data_format'],
<del> version=params['version'],
<del> loss_filter_fn=None,
<del> multi_gpu=params['multi_gpu'])
<add> return resnet_run_loop.resnet_model_fn(
<add> features=features,
<add> labels=labels,
<add> mode=mode,
<add> model_class=ImagenetModel,
<add> resnet_size=params['resnet_size'],
<add> weight_decay=1e-4,
<add> learning_rate_fn=learning_rate_fn,
<add> momentum=0.9,
<add> data_format=params['data_format'],
<add> version=params['version'],
<add> loss_scale=params['loss_scale'],
<add> loss_filter_fn=None,
<add> multi_gpu=params['multi_gpu'],
<add> dtype=params['dtype']
<add> )
<ide>
<ide>
<ide> def main(argv):
<ide><path>official/resnet/imagenet_test.py
<ide> def tearDown(self):
<ide> super(BaseTest, self).tearDown()
<ide> tf.gfile.DeleteRecursively(self.get_temp_dir())
<ide>
<del> def tensor_shapes_helper(self, resnet_size, version, with_gpu=False):
<add> def _tensor_shapes_helper(self, resnet_size, version, dtype, with_gpu):
<ide> """Checks the tensor shapes after each phase of the ResNet model."""
<ide> def reshape(shape):
<ide> """Returns the expected dimensions depending on if a GPU is being used."""
<ide> def reshape(shape):
<ide> graph = tf.Graph()
<ide>
<ide> with graph.as_default(), self.test_session(
<del> use_gpu=with_gpu, force_gpu=with_gpu):
<add> graph=graph, use_gpu=with_gpu, force_gpu=with_gpu):
<ide> model = imagenet_main.ImagenetModel(
<del> resnet_size,
<add> resnet_size=resnet_size,
<ide> data_format='channels_first' if with_gpu else 'channels_last',
<del> version=version)
<add> version=version,
<add> dtype=dtype
<add> )
<ide> inputs = tf.random_uniform([1, 224, 224, 3])
<ide> output = model(inputs, training=True)
<ide>
<del> initial_conv = graph.get_tensor_by_name('initial_conv:0')
<del> max_pool = graph.get_tensor_by_name('initial_max_pool:0')
<del> block_layer1 = graph.get_tensor_by_name('block_layer1:0')
<del> block_layer2 = graph.get_tensor_by_name('block_layer2:0')
<del> block_layer3 = graph.get_tensor_by_name('block_layer3:0')
<del> block_layer4 = graph.get_tensor_by_name('block_layer4:0')
<del> reduce_mean = graph.get_tensor_by_name('final_reduce_mean:0')
<del> dense = graph.get_tensor_by_name('final_dense:0')
<add> initial_conv = graph.get_tensor_by_name('resnet_model/initial_conv:0')
<add> max_pool = graph.get_tensor_by_name('resnet_model/initial_max_pool:0')
<add> block_layer1 = graph.get_tensor_by_name('resnet_model/block_layer1:0')
<add> block_layer2 = graph.get_tensor_by_name('resnet_model/block_layer2:0')
<add> block_layer3 = graph.get_tensor_by_name('resnet_model/block_layer3:0')
<add> block_layer4 = graph.get_tensor_by_name('resnet_model/block_layer4:0')
<add> reduce_mean = graph.get_tensor_by_name('resnet_model/final_reduce_mean:0')
<add> dense = graph.get_tensor_by_name('resnet_model/final_dense:0')
<ide>
<ide> self.assertAllEqual(initial_conv.shape, reshape((1, 64, 112, 112)))
<ide> self.assertAllEqual(max_pool.shape, reshape((1, 64, 56, 56)))
<ide> def reshape(shape):
<ide> self.assertAllEqual(dense.shape, (1, _LABEL_CLASSES))
<ide> self.assertAllEqual(output.shape, (1, _LABEL_CLASSES))
<ide>
<add> def tensor_shapes_helper(self, resnet_size, version, with_gpu=False):
<add> self._tensor_shapes_helper(resnet_size=resnet_size, version=version,
<add> dtype=tf.float32, with_gpu=with_gpu)
<add> self._tensor_shapes_helper(resnet_size=resnet_size, version=version,
<add> dtype=tf.float16, with_gpu=with_gpu)
<add>
<ide> def test_tensor_shapes_resnet_18_v1(self):
<ide> self.tensor_shapes_helper(18, version=1)
<ide>
<ide> def test_tensor_shapes_resnet_200_with_gpu_v1(self):
<ide> def test_tensor_shapes_resnet_200_with_gpu_v2(self):
<ide> self.tensor_shapes_helper(200, version=2, with_gpu=True)
<ide>
<del> def resnet_model_fn_helper(self, mode, version, multi_gpu=False):
<add> def _resnet_model_fn_helper(self, mode, version, dtype, multi_gpu):
<ide> """Tests that the EstimatorSpec is given the appropriate arguments."""
<del> tf.train.create_global_step()
<del>
<del> input_fn = imagenet_main.get_synth_input_fn()
<del> dataset = input_fn(True, '', _BATCH_SIZE)
<del> iterator = dataset.make_one_shot_iterator()
<del> features, labels = iterator.get_next()
<del> spec = imagenet_main.imagenet_model_fn(
<del> features, labels, mode, {
<del> 'resnet_size': 50,
<del> 'data_format': 'channels_last',
<del> 'batch_size': _BATCH_SIZE,
<del> 'version': version,
<del> 'multi_gpu': multi_gpu,
<del> })
<del>
<del> predictions = spec.predictions
<del> self.assertAllEqual(predictions['probabilities'].shape,
<del> (_BATCH_SIZE, _LABEL_CLASSES))
<del> self.assertEqual(predictions['probabilities'].dtype, tf.float32)
<del> self.assertAllEqual(predictions['classes'].shape, (_BATCH_SIZE,))
<del> self.assertEqual(predictions['classes'].dtype, tf.int64)
<del>
<del> if mode != tf.estimator.ModeKeys.PREDICT:
<del> loss = spec.loss
<del> self.assertAllEqual(loss.shape, ())
<del> self.assertEqual(loss.dtype, tf.float32)
<del>
<del> if mode == tf.estimator.ModeKeys.EVAL:
<del> eval_metric_ops = spec.eval_metric_ops
<del> self.assertAllEqual(eval_metric_ops['accuracy'][0].shape, ())
<del> self.assertAllEqual(eval_metric_ops['accuracy'][1].shape, ())
<del> self.assertEqual(eval_metric_ops['accuracy'][0].dtype, tf.float32)
<del> self.assertEqual(eval_metric_ops['accuracy'][1].dtype, tf.float32)
<add> with tf.Graph().as_default() as g:
<add> tf.train.create_global_step()
<add>
<add> input_fn = imagenet_main.get_synth_input_fn()
<add> dataset = input_fn(True, '', _BATCH_SIZE)
<add> iterator = dataset.make_one_shot_iterator()
<add> features, labels = iterator.get_next()
<add> spec = imagenet_main.imagenet_model_fn(
<add> features, labels, mode, {
<add> 'dtype': dtype,
<add> 'resnet_size': 50,
<add> 'data_format': 'channels_last',
<add> 'batch_size': _BATCH_SIZE,
<add> 'version': version,
<add> 'loss_scale': 128 if dtype == tf.float16 else 1,
<add> 'multi_gpu': multi_gpu,
<add> })
<add>
<add> predictions = spec.predictions
<add> self.assertAllEqual(predictions['probabilities'].shape,
<add> (_BATCH_SIZE, _LABEL_CLASSES))
<add> self.assertEqual(predictions['probabilities'].dtype, tf.float32)
<add> self.assertAllEqual(predictions['classes'].shape, (_BATCH_SIZE,))
<add> self.assertEqual(predictions['classes'].dtype, tf.int64)
<add>
<add> if mode != tf.estimator.ModeKeys.PREDICT:
<add> loss = spec.loss
<add> self.assertAllEqual(loss.shape, ())
<add> self.assertEqual(loss.dtype, tf.float32)
<add>
<add> if mode == tf.estimator.ModeKeys.EVAL:
<add> eval_metric_ops = spec.eval_metric_ops
<add> self.assertAllEqual(eval_metric_ops['accuracy'][0].shape, ())
<add> self.assertAllEqual(eval_metric_ops['accuracy'][1].shape, ())
<add> self.assertEqual(eval_metric_ops['accuracy'][0].dtype, tf.float32)
<add> self.assertEqual(eval_metric_ops['accuracy'][1].dtype, tf.float32)
<add>
<add> tensors_to_check = ('initial_conv:0', 'initial_max_pool:0',
<add> 'block_layer1:0', 'block_layer2:0',
<add> 'block_layer3:0', 'block_layer4:0',
<add> 'final_reduce_mean:0', 'final_dense:0')
<add>
<add> for tensor_name in tensors_to_check:
<add> tensor = g.get_tensor_by_name('resnet_model/' + tensor_name)
<add> self.assertEqual(tensor.dtype, dtype,
<add> 'Tensor {} has dtype {}, while dtype {} was '
<add> 'expected'.format(tensor, tensor.dtype,
<add> dtype))
<add>
<add> def resnet_model_fn_helper(self, mode, version, multi_gpu=False):
<add> self._resnet_model_fn_helper(mode=mode, version=version, dtype=tf.float32,
<add> multi_gpu=multi_gpu)
<add> self._resnet_model_fn_helper(mode=mode, version=version, dtype=tf.float16,
<add> multi_gpu=multi_gpu)
<ide>
<ide> def test_resnet_model_fn_train_mode_v1(self):
<ide> self.resnet_model_fn_helper(tf.estimator.ModeKeys.TRAIN, version=1)
<ide> def test_resnet_model_fn_predict_mode_v1(self):
<ide> def test_resnet_model_fn_predict_mode_v2(self):
<ide> self.resnet_model_fn_helper(tf.estimator.ModeKeys.PREDICT, version=2)
<ide>
<del> def test_imagenetmodel_shape(self):
<add> def _test_imagenetmodel_shape(self, version):
<ide> batch_size = 135
<ide> num_classes = 246
<ide>
<del> for version in (1, 2):
<del> model = imagenet_main.ImagenetModel(
<del> 50, data_format='channels_last', num_classes=num_classes,
<del> version=version)
<del> fake_input = tf.random_uniform([batch_size, 224, 224, 3])
<del> output = model(fake_input, training=True)
<add> model = imagenet_main.ImagenetModel(
<add> 50, data_format='channels_last', num_classes=num_classes,
<add> version=version)
<add>
<add> fake_input = tf.random_uniform([batch_size, 224, 224, 3])
<add> output = model(fake_input, training=True)
<add>
<add> self.assertAllEqual(output.shape, (batch_size, num_classes))
<add>
<add> def test_imagenetmodel_shape_v1(self):
<add> self._test_imagenetmodel_shape(version=1)
<ide>
<del> self.assertAllEqual(output.shape, (batch_size, num_classes))
<add> def test_imagenetmodel_shape_v2(self):
<add> self._test_imagenetmodel_shape(version=2)
<ide>
<ide> def test_imagenet_end_to_end_synthetic_v1(self):
<ide> integration.run_synthetic(
<ide><path>official/resnet/resnet_model.py
<ide> _BATCH_NORM_DECAY = 0.997
<ide> _BATCH_NORM_EPSILON = 1e-5
<ide> DEFAULT_VERSION = 2
<add>DEFAULT_DTYPE = tf.float32
<add>CASTABLE_TYPES = (tf.float16,)
<add>ALLOWED_TYPES = (DEFAULT_DTYPE,) + CASTABLE_TYPES
<ide>
<ide>
<ide> ################################################################################
<ide> def __init__(self, resnet_size, bottleneck, num_classes, num_filters,
<ide> kernel_size,
<ide> conv_stride, first_pool_size, first_pool_stride,
<ide> second_pool_size, second_pool_stride, block_sizes, block_strides,
<del> final_size, version=DEFAULT_VERSION, data_format=None):
<add> final_size, version=DEFAULT_VERSION, data_format=None,
<add> dtype=DEFAULT_DTYPE):
<ide> """Creates a model for classifying an image.
<ide>
<ide> Args:
<ide> def __init__(self, resnet_size, bottleneck, num_classes, num_filters,
<ide> See README for details. Valid values: [1, 2]
<ide> data_format: Input format ('channels_last', 'channels_first', or None).
<ide> If set to None, the format is dependent on whether a GPU is available.
<add> dtype: The TensorFlow dtype to use for calculations. If not specified,
<add> tf.float32 is used.
<ide>
<ide> Raises:
<ide> ValueError: if invalid version is selected.
<ide> def __init__(self, resnet_size, bottleneck, num_classes, num_filters,
<ide> else:
<ide> self.block_fn = _building_block_v2
<ide>
<add> if dtype not in ALLOWED_TYPES:
<add> raise ValueError('dtype must be one of: {}'.format(ALLOWED_TYPES))
<add>
<ide> self.data_format = data_format
<ide> self.num_classes = num_classes
<ide> self.num_filters = num_filters
<ide> def __init__(self, resnet_size, bottleneck, num_classes, num_filters,
<ide> self.block_sizes = block_sizes
<ide> self.block_strides = block_strides
<ide> self.final_size = final_size
<add> self.dtype = dtype
<add>
<add> def _custom_dtype_getter(self, getter, name, shape=None, dtype=DEFAULT_DTYPE,
<add> *args, **kwargs):
<add> """Creates variables in fp32, then casts to fp16 if necessary.
<add>
<add> This function is a custom getter. A custom getter is a function with the
<add> same signature as tf.get_variable, except it has an additional getter
<add> parameter. Custom getters can be passed as the `custom_getter` parameter of
<add> tf.variable_scope. Then, tf.get_variable will call the custom getter,
<add> instead of directly getting a variable itself. This can be used to change
<add> the types of variables that are retrieved with tf.get_variable.
<add> The `getter` parameter is the underlying variable getter, that would have
<add> been called if no custom getter was used. Custom getters typically get a
<add> variable with `getter`, then modify it in some way.
<add>
<add> This custom getter will create an fp32 variable. If a low precision
<add> (e.g. float16) variable was requested, it will then cast the variable to the
<add> requested dtype. The reason we do not directly create variables in low
<add> precision dtypes is that applying small gradients to such variables may
<add> cause the variable not to change.
<add>
<add> Args:
<add> getter: The underlying variable getter, that has the same signature as
<add> tf.get_variable and returns a variable.
<add> name: The name of the variable to get.
<add> shape: The shape of the variable to get.
<add> dtype: The dtype of the variable to get. Note that if this is a low
<add> precision dtype, the variable will be created as a tf.float32 variable,
<add> then cast to the appropriate dtype.
<add> *args: Additional arguments to pass unmodified to getter.
<add> **kwargs: Additional keyword arguments to pass unmodified to getter.
<add>
<add> Returns:
<add> A variable which is cast to fp16 if necessary.
<add> """
<add>
<add> if dtype in CASTABLE_TYPES:
<add> var = getter(name, shape, tf.float32, *args, **kwargs)
<add> return tf.cast(var, dtype=dtype, name=name + '_cast')
<add> else:
<add> return getter(name, shape, dtype, *args, **kwargs)
<add>
<add> def _model_variable_scope(self):
<add> """Returns a variable scope that the model should be created under.
<add>
<add> If self.dtype is a castable type, model variables will be created in fp32
<add> then cast to self.dtype before being used.
<add>
<add> Returns:
<add> A variable scope for the model.
<add> """
<add>
<add> return tf.variable_scope('resnet_model',
<add> custom_getter=self._custom_dtype_getter)
<ide>
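A quick way to see the underflow that the getter guards against, as a minimal standalone sketch (NumPy only; the variable names here are illustrative):

import numpy as np

# fp16 keeps roughly 3 significant decimal digits, so a typical small
# update (gradient * learning rate) can round away to nothing:
w = np.float16(1.0)
assert w + np.float16(1e-4) == w          # the fp16 weight never moves

# An fp32 master copy of the same weight does record the update:
assert np.float32(1.0) + np.float32(1e-4) != np.float32(1.0)

This is why _custom_dtype_getter always builds the variable as tf.float32 and only casts the result: the optimizer reads and updates the fp32 master copy, while the forward pass consumes the fp16 cast.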
<ide> def __call__(self, inputs, training):
<ide> """Add operations to classify a batch of input images.
<ide> def __call__(self, inputs, training):
<ide> A logits Tensor with shape [<batch_size>, self.num_classes].
<ide> """
<ide>
<del> if self.data_format == 'channels_first':
<del> # Convert the inputs from channels_last (NHWC) to channels_first (NCHW).
<del> # This provides a large performance boost on GPU. See
<del> # https://www.tensorflow.org/performance/performance_guide#data_formats
<del> inputs = tf.transpose(inputs, [0, 3, 1, 2])
<del>
<del> inputs = conv2d_fixed_padding(
<del> inputs=inputs, filters=self.num_filters, kernel_size=self.kernel_size,
<del> strides=self.conv_stride, data_format=self.data_format)
<del> inputs = tf.identity(inputs, 'initial_conv')
<del>
<del> if self.first_pool_size:
<del> inputs = tf.layers.max_pooling2d(
<del> inputs=inputs, pool_size=self.first_pool_size,
<del> strides=self.first_pool_stride, padding='SAME',
<del> data_format=self.data_format)
<del> inputs = tf.identity(inputs, 'initial_max_pool')
<del>
<del> for i, num_blocks in enumerate(self.block_sizes):
<del> num_filters = self.num_filters * (2**i)
<del> inputs = block_layer(
<del> inputs=inputs, filters=num_filters, bottleneck=self.bottleneck,
<del> block_fn=self.block_fn, blocks=num_blocks,
<del> strides=self.block_strides[i], training=training,
<del> name='block_layer{}'.format(i + 1), data_format=self.data_format)
<del>
<del> inputs = batch_norm(inputs, training, self.data_format)
<del> inputs = tf.nn.relu(inputs)
<del>
<del> # The current top layer has shape
<del> # `batch_size x pool_size x pool_size x final_size`.
<del> # ResNet does an Average Pooling layer over pool_size,
<del> # but that is the same as doing a reduce_mean. We do a reduce_mean
<del> # here because it performs better than AveragePooling2D.
<del> axes = [2, 3] if self.data_format == 'channels_first' else [1, 2]
<del> inputs = tf.reduce_mean(inputs, axes, keepdims=True)
<del> inputs = tf.identity(inputs, 'final_reduce_mean')
<del>
<del> inputs = tf.reshape(inputs, [-1, self.final_size])
<del> inputs = tf.layers.dense(inputs=inputs, units=self.num_classes)
<del> inputs = tf.identity(inputs, 'final_dense')
<del>
<del> return inputs
<add> with self._model_variable_scope():
<add> if self.data_format == 'channels_first':
<add> # Convert the inputs from channels_last (NHWC) to channels_first (NCHW).
<add> # This provides a large performance boost on GPU. See
<add> # https://www.tensorflow.org/performance/performance_guide#data_formats
<add> inputs = tf.transpose(inputs, [0, 3, 1, 2])
<add>
<add> inputs = conv2d_fixed_padding(
<add> inputs=inputs, filters=self.num_filters, kernel_size=self.kernel_size,
<add> strides=self.conv_stride, data_format=self.data_format)
<add> inputs = tf.identity(inputs, 'initial_conv')
<add>
<add> if self.first_pool_size:
<add> inputs = tf.layers.max_pooling2d(
<add> inputs=inputs, pool_size=self.first_pool_size,
<add> strides=self.first_pool_stride, padding='SAME',
<add> data_format=self.data_format)
<add> inputs = tf.identity(inputs, 'initial_max_pool')
<add>
<add> for i, num_blocks in enumerate(self.block_sizes):
<add> num_filters = self.num_filters * (2**i)
<add> inputs = block_layer(
<add> inputs=inputs, filters=num_filters, bottleneck=self.bottleneck,
<add> block_fn=self.block_fn, blocks=num_blocks,
<add> strides=self.block_strides[i], training=training,
<add> name='block_layer{}'.format(i + 1), data_format=self.data_format)
<add>
<add> inputs = batch_norm(inputs, training, self.data_format)
<add> inputs = tf.nn.relu(inputs)
<add>
<add> # The current top layer has shape
<add> # `batch_size x pool_size x pool_size x final_size`.
<add> # ResNet does an Average Pooling layer over pool_size,
<add> # but that is the same as doing a reduce_mean. We do a reduce_mean
<add> # here because it performs better than AveragePooling2D.
<add> axes = [2, 3] if self.data_format == 'channels_first' else [1, 2]
<add> inputs = tf.reduce_mean(inputs, axes, keepdims=True)
<add> inputs = tf.identity(inputs, 'final_reduce_mean')
<add>
<add> inputs = tf.reshape(inputs, [-1, self.final_size])
<add> inputs = tf.layers.dense(inputs=inputs, units=self.num_classes)
<add> inputs = tf.identity(inputs, 'final_dense')
<add> return inputs
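The reduce_mean comment above is easy to verify: averaging over the full spatial extent is exactly a global average pool. A small check under TF 1.x APIs (shapes chosen arbitrarily):

import numpy as np
import tensorflow as tf

x = tf.constant(np.random.rand(4, 7, 7, 64).astype(np.float32))  # NHWC

pooled = tf.layers.average_pooling2d(x, pool_size=7, strides=7)
reduced = tf.reduce_mean(x, axis=[1, 2], keepdims=True)

with tf.Session() as sess:
  a, b = sess.run([pooled, reduced])
  np.testing.assert_allclose(a, b, rtol=1e-5)  # identical up to rounding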
<ide><path>official/resnet/resnet_run_loop.py
<ide> def learning_rate_fn(global_step):
<ide>
<ide> def resnet_model_fn(features, labels, mode, model_class,
<ide> resnet_size, weight_decay, learning_rate_fn, momentum,
<del> data_format, version, loss_filter_fn=None, multi_gpu=False):
<add> data_format, version, loss_scale,
<add> loss_filter_fn=None, multi_gpu=False,
<add> dtype=resnet_model.DEFAULT_DTYPE):
<ide> """Shared functionality for different resnet model_fns.
<ide>
<ide> Initializes the ResnetModel representing the model layers
<ide> def resnet_model_fn(features, labels, mode, model_class,
<ide> If set to None, the format is dependent on whether a GPU is available.
<ide> version: Integer representing which version of the ResNet network to use.
<ide> See README for details. Valid values: [1, 2]
<add> loss_scale: The factor by which to scale the loss for numerical stability.
<add> A detailed summary is given in the arg parser help text.
<ide> loss_filter_fn: function that takes a string variable name and returns
<ide> True if the var should be included in loss calculation, and False
<ide> otherwise. If None, batch_normalization variables will be excluded
<ide> from the loss.
<ide> multi_gpu: If True, wrap the optimizer in a TowerOptimizer suitable for
<ide> data-parallel distribution across multiple GPUs.
<add> dtype: The TensorFlow dtype to use for calculations.
<ide>
<ide> Returns:
<ide> EstimatorSpec parameterized according to the input params and the
<ide> def resnet_model_fn(features, labels, mode, model_class,
<ide> # Generate a summary node for the images
<ide> tf.summary.image('images', features, max_outputs=6)
<ide>
<del> model = model_class(resnet_size, data_format, version=version)
<add> features = tf.cast(features, dtype)
<add>
<add> model = model_class(resnet_size, data_format, version=version, dtype=dtype)
<add>
<ide> logits = model(features, mode == tf.estimator.ModeKeys.TRAIN)
<ide>
<add> # This acts as a no-op if the logits are already in fp32 (provided logits are
<add> # not a SparseTensor). If dtype is low precision, logits must be cast to
<add> # fp32 for numerical stability.
<add> logits = tf.cast(logits, tf.float32)
<add>
<ide> predictions = {
<ide> 'classes': tf.argmax(logits, axis=1),
<ide> 'probabilities': tf.nn.softmax(logits, name='softmax_tensor')
<ide> def exclude_batch_norm(name):
<ide>
<ide> # Add weight decay to the loss.
<ide> l2_loss = weight_decay * tf.add_n(
<del> [tf.nn.l2_loss(v) for v in tf.trainable_variables()
<add> # loss is computed using fp32 for numerical stability.
<add> [tf.nn.l2_loss(tf.cast(v, tf.float32)) for v in tf.trainable_variables()
<ide> if loss_filter_fn(v.name)])
<ide> tf.summary.scalar('l2_loss', l2_loss)
<ide> loss = cross_entropy + l2_loss
<ide> def exclude_batch_norm(name):
<ide> if multi_gpu:
<ide> optimizer = tf.contrib.estimator.TowerOptimizer(optimizer)
<ide>
<add> if loss_scale != 1:
<add> # When computing fp16 gradients, intermediate tensor values are often
<add> # so small that they underflow to 0. To avoid this, we multiply the loss by
<add> # loss_scale to make these tensor values loss_scale times bigger.
<add> scaled_grad_vars = optimizer.compute_gradients(loss * loss_scale)
<add>
<add> # Once the gradient computation is complete we can scale the gradients
<add> # back to the correct scale before passing them to the optimizer.
<add> unscaled_grad_vars = [(grad / loss_scale, var)
<add> for grad, var in scaled_grad_vars]
<add> minimize_op = optimizer.apply_gradients(unscaled_grad_vars, global_step)
<add> else:
<add> minimize_op = optimizer.minimize(loss, global_step)
<add>
<ide> update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
<del> train_op = tf.group(optimizer.minimize(loss, global_step), update_ops)
<add> train_op = tf.group(minimize_op, update_ops)
<ide> else:
<ide> train_op = None
<ide>
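The scale-then-unscale pattern above is mathematically a no-op, since the gradients of loss * s are exactly s times the gradients of loss. A minimal standalone sketch of the same pattern (TF 1.x style, names illustrative):

import tensorflow as tf

loss_scale = 128
x = tf.Variable(2.0)
loss = tf.square(x)                       # d(loss)/dx = 2x = 4 at x = 2

optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)

# Backprop runs on loss * loss_scale, keeping intermediate fp16 tensors
# out of the underflow zone; dividing afterwards restores exact values.
scaled = optimizer.compute_gradients(loss * loss_scale)
unscaled = [(grad / loss_scale, var) for grad, var in scaled]
train_op = optimizer.apply_gradients(unscaled)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(train_op)
  print(sess.run(x))                      # 1.6, same as without scaling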
<ide> def resnet_main(flags, model_function, input_function, shape=None):
<ide> 'batch_size': flags.batch_size,
<ide> 'multi_gpu': flags.multi_gpu,
<ide> 'version': flags.version,
<add> 'loss_scale': flags.loss_scale,
<add> 'dtype': flags.dtype
<ide> })
<ide>
<ide> if flags.benchmark_log_dir is not None:
<ide> benchmark_logger = logger.BenchmarkLogger(flags.benchmark_log_dir)
<del> benchmark_logger.log_run_info("resnet")
<add> benchmark_logger.log_run_info('resnet')
<ide> else:
<ide> benchmark_logger = None
<ide>
<ide> def __init__(self, resnet_size_choices=None):
<ide> help='[default: %(default)s] The size of the ResNet model to use.',
<ide> metavar='<RS>' if resnet_size_choices is None else None
<ide> )
<add>
<add> def parse_args(self, args=None, namespace=None):
<add> args = super(ResnetArgParser, self).parse_args(
<add> args=args, namespace=namespace)
<add>
<add> # handle coupling between dtype and loss_scale
<add> parsers.parse_dtype_info(args)
<add>
<add> return args
<ide><path>official/utils/arg_parsers/parsers.py
<ide> def __init__(self):
<ide> from __future__ import division
<ide> from __future__ import print_function
<ide>
<del>
<ide> import argparse
<ide>
<add>import tensorflow as tf
<add>
<add>
<add># Map string to (TensorFlow dtype, default loss scale)
<add>DTYPE_MAP = {
<add> "fp16": (tf.float16, 128),
<add> "fp32": (tf.float32, 1),
<add>}
<add>
<add>
<add>def parse_dtype_info(flags):
<add> """Convert dtype string to tf dtype, and set loss_scale default as needed.
<add>
<add> Args:
<add> flags: namespace object returned by arg parser.
<add>
<add> Raises:
<add> ValueError: If an invalid dtype is provided.
<add> """
<add> if flags.dtype in (i[0] for i in DTYPE_MAP.values()):
<add> return # Make function idempotent
<add>
<add> try:
<add> flags.dtype, default_loss_scale = DTYPE_MAP[flags.dtype]
<add> except KeyError:
<add> raise ValueError("Invalid dtype: {}".format(flags.dtype))
<add>
<add> flags.loss_scale = flags.loss_scale or default_loss_scale
<add>
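A sketch of how the coupling behaves from the command line, assuming the DTYPE_MAP, parse_dtype_info, and tf imports above are in scope (the parser here is a stand-in for PerformanceParser):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--dtype", default="fp32", choices=list(DTYPE_MAP.keys()))
parser.add_argument("--loss_scale", type=int)

args = parser.parse_args(["--dtype", "fp16"])
parse_dtype_info(args)
assert args.dtype == tf.float16 and args.loss_scale == 128

args = parser.parse_args(["--dtype", "fp16", "--loss_scale", "512"])
parse_dtype_info(args)
assert args.loss_scale == 512   # an explicit flag always wins

parse_dtype_info(args)          # safe to call twice: dtype is already a tf dtype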
<ide>
<ide> class BaseParser(argparse.ArgumentParser):
<ide> """Parser to contain flags which will be nearly universal across models.
<ide> class PerformanceParser(argparse.ArgumentParser):
<ide> """
<ide>
<ide> def __init__(self, add_help=False, num_parallel_calls=True, inter_op=True,
<del> intra_op=True, use_synthetic_data=True, max_train_steps=True):
<add> intra_op=True, use_synthetic_data=True, max_train_steps=True,
<add> dtype=True):
<ide> super(PerformanceParser, self).__init__(add_help=add_help)
<ide>
<ide> if num_parallel_calls:
<ide> def __init__(self, add_help=False, num_parallel_calls=True, inter_op=True,
<ide> metavar="<MTS>"
<ide> )
<ide>
<add> if dtype:
<add> self.add_argument(
<add> "--dtype", "-dt",
<add> default="fp32",
<add> choices=list(DTYPE_MAP.keys()),
<add> help="[default: %(default)s] {%(choices)s} The TensorFlow datatype "
<add> "used for calculations. Variables may be cast to a higher"
<add> "precision on a case-by-case basis for numerical stability.",
<add> metavar="<DT>"
<add> )
<add>
<add> self.add_argument(
<add> "--loss_scale", "-ls",
<add> type=int,
<add> help="[default: %(default)s] The amount to scale the loss by when "
<add> "the model is run. Before gradients are computed, the loss is "
<add> "multiplied by the loss scale, making all gradients loss_scale "
<add> "times larger. To adjust for this, gradients are divided by the "
<add> "loss scale before being applied to variables. This is "
<add> "mathematically equivalent to training without a loss scale, "
<add> "but the loss scale helps avoid some intermediate gradients "
<add> "from underflowing to zero. If not provided the default for "
<add> "fp16 is 128 and 1 for all other dtypes.",
<add> )
<add>
<ide>
<ide> class ImageModelParser(argparse.ArgumentParser):
<ide> """Default parser for specification image specific behavior.
<ide><path>official/utils/arg_parsers/parsers_test.py
<ide> import argparse
<ide> import unittest
<ide>
<add>import tensorflow as tf # pylint: disable=g-bad-import-order
<ide>
<ide> from official.utils.arg_parsers import parsers
<ide>
<ide> def test_booleans(self):
<ide> assert namespace.multi_gpu
<ide> assert namespace.use_synthetic_data
<ide>
<add> def test_parse_dtype_info(self):
<add> parser = TestParser()
<add> for dtype_str, tf_dtype, loss_scale in [["fp16", tf.float16, 128],
<add> ["fp32", tf.float32, 1]]:
<add> args = parser.parse_args(["--dtype", dtype_str])
<add> parsers.parse_dtype_info(args)
<add>
<add> assert args.dtype == tf_dtype
<add> assert args.loss_scale == loss_scale
<add>
<add> args = parser.parse_args(["--dtype", dtype_str, "--loss_scale", "5"])
<add> parsers.parse_dtype_info(args)
<add>
<add> assert args.loss_scale == 5
<add>
<add> with self.assertRaises(SystemExit):
<add> parser.parse_args(["--dtype", "int8"])
<add>
<ide>
<ide> if __name__ == "__main__":
<ide> unittest.main()
| 9
|
PHP
|
PHP
|
add support for amazon elasticache
|
c7dacb05e70fe14472663535e5734872a795daa4
|
<ide><path>lib/Cake/Cache/Engine/MemcachedEngine.php
<ide> protected function _setOptions() {
<ide> $this->_Memcached->setOption(Memcached::OPT_SERIALIZER, Memcached::SERIALIZER_IGBINARY);
<ide> }
<ide>
<add> // Check for Amazon ElastiCache instance
<add> if (defined('Memcached::OPT_CLIENT_MODE') && defined('Memcached::DYNAMIC_CLIENT_MODE')) {
<add> $this->_Memcached->setOption(Memcached::OPT_CLIENT_MODE, Memcached::DYNAMIC_CLIENT_MODE);
<add> }
<add>
<ide> $this->_Memcached->setOption(Memcached::OPT_COMPRESSION, (bool)$this->settings['compress']);
<ide> }
<ide>
| 1
|
Javascript
|
Javascript
|
fix typeof comparison
|
f34e88a18db506bf425ace5dfcc3aa32d449fe6f
|
<ide><path>test/parallel/test-assert-calltracker-report.js
<ide> callsfoo();
<ide>
<ide> // Ensures that foo was removed from the callChecks array after being called the
<ide> // expected number of times.
<del>if (typeof tracker.report()[0] === undefined) {
<add>if (typeof tracker.report()[0] === 'undefined') {
<ide> process.exit(1);
<ide> }
<ide>
| 1
|
Go
|
Go
|
adopt text/template in node inspect
|
5dbd6afb5144a937df73bfe42364e583e789d80d
|
<ide><path>cli/command/formatter/node.go
<ide> package formatter
<ide>
<ide> import (
<add> "fmt"
<add> "strings"
<add>
<ide> "github.com/docker/docker/api/types"
<ide> "github.com/docker/docker/api/types/swarm"
<ide> "github.com/docker/docker/cli/command"
<add> "github.com/docker/docker/cli/command/inspect"
<add> units "github.com/docker/go-units"
<ide> )
<ide>
<ide> const (
<del> defaultNodeTableFormat = "table {{.ID}} {{if .Self}}*{{else}} {{ end }}\t{{.Hostname}}\t{{.Status}}\t{{.Availability}}\t{{.ManagerStatus}}"
<del>
<add> defaultNodeTableFormat = "table {{.ID}} {{if .Self}}*{{else}} {{ end }}\t{{.Hostname}}\t{{.Status}}\t{{.Availability}}\t{{.ManagerStatus}}"
<add> nodeInspectPrettyTemplate Format = `ID: {{.ID}}
<add>{{- if .Name }}
<add>Name: {{.Name}}
<add>{{- end }}
<add>{{- if .Labels }}
<add>Labels:
<add>{{- range $k, $v := .Labels }}
<add> - {{ $k }}{{if $v }}={{ $v }}{{ end }}
<add>{{- end }}{{ end }}
<add>Hostname: {{.Hostname}}
<add>Joined at: {{.CreatedAt}}
<add>Status:
<add> State: {{.StatusState}}
<add> {{- if .HasStatusMessage}}
<add> Message: {{.StatusMessage}}
<add> {{- end}}
<add> Availability: {{.SpecAvailability}}
<add> {{- if .HasStatusAddr}}
<add> Address: {{.StatusAddr}}
<add> {{- end}}
<add>{{- if .HasManagerStatus}}
<add>Manager Status:
<add> Address: {{.ManagerStatusAddr}}
<add> Raft Status: {{.ManagerStatusReachability}}
<add> {{- if .IsManagerStatusLeader}}
<add> Leader: Yes
<add> {{- else}}
<add> Leader: No
<add> {{- end}}
<add>{{- end}}
<add>Platform:
<add> Operating System: {{.PlatformOS}}
<add> Architecture: {{.PlatformArchitecture}}
<add>Resources:
<add> CPUs: {{.ResourceNanoCPUs}}
<add> Memory: {{.ResourceMemory}}
<add>{{- if .HasEnginePlugins}}
<add>Plugins:
<add>{{- range $k, $v := .EnginePlugins }}
<add> {{ $k }}:{{if $v }} {{ $v }}{{ end }}
<add>{{- end }}
<add>{{- end }}
<add>Engine Version: {{.EngineVersion}}
<add>{{- if .EngineLabels}}
<add>Engine Labels:
<add>{{- range $k, $v := .EngineLabels }}
<add> - {{ $k }}{{if $v }}={{ $v }}{{ end }}
<add>{{- end }}{{- end }}
<add>`
<ide> nodeIDHeader = "ID"
<ide> selfHeader = ""
<ide> hostnameHeader = "HOSTNAME"
<ide> const (
<ide> // NewNodeFormat returns a Format for rendering using a node Context
<ide> func NewNodeFormat(source string, quiet bool) Format {
<ide> switch source {
<add> case PrettyFormatKey:
<add> return nodeInspectPrettyTemplate
<ide> case TableFormatKey:
<ide> if quiet {
<ide> return defaultQuietFormat
<ide> func (c *nodeContext) ManagerStatus() string {
<ide> }
<ide> return command.PrettyPrint(reachability)
<ide> }
<add>
<add>// NodeInspectWrite renders the context for a list of nodes
<add>func NodeInspectWrite(ctx Context, refs []string, getRef inspect.GetRefFunc) error {
<add> if ctx.Format != nodeInspectPrettyTemplate {
<add> return inspect.Inspect(ctx.Output, refs, string(ctx.Format), getRef)
<add> }
<add> render := func(format func(subContext subContext) error) error {
<add> for _, ref := range refs {
<add> nodeI, _, err := getRef(ref)
<add> if err != nil {
<add> return err
<add> }
<add> node, ok := nodeI.(swarm.Node)
<add> if !ok {
<add> return fmt.Errorf("got wrong object to inspect: %v", ok)
<add> }
<add> if err := format(&nodeInspectContext{Node: node}); err != nil {
<add> return err
<add> }
<add> }
<add> return nil
<add> }
<add> return ctx.Write(&nodeInspectContext{}, render)
<add>}
<add>
<add>type nodeInspectContext struct {
<add> swarm.Node
<add> subContext
<add>}
<add>
<add>func (ctx *nodeInspectContext) ID() string {
<add> return ctx.Node.ID
<add>}
<add>
<add>func (ctx *nodeInspectContext) Name() string {
<add> return ctx.Node.Spec.Name
<add>}
<add>
<add>func (ctx *nodeInspectContext) Labels() map[string]string {
<add> return ctx.Node.Spec.Labels
<add>}
<add>
<add>func (ctx *nodeInspectContext) Hostname() string {
<add> return ctx.Node.Description.Hostname
<add>}
<add>
<add>func (ctx *nodeInspectContext) CreatedAt() string {
<add> return command.PrettyPrint(ctx.Node.CreatedAt)
<add>}
<add>
<add>func (ctx *nodeInspectContext) StatusState() string {
<add> return command.PrettyPrint(ctx.Node.Status.State)
<add>}
<add>
<add>func (ctx *nodeInspectContext) HasStatusMessage() bool {
<add> return ctx.Node.Status.Message != ""
<add>}
<add>
<add>func (ctx *nodeInspectContext) StatusMessage() string {
<add> return command.PrettyPrint(ctx.Node.Status.Message)
<add>}
<add>
<add>func (ctx *nodeInspectContext) SpecAvailability() string {
<add> return command.PrettyPrint(ctx.Node.Spec.Availability)
<add>}
<add>
<add>func (ctx *nodeInspectContext) HasStatusAddr() bool {
<add> return ctx.Node.Status.Addr != ""
<add>}
<add>
<add>func (ctx *nodeInspectContext) StatusAddr() string {
<add> return ctx.Node.Status.Addr
<add>}
<add>
<add>func (ctx *nodeInspectContext) HasManagerStatus() bool {
<add> return ctx.Node.ManagerStatus != nil
<add>}
<add>
<add>func (ctx *nodeInspectContext) ManagerStatusAddr() string {
<add> return ctx.Node.ManagerStatus.Addr
<add>}
<add>
<add>func (ctx *nodeInspectContext) ManagerStatusReachability() string {
<add> return command.PrettyPrint(ctx.Node.ManagerStatus.Reachability)
<add>}
<add>
<add>func (ctx *nodeInspectContext) IsManagerStatusLeader() bool {
<add> return ctx.Node.ManagerStatus.Leader
<add>}
<add>
<add>func (ctx *nodeInspectContext) PlatformOS() string {
<add> return ctx.Node.Description.Platform.OS
<add>}
<add>
<add>func (ctx *nodeInspectContext) PlatformArchitecture() string {
<add> return ctx.Node.Description.Platform.Architecture
<add>}
<add>
<add>func (ctx *nodeInspectContext) ResourceNanoCPUs() int {
<add> if ctx.Node.Description.Resources.NanoCPUs == 0 {
<add> return 0
<add> }
<add> return int(ctx.Node.Description.Resources.NanoCPUs) / 1e9
<add>}
<add>
<add>func (ctx *nodeInspectContext) ResourceMemory() string {
<add> if ctx.Node.Description.Resources.MemoryBytes == 0 {
<add> return ""
<add> }
<add> return units.BytesSize(float64(ctx.Node.Description.Resources.MemoryBytes))
<add>}
<add>
<add>func (ctx *nodeInspectContext) HasEnginePlugins() bool {
<add> return len(ctx.Node.Description.Engine.Plugins) > 0
<add>}
<add>
<add>func (ctx *nodeInspectContext) EnginePlugins() map[string]string {
<add> pluginMap := map[string][]string{}
<add> for _, p := range ctx.Node.Description.Engine.Plugins {
<add> pluginMap[p.Type] = append(pluginMap[p.Type], p.Name)
<add> }
<add>
<add> pluginNamesByType := map[string]string{}
<add> for k, v := range pluginMap {
<add> pluginNamesByType[k] = strings.Join(v, ", ")
<add> }
<add> return pluginNamesByType
<add>}
<add>
<add>func (ctx *nodeInspectContext) EngineLabels() map[string]string {
<add> return ctx.Node.Description.Engine.Labels
<add>}
<add>
<add>func (ctx *nodeInspectContext) EngineVersion() string {
<add> return ctx.Node.Description.Engine.EngineVersion
<add>}
<ide><path>cli/command/node/inspect.go
<ide> package node
<ide>
<ide> import (
<ide> "fmt"
<del> "io"
<del> "sort"
<ide> "strings"
<ide>
<del> "github.com/docker/docker/api/types/swarm"
<ide> "github.com/docker/docker/cli"
<ide> "github.com/docker/docker/cli/command"
<del> "github.com/docker/docker/cli/command/inspect"
<del> "github.com/docker/docker/pkg/ioutils"
<del> "github.com/docker/go-units"
<add> "github.com/docker/docker/cli/command/formatter"
<ide> "github.com/spf13/cobra"
<ide> "golang.org/x/net/context"
<ide> )
<ide> func newInspectCommand(dockerCli command.Cli) *cobra.Command {
<ide> func runInspect(dockerCli command.Cli, opts inspectOptions) error {
<ide> client := dockerCli.Client()
<ide> ctx := context.Background()
<add>
<add> if opts.pretty {
<add> opts.format = "pretty"
<add> }
<add>
<ide> getRef := func(ref string) (interface{}, []byte, error) {
<ide> nodeRef, err := Reference(ctx, client, ref)
<ide> if err != nil {
<ide> func runInspect(dockerCli command.Cli, opts inspectOptions) error {
<ide> node, _, err := client.NodeInspectWithRaw(ctx, nodeRef)
<ide> return node, nil, err
<ide> }
<add> f := opts.format
<ide>
<del> if !opts.pretty {
<del> return inspect.Inspect(dockerCli.Out(), opts.nodeIds, opts.format, getRef)
<add> // check if the user is trying to apply a template to the pretty format, which
<add> // is not supported
<add> if strings.HasPrefix(f, "pretty") && f != "pretty" {
<add> return fmt.Errorf("Cannot supply extra formatting options to the pretty template")
<ide> }
<del> return printHumanFriendly(dockerCli.Out(), opts.nodeIds, getRef)
<del>}
<del>
<del>func printHumanFriendly(out io.Writer, refs []string, getRef inspect.GetRefFunc) error {
<del> for idx, ref := range refs {
<del> obj, _, err := getRef(ref)
<del> if err != nil {
<del> return err
<del> }
<del> printNode(out, obj.(swarm.Node))
<ide>
<del> // TODO: better way to do this?
<del> // print extra space between objects, but not after the last one
<del> if idx+1 != len(refs) {
<del> fmt.Fprintf(out, "\n\n")
<del> } else {
<del> fmt.Fprintf(out, "\n")
<del> }
<add> nodeCtx := formatter.Context{
<add> Output: dockerCli.Out(),
<add> Format: formatter.NewNodeFormat(f, false),
<ide> }
<del> return nil
<del>}
<ide>
<del>// TODO: use a template
<del>func printNode(out io.Writer, node swarm.Node) {
<del> fmt.Fprintf(out, "ID:\t\t\t%s\n", node.ID)
<del> ioutils.FprintfIfNotEmpty(out, "Name:\t\t\t%s\n", node.Spec.Name)
<del> if node.Spec.Labels != nil {
<del> fmt.Fprintln(out, "Labels:")
<del> for k, v := range node.Spec.Labels {
<del> fmt.Fprintf(out, " - %s = %s\n", k, v)
<del> }
<del> }
<del>
<del> ioutils.FprintfIfNotEmpty(out, "Hostname:\t\t%s\n", node.Description.Hostname)
<del> fmt.Fprintf(out, "Joined at:\t\t%s\n", command.PrettyPrint(node.CreatedAt))
<del> fmt.Fprintln(out, "Status:")
<del> fmt.Fprintf(out, " State:\t\t\t%s\n", command.PrettyPrint(node.Status.State))
<del> ioutils.FprintfIfNotEmpty(out, " Message:\t\t%s\n", command.PrettyPrint(node.Status.Message))
<del> fmt.Fprintf(out, " Availability:\t\t%s\n", command.PrettyPrint(node.Spec.Availability))
<del> ioutils.FprintfIfNotEmpty(out, " Address:\t\t%s\n", command.PrettyPrint(node.Status.Addr))
<del>
<del> if node.ManagerStatus != nil {
<del> fmt.Fprintln(out, "Manager Status:")
<del> fmt.Fprintf(out, " Address:\t\t%s\n", node.ManagerStatus.Addr)
<del> fmt.Fprintf(out, " Raft Status:\t\t%s\n", command.PrettyPrint(node.ManagerStatus.Reachability))
<del> leader := "No"
<del> if node.ManagerStatus.Leader {
<del> leader = "Yes"
<del> }
<del> fmt.Fprintf(out, " Leader:\t\t%s\n", leader)
<del> }
<del>
<del> fmt.Fprintln(out, "Platform:")
<del> fmt.Fprintf(out, " Operating System:\t%s\n", node.Description.Platform.OS)
<del> fmt.Fprintf(out, " Architecture:\t\t%s\n", node.Description.Platform.Architecture)
<del>
<del> fmt.Fprintln(out, "Resources:")
<del> fmt.Fprintf(out, " CPUs:\t\t\t%d\n", node.Description.Resources.NanoCPUs/1e9)
<del> fmt.Fprintf(out, " Memory:\t\t%s\n", units.BytesSize(float64(node.Description.Resources.MemoryBytes)))
<del>
<del> var pluginTypes []string
<del> pluginNamesByType := map[string][]string{}
<del> for _, p := range node.Description.Engine.Plugins {
<del> // append to pluginTypes only if not done previously
<del> if _, ok := pluginNamesByType[p.Type]; !ok {
<del> pluginTypes = append(pluginTypes, p.Type)
<del> }
<del> pluginNamesByType[p.Type] = append(pluginNamesByType[p.Type], p.Name)
<del> }
<del>
<del> if len(pluginTypes) > 0 {
<del> fmt.Fprintln(out, "Plugins:")
<del> sort.Strings(pluginTypes) // ensure stable output
<del> for _, pluginType := range pluginTypes {
<del> fmt.Fprintf(out, " %s:\t\t%s\n", pluginType, strings.Join(pluginNamesByType[pluginType], ", "))
<del> }
<del> }
<del> fmt.Fprintf(out, "Engine Version:\t\t%s\n", node.Description.Engine.EngineVersion)
<del>
<del> if len(node.Description.Engine.Labels) != 0 {
<del> fmt.Fprintln(out, "Engine Labels:")
<del> for k, v := range node.Description.Engine.Labels {
<del> fmt.Fprintf(out, " - %s = %s\n", k, v)
<del> }
<add> if err := formatter.NodeInspectWrite(nodeCtx, opts.nodeIds, getRef); err != nil {
<add> return cli.StatusError{StatusCode: 1, Status: err.Error()}
<ide> }
<add> return nil
<ide> }
| 2
|
Mixed
|
Javascript
|
use code markup/markdown in headers
|
57f75376cc5b55b434b0c4c294307494c667dee2
|
<ide><path>doc/api/errors.md
<ide> Throwing an error inside the callback **can crash the Node.js process** in most
<ide> cases. If [domains][] are enabled, or a handler has been registered with
<ide> `process.on('uncaughtException')`, such errors can be intercepted.
<ide>
<del>## Class: Error
<add>## Class: `Error`
<ide>
<ide> <!--type=class-->
<ide>
<ide> provide a text description of the error.
<ide> All errors generated by Node.js, including all System and JavaScript errors,
<ide> will either be instances of, or inherit from, the `Error` class.
<ide>
<del>### new Error(message)
<add>### `new Error(message)`
<ide>
<ide> * `message` {string}
<ide>
<ide> are dependent on [V8's stack trace API][]. Stack traces extend only to either
<ide> (a) the beginning of *synchronous code execution*, or (b) the number of frames
<ide> given by the property `Error.stackTraceLimit`, whichever is smaller.
<ide>
<del>### Error.captureStackTrace(targetObject\[, constructorOpt\])
<add>### `Error.captureStackTrace(targetObject[, constructorOpt])`
<ide>
<ide> * `targetObject` {Object}
<ide> * `constructorOpt` {Function}
<ide> function MyError() {
<ide> new MyError().stack;
<ide> ```
<ide>
<del>### Error.stackTraceLimit
<add>### `Error.stackTraceLimit`
<ide>
<ide> * {number}
<ide>
<ide> will affect any stack trace captured *after* the value has been changed.
<ide> If set to a non-number value, or set to a negative number, stack traces will
<ide> not capture any frames.
<ide>
<del>### error.code
<add>### `error.code`
<ide>
<ide> * {string}
<ide>
<ide> between major versions of Node.js. In contrast, `error.message` strings may
<ide> change between any versions of Node.js. See [Node.js Error Codes][] for details
<ide> about specific codes.
<ide>
<del>### error.message
<add>### `error.message`
<ide>
<ide> * {string}
<ide>
<ide> console.error(err.message);
<ide> // Prints: The message
<ide> ```
<ide>
<del>### error.stack
<add>### `error.stack`
<ide>
<ide> * {string}
<ide>
<ide> The number of frames captured by the stack trace is bounded by the smaller of
<ide> `Error.stackTraceLimit` or the number of available frames on the current event
<ide> loop tick.
<ide>
<del>## Class: AssertionError
<add>## Class: `AssertionError`
<ide>
<ide> * Extends: {errors.Error}
<ide>
<ide> Indicates the failure of an assertion. For details, see
<ide> [`Class: assert.AssertionError`][].
<ide>
<del>## Class: RangeError
<add>## Class: `RangeError`
<ide>
<ide> * Extends: {errors.Error}
<ide>
<ide> require('net').connect(-1);
<ide> Node.js will generate and throw `RangeError` instances *immediately* as a form
<ide> of argument validation.
<ide>
<del>## Class: ReferenceError
<add>## Class: `ReferenceError`
<ide>
<ide> * Extends: {errors.Error}
<ide>
<ide> Unless an application is dynamically generating and running code,
<ide> `ReferenceError` instances should always be considered a bug in the code
<ide> or its dependencies.
<ide>
<del>## Class: SyntaxError
<add>## Class: `SyntaxError`
<ide>
<ide> * Extends: {errors.Error}
<ide>
<ide> try {
<ide> `SyntaxError` instances are unrecoverable in the context that created them –
<ide> they may only be caught by other contexts.
<ide>
<del>## Class: SystemError
<add>## Class: `SystemError`
<ide>
<ide> * Extends: {errors.Error}
<ide>
<ide> attempts to read a file that does not exist.
<ide> * `port` {number} If present, the network connection port that is not available
<ide> * `syscall` {string} The name of the system call that triggered the error
<ide>
<del>### error.address
<add>### `error.address`
<ide>
<ide> * {string}
<ide>
<ide> If present, `error.address` is a string describing the address to which a
<ide> network connection failed.
<ide>
<del>### error.code
<add>### `error.code`
<ide>
<ide> * {string}
<ide>
<ide> The `error.code` property is a string representing the error code.
<ide>
<del>### error.dest
<add>### `error.dest`
<ide>
<ide> * {string}
<ide>
<ide> If present, `error.dest` is the file path destination when reporting a file
<ide> system error.
<ide>
<del>### error.errno
<add>### `error.errno`
<ide>
<ide> * {number}
<ide>
<ide> On Windows the error number provided by the system will be normalized by libuv.
<ide> To get the string representation of the error code, use
<ide> [`util.getSystemErrorName(error.errno)`][].
<ide>
<del>### error.info
<add>### `error.info`
<ide>
<ide> * {Object}
<ide>
<ide> If present, `error.info` is an object with details about the error condition.
<ide>
<del>### error.message
<add>### `error.message`
<ide>
<ide> * {string}
<ide>
<ide> `error.message` is a system-provided human-readable description of the error.
<ide>
<del>### error.path
<add>### `error.path`
<ide>
<ide> * {string}
<ide>
<ide> If present, `error.path` is a string containing a relevant invalid pathname.
<ide>
<del>### error.port
<add>### `error.port`
<ide>
<ide> * {number}
<ide>
<ide> If present, `error.port` is the network connection port that is not available.
<ide>
<del>### error.syscall
<add>### `error.syscall`
<ide>
<ide> * {string}
<ide>
<ide> program. For a comprehensive list, see the [`errno`(3) man page][].
<ide> encountered by [`http`][] or [`net`][] — often a sign that a `socket.end()`
<ide> was not properly called.
<ide>
<del>## Class: TypeError
<add>## Class: `TypeError`
<ide>
<ide> * Extends {errors.Error}
<ide>
<ide> Errors originating in `crypto` or `tls` are of class `Error`, and in addition to
<ide> the standard `.code` and `.message` properties, may have some additional
<ide> OpenSSL-specific properties.
<ide>
<del>### error.opensslErrorStack
<add>### `error.opensslErrorStack`
<ide>
<ide> An array of errors that can give context to where in the OpenSSL library an
<ide> error originates from.
<ide>
<del>### error.function
<add>### `error.function`
<ide>
<ide> The OpenSSL function the error originates in.
<ide>
<del>### error.library
<add>### `error.library`
<ide>
<ide> The OpenSSL library the error originates in.
<ide>
<del>### error.reason
<add>### `error.reason`
<ide>
<ide> A human-readable string describing the reason for the error.
<ide>
<ide> <a id="nodejs-error-codes"></a>
<ide> ## Node.js Error Codes
<ide>
<ide> <a id="ERR_AMBIGUOUS_ARGUMENT"></a>
<del>### ERR_AMBIGUOUS_ARGUMENT
<add>### `ERR_AMBIGUOUS_ARGUMENT`
<ide>
<ide> A function argument is being used in a way that suggests that the function
<ide> signature may be misunderstood. This is thrown by the `assert` module when the
<ide> is the expected message rather than the message the `AssertionError` will
<ide> display if `block` does not throw.
<ide>
<ide> <a id="ERR_ARG_NOT_ITERABLE"></a>
<del>### ERR_ARG_NOT_ITERABLE
<add>### `ERR_ARG_NOT_ITERABLE`
<ide>
<ide> An iterable argument (i.e. a value that works with `for...of` loops) was
<ide> required, but not provided to a Node.js API.
<ide>
<ide> <a id="ERR_ASSERTION"></a>
<del>### ERR_ASSERTION
<add>### `ERR_ASSERTION`
<ide>
<ide> A special type of error that can be triggered whenever Node.js detects an
<ide> exceptional logic violation that should never occur. These are raised typically
<ide> by the `assert` module.
<ide>
<ide> <a id="ERR_ASYNC_CALLBACK"></a>
<del>### ERR_ASYNC_CALLBACK
<add>### `ERR_ASYNC_CALLBACK`
<ide>
<ide> An attempt was made to register something that is not a function as an
<ide> `AsyncHooks` callback.
<ide>
<ide> <a id="ERR_ASYNC_TYPE"></a>
<del>### ERR_ASYNC_TYPE
<add>### `ERR_ASYNC_TYPE`
<ide>
<ide> The type of an asynchronous resource was invalid. Users are also able
<ide> to define their own types if using the public embedder API.
<ide>
<ide> <a id="ERR_BROTLI_COMPRESSION_FAILED"></a>
<del>### ERR_BROTLI_COMPRESSION_FAILED
<add>### `ERR_BROTLI_COMPRESSION_FAILED`
<ide>
<ide> Data passed to a Brotli stream was not successfully compressed.
<ide>
<ide> <a id="ERR_BROTLI_INVALID_PARAM"></a>
<del>### ERR_BROTLI_INVALID_PARAM
<add>### `ERR_BROTLI_INVALID_PARAM`
<ide>
<ide> An invalid parameter key was passed during construction of a Brotli stream.
<ide>
<ide> <a id="ERR_BUFFER_CONTEXT_NOT_AVAILABLE"></a>
<del>### ERR_BUFFER_CONTEXT_NOT_AVAILABLE
<add>### `ERR_BUFFER_CONTEXT_NOT_AVAILABLE`
<ide>
<ide> An attempt was made to create a Node.js `Buffer` instance from addon or embedder
<ide> code, while in a JS engine Context that is not associated with a Node.js
<ide> prototype of the resulting object. `Uint8Array`s are generally accepted in all
<ide> Node.js core APIs where `Buffer`s are; they are available in all Contexts.
<ide>
<ide> <a id="ERR_BUFFER_OUT_OF_BOUNDS"></a>
<del>### ERR_BUFFER_OUT_OF_BOUNDS
<add>### `ERR_BUFFER_OUT_OF_BOUNDS`
<ide>
<ide> An operation outside the bounds of a `Buffer` was attempted.
<ide>
<ide> <a id="ERR_BUFFER_TOO_LARGE"></a>
<del>### ERR_BUFFER_TOO_LARGE
<add>### `ERR_BUFFER_TOO_LARGE`
<ide>
<ide> An attempt has been made to create a `Buffer` larger than the maximum allowed
<ide> size.
<ide>
<ide> <a id="ERR_CANNOT_WATCH_SIGINT"></a>
<del>### ERR_CANNOT_WATCH_SIGINT
<add>### `ERR_CANNOT_WATCH_SIGINT`
<ide>
<ide> Node.js was unable to watch for the `SIGINT` signal.
<ide>
<ide> <a id="ERR_CHILD_CLOSED_BEFORE_REPLY"></a>
<del>### ERR_CHILD_CLOSED_BEFORE_REPLY
<add>### `ERR_CHILD_CLOSED_BEFORE_REPLY`
<ide>
<ide> A child process was closed before the parent received a reply.
<ide>
<ide> <a id="ERR_CHILD_PROCESS_IPC_REQUIRED"></a>
<del>### ERR_CHILD_PROCESS_IPC_REQUIRED
<add>### `ERR_CHILD_PROCESS_IPC_REQUIRED`
<ide>
<ide> Used when a child process is being forked without specifying an IPC channel.
<ide>
<ide> <a id="ERR_CHILD_PROCESS_STDIO_MAXBUFFER"></a>
<del>### ERR_CHILD_PROCESS_STDIO_MAXBUFFER
<add>### `ERR_CHILD_PROCESS_STDIO_MAXBUFFER`
<ide>
<ide> Used when the main process is trying to read data from the child process's
<ide> STDERR/STDOUT, and the data's length is longer than the `maxBuffer` option.
<ide>
<ide> <a id="ERR_CONSOLE_WRITABLE_STREAM"></a>
<del>### ERR_CONSOLE_WRITABLE_STREAM
<add>### `ERR_CONSOLE_WRITABLE_STREAM`
<ide>
<ide> `Console` was instantiated without `stdout` stream, or `Console` has a
<ide> non-writable `stdout` or `stderr` stream.
<ide>
<ide> <a id="ERR_CONSTRUCT_CALL_REQUIRED"></a>
<del>### ERR_CONSTRUCT_CALL_REQUIRED
<add>### `ERR_CONSTRUCT_CALL_REQUIRED`
<ide>
<ide> A constructor for a class was called without `new`.
<ide>
<ide> <a id="ERR_CONSTRUCT_CALL_INVALID"></a>
<del>### ERR_CONSTRUCT_CALL_INVALID
<add>### `ERR_CONSTRUCT_CALL_INVALID`
<ide> <!--
<ide> added: v12.5.0
<ide> -->
<ide>
<ide> A class constructor was called that is not callable.
<ide>
<ide> <a id="ERR_CPU_USAGE"></a>
<del>### ERR_CPU_USAGE
<add>### `ERR_CPU_USAGE`
<ide>
<ide> The native call from `process.cpuUsage` could not be processed.
<ide>
<ide> <a id="ERR_CRYPTO_CUSTOM_ENGINE_NOT_SUPPORTED"></a>
<del>### ERR_CRYPTO_CUSTOM_ENGINE_NOT_SUPPORTED
<add>### `ERR_CRYPTO_CUSTOM_ENGINE_NOT_SUPPORTED`
<ide>
<ide> A client certificate engine was requested that is not supported by the version
<ide> of OpenSSL being used.
<ide>
<ide> <a id="ERR_CRYPTO_ECDH_INVALID_FORMAT"></a>
<del>### ERR_CRYPTO_ECDH_INVALID_FORMAT
<add>### `ERR_CRYPTO_ECDH_INVALID_FORMAT`
<ide>
<ide> An invalid value for the `format` argument was passed to the `crypto.ECDH()`
<ide> class `getPublicKey()` method.
<ide>
<ide> <a id="ERR_CRYPTO_ECDH_INVALID_PUBLIC_KEY"></a>
<del>### ERR_CRYPTO_ECDH_INVALID_PUBLIC_KEY
<add>### `ERR_CRYPTO_ECDH_INVALID_PUBLIC_KEY`
<ide>
<ide> An invalid value for the `key` argument has been passed to the
<ide> `crypto.ECDH()` class `computeSecret()` method. It means that the public
<ide> key lies outside of the elliptic curve.
<ide>
<ide> <a id="ERR_CRYPTO_ENGINE_UNKNOWN"></a>
<del>### ERR_CRYPTO_ENGINE_UNKNOWN
<add>### `ERR_CRYPTO_ENGINE_UNKNOWN`
<ide>
<ide> An invalid crypto engine identifier was passed to
<ide> [`require('crypto').setEngine()`][].
<ide>
<ide> <a id="ERR_CRYPTO_FIPS_FORCED"></a>
<del>### ERR_CRYPTO_FIPS_FORCED
<add>### `ERR_CRYPTO_FIPS_FORCED`
<ide>
<ide> The [`--force-fips`][] command-line argument was used but there was an attempt
<ide> to enable or disable FIPS mode in the `crypto` module.
<ide>
<ide> <a id="ERR_CRYPTO_FIPS_UNAVAILABLE"></a>
<del>### ERR_CRYPTO_FIPS_UNAVAILABLE
<add>### `ERR_CRYPTO_FIPS_UNAVAILABLE`
<ide>
<ide> An attempt was made to enable or disable FIPS mode, but FIPS mode was not
<ide> available.
<ide>
<ide> <a id="ERR_CRYPTO_HASH_FINALIZED"></a>
<del>### ERR_CRYPTO_HASH_FINALIZED
<add>### `ERR_CRYPTO_HASH_FINALIZED`
<ide>
<ide> [`hash.digest()`][] was called multiple times. The `hash.digest()` method must
<ide> be called no more than one time per instance of a `Hash` object.
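<ide> 
<ide> For instance, a minimal sketch that triggers this error:
<ide> 
<ide> ```js
<ide> const crypto = require('crypto');
<ide> const hash = crypto.createHash('sha256');
<ide> hash.update('some data');
<ide> hash.digest('hex');
<ide> hash.digest('hex'); // Throws Error [ERR_CRYPTO_HASH_FINALIZED]
<ide> ```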
<ide>
<ide> <a id="ERR_CRYPTO_HASH_UPDATE_FAILED"></a>
<del>### ERR_CRYPTO_HASH_UPDATE_FAILED
<add>### `ERR_CRYPTO_HASH_UPDATE_FAILED`
<ide>
<ide> [`hash.update()`][] failed for any reason. This should rarely, if ever, happen.
<ide>
<ide> <a id="ERR_CRYPTO_INCOMPATIBLE_KEY_OPTIONS"></a>
<del>### ERR_CRYPTO_INCOMPATIBLE_KEY_OPTIONS
<add>### `ERR_CRYPTO_INCOMPATIBLE_KEY_OPTIONS`
<ide>
<ide> The selected public or private key encoding is incompatible with other options.
<ide>
<ide> <a id="ERR_CRYPTO_INVALID_DIGEST"></a>
<del>### ERR_CRYPTO_INVALID_DIGEST
<add>### `ERR_CRYPTO_INVALID_DIGEST`
<ide>
<ide> An invalid [crypto digest algorithm][] was specified.
<ide>
<ide> <a id="ERR_CRYPTO_INVALID_KEY_OBJECT_TYPE"></a>
<del>### ERR_CRYPTO_INVALID_KEY_OBJECT_TYPE
<add>### `ERR_CRYPTO_INVALID_KEY_OBJECT_TYPE`
<ide>
<ide> The given crypto key object's type is invalid for the attempted operation.
<ide>
<ide> <a id="ERR_CRYPTO_INVALID_STATE"></a>
<del>### ERR_CRYPTO_INVALID_STATE
<add>### `ERR_CRYPTO_INVALID_STATE`
<ide>
<ide> A crypto method was used on an object that was in an invalid state. For
<ide> instance, calling [`cipher.getAuthTag()`][] before calling `cipher.final()`.
<ide>
<ide> <a id="ERR_CRYPTO_PBKDF2_ERROR"></a>
<del>### ERR_CRYPTO_PBKDF2_ERROR
<add>### `ERR_CRYPTO_PBKDF2_ERROR`
<ide>
<ide> The PBKDF2 algorithm failed for unspecified reasons. OpenSSL does not provide
<ide> more details and therefore neither does Node.js.
<ide>
<ide> <a id="ERR_CRYPTO_SCRYPT_INVALID_PARAMETER"></a>
<del>### ERR_CRYPTO_SCRYPT_INVALID_PARAMETER
<add>### `ERR_CRYPTO_SCRYPT_INVALID_PARAMETER`
<ide>
<ide> One or more [`crypto.scrypt()`][] or [`crypto.scryptSync()`][] parameters are
<ide> outside their legal range.
<ide>
<ide> <a id="ERR_CRYPTO_SCRYPT_NOT_SUPPORTED"></a>
<del>### ERR_CRYPTO_SCRYPT_NOT_SUPPORTED
<add>### `ERR_CRYPTO_SCRYPT_NOT_SUPPORTED`
<ide>
<ide> Node.js was compiled without `scrypt` support. Not possible with the official
<ide> release binaries but can happen with custom builds, including distro builds.
<ide>
<ide> <a id="ERR_CRYPTO_SIGN_KEY_REQUIRED"></a>
<del>### ERR_CRYPTO_SIGN_KEY_REQUIRED
<add>### `ERR_CRYPTO_SIGN_KEY_REQUIRED`
<ide>
<ide> A signing `key` was not provided to the [`sign.sign()`][] method.
<ide>
<ide> <a id="ERR_CRYPTO_TIMING_SAFE_EQUAL_LENGTH"></a>
<del>### ERR_CRYPTO_TIMING_SAFE_EQUAL_LENGTH
<add>### `ERR_CRYPTO_TIMING_SAFE_EQUAL_LENGTH`
<ide>
<ide> [`crypto.timingSafeEqual()`][] was called with `Buffer`, `TypedArray`, or
<ide> `DataView` arguments of different lengths.
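<ide> 
<ide> A minimal sketch of the failing call, using two buffers of different sizes:
<ide> 
<ide> ```js
<ide> const crypto = require('crypto');
<ide> // Throws RangeError [ERR_CRYPTO_TIMING_SAFE_EQUAL_LENGTH]:
<ide> crypto.timingSafeEqual(Buffer.alloc(4), Buffer.alloc(8));
<ide> ```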
<ide>
<ide> <a id="ERR_DIR_CLOSED"></a>
<del>### ERR_DIR_CLOSED
<add>### `ERR_DIR_CLOSED`
<ide>
<ide> The [`fs.Dir`][] was previously closed.
<ide>
<ide> <a id="ERR_DNS_SET_SERVERS_FAILED"></a>
<del>### ERR_DNS_SET_SERVERS_FAILED
<add>### `ERR_DNS_SET_SERVERS_FAILED`
<ide>
<ide> `c-ares` failed to set the DNS server.
<ide>
<ide> <a id="ERR_DOMAIN_CALLBACK_NOT_AVAILABLE"></a>
<del>### ERR_DOMAIN_CALLBACK_NOT_AVAILABLE
<add>### `ERR_DOMAIN_CALLBACK_NOT_AVAILABLE`
<ide>
<ide> The `domain` module was not usable since it could not establish the required
<ide> error handling hooks, because
<ide> [`process.setUncaughtExceptionCaptureCallback()`][] had been called at an
<ide> earlier point in time.
<ide>
<ide> <a id="ERR_DOMAIN_CANNOT_SET_UNCAUGHT_EXCEPTION_CAPTURE"></a>
<del>### ERR_DOMAIN_CANNOT_SET_UNCAUGHT_EXCEPTION_CAPTURE
<add>### `ERR_DOMAIN_CANNOT_SET_UNCAUGHT_EXCEPTION_CAPTURE`
<ide>
<ide> [`process.setUncaughtExceptionCaptureCallback()`][] could not be called
<ide> because the `domain` module has been loaded at an earlier point in time.
<ide> The stack trace is extended to include the point in time at which the
<ide> `domain` module had been loaded.
<ide>
<ide> <a id="ERR_ENCODING_INVALID_ENCODED_DATA"></a>
<del>### ERR_ENCODING_INVALID_ENCODED_DATA
<add>### `ERR_ENCODING_INVALID_ENCODED_DATA`
<ide>
<ide> Data provided to `TextDecoder()` API was invalid according to the encoding
<ide> provided.
<ide>
<ide> <a id="ERR_ENCODING_NOT_SUPPORTED"></a>
<del>### ERR_ENCODING_NOT_SUPPORTED
<add>### `ERR_ENCODING_NOT_SUPPORTED`
<ide>
<ide> Encoding provided to `TextDecoder()` API was not one of the
<ide> [WHATWG Supported Encodings][].
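<ide> 
<ide> For instance (the encoding name here is deliberately bogus):
<ide> 
<ide> ```js
<ide> const { TextDecoder } = require('util');
<ide> new TextDecoder('not-a-real-encoding'); // Throws RangeError [ERR_ENCODING_NOT_SUPPORTED]
<ide> ```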
<ide>
<ide> <a id="ERR_FALSY_VALUE_REJECTION"></a>
<del>### ERR_FALSY_VALUE_REJECTION
<add>### `ERR_FALSY_VALUE_REJECTION`
<ide>
<ide> A `Promise` that was callbackified via `util.callbackify()` was rejected with a
<ide> falsy value.
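<ide> 
<ide> A minimal sketch: the rejection value (`null` here) is not an `Error`, so it is
<ide> wrapped and the original value is exposed on the `reason` property:
<ide> 
<ide> ```js
<ide> const util = require('util');
<ide> const fn = util.callbackify(() => Promise.reject(null));
<ide> fn((err) => {
<ide>   // err.code === 'ERR_FALSY_VALUE_REJECTION'; err.reason === null
<ide> });
<ide> ```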
<ide>
<ide> <a id="ERR_FS_FILE_TOO_LARGE"></a>
<del>### ERR_FS_FILE_TOO_LARGE
<add>### `ERR_FS_FILE_TOO_LARGE`
<ide>
<ide> An attempt has been made to read a file whose size is larger than the maximum
<ide> allowed size for a `Buffer`.
<ide>
<ide> <a id="ERR_FS_INVALID_SYMLINK_TYPE"></a>
<del>### ERR_FS_INVALID_SYMLINK_TYPE
<add>### `ERR_FS_INVALID_SYMLINK_TYPE`
<ide>
<ide> An invalid symlink type was passed to the [`fs.symlink()`][] or
<ide> [`fs.symlinkSync()`][] methods.
<ide>
<ide> <a id="ERR_HTTP_HEADERS_SENT"></a>
<del>### ERR_HTTP_HEADERS_SENT
<add>### `ERR_HTTP_HEADERS_SENT`
<ide>
<ide> An attempt was made to add more headers after the headers had already been sent.
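<ide> 
<ide> For instance, setting a header after `writeHead()` has already serialized them:
<ide> 
<ide> ```js
<ide> const http = require('http');
<ide> http.createServer((req, res) => {
<ide>   res.writeHead(200);
<ide>   res.setHeader('X-Too-Late', '1'); // Throws Error [ERR_HTTP_HEADERS_SENT]
<ide> }).listen(0);
<ide> ```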
<ide>
<ide> <a id="ERR_HTTP_INVALID_HEADER_VALUE"></a>
<del>### ERR_HTTP_INVALID_HEADER_VALUE
<add>### `ERR_HTTP_INVALID_HEADER_VALUE`
<ide>
<ide> An invalid HTTP header value was specified.
<ide>
<ide> <a id="ERR_HTTP_INVALID_STATUS_CODE"></a>
<del>### ERR_HTTP_INVALID_STATUS_CODE
<add>### `ERR_HTTP_INVALID_STATUS_CODE`
<ide>
<ide> Status code was outside the regular status code range (100-999).
<ide>
<ide> <a id="ERR_HTTP_TRAILER_INVALID"></a>
<del>### ERR_HTTP_TRAILER_INVALID
<add>### `ERR_HTTP_TRAILER_INVALID`
<ide>
<ide> The `Trailer` header was set even though the transfer encoding does not support
<ide> that.
<ide>
<ide> <a id="ERR_HTTP2_ALTSVC_INVALID_ORIGIN"></a>
<del>### ERR_HTTP2_ALTSVC_INVALID_ORIGIN
<add>### `ERR_HTTP2_ALTSVC_INVALID_ORIGIN`
<ide>
<ide> HTTP/2 ALTSVC frames require a valid origin.
<ide>
<ide> <a id="ERR_HTTP2_ALTSVC_LENGTH"></a>
<del>### ERR_HTTP2_ALTSVC_LENGTH
<add>### `ERR_HTTP2_ALTSVC_LENGTH`
<ide>
<ide> HTTP/2 ALTSVC frames are limited to a maximum of 16,382 payload bytes.
<ide>
<ide> <a id="ERR_HTTP2_CONNECT_AUTHORITY"></a>
<del>### ERR_HTTP2_CONNECT_AUTHORITY
<add>### `ERR_HTTP2_CONNECT_AUTHORITY`
<ide>
<ide> For HTTP/2 requests using the `CONNECT` method, the `:authority` pseudo-header
<ide> is required.
<ide>
<ide> <a id="ERR_HTTP2_CONNECT_PATH"></a>
<del>### ERR_HTTP2_CONNECT_PATH
<add>### `ERR_HTTP2_CONNECT_PATH`
<ide>
<ide> For HTTP/2 requests using the `CONNECT` method, the `:path` pseudo-header is
<ide> forbidden.
<ide>
<ide> <a id="ERR_HTTP2_CONNECT_SCHEME"></a>
<del>### ERR_HTTP2_CONNECT_SCHEME
<add>### `ERR_HTTP2_CONNECT_SCHEME`
<ide>
<ide> For HTTP/2 requests using the `CONNECT` method, the `:scheme` pseudo-header is
<ide> forbidden.
<ide>
<ide> <a id="ERR_HTTP2_ERROR"></a>
<del>### ERR_HTTP2_ERROR
<add>### `ERR_HTTP2_ERROR`
<ide>
<ide> A non-specific HTTP/2 error has occurred.
<ide>
<ide> <a id="ERR_HTTP2_GOAWAY_SESSION"></a>
<del>### ERR_HTTP2_GOAWAY_SESSION
<add>### `ERR_HTTP2_GOAWAY_SESSION`
<ide>
<ide> New HTTP/2 Streams may not be opened after the `Http2Session` has received a
<ide> `GOAWAY` frame from the connected peer.
<ide>
<ide> <a id="ERR_HTTP2_HEADERS_AFTER_RESPOND"></a>
<del>### ERR_HTTP2_HEADERS_AFTER_RESPOND
<add>### `ERR_HTTP2_HEADERS_AFTER_RESPOND`
<ide>
<ide> Additional headers were specified after an HTTP/2 response was initiated.
<ide>
<ide> <a id="ERR_HTTP2_HEADERS_SENT"></a>
<del>### ERR_HTTP2_HEADERS_SENT
<add>### `ERR_HTTP2_HEADERS_SENT`
<ide>
<ide> An attempt was made to send multiple response headers.
<ide>
<ide> <a id="ERR_HTTP2_HEADER_SINGLE_VALUE"></a>
<del>### ERR_HTTP2_HEADER_SINGLE_VALUE
<add>### `ERR_HTTP2_HEADER_SINGLE_VALUE`
<ide>
<ide> Multiple values were provided for an HTTP/2 header field that was required to
<ide> have only a single value.
<ide>
<ide> <a id="ERR_HTTP2_INFO_STATUS_NOT_ALLOWED"></a>
<del>### ERR_HTTP2_INFO_STATUS_NOT_ALLOWED
<add>### `ERR_HTTP2_INFO_STATUS_NOT_ALLOWED`
<ide>
<ide> Informational HTTP status codes (`1xx`) may not be set as the response status
<ide> code on HTTP/2 responses.
<ide>
<ide> <a id="ERR_HTTP2_INVALID_CONNECTION_HEADERS"></a>
<del>### ERR_HTTP2_INVALID_CONNECTION_HEADERS
<add>### `ERR_HTTP2_INVALID_CONNECTION_HEADERS`
<ide>
<ide> HTTP/1 connection specific headers are forbidden to be used in HTTP/2
<ide> requests and responses.
<ide>
<ide> <a id="ERR_HTTP2_INVALID_HEADER_VALUE"></a>
<del>### ERR_HTTP2_INVALID_HEADER_VALUE
<add>### `ERR_HTTP2_INVALID_HEADER_VALUE`
<ide>
<ide> An invalid HTTP/2 header value was specified.
<ide>
<ide> <a id="ERR_HTTP2_INVALID_INFO_STATUS"></a>
<del>### ERR_HTTP2_INVALID_INFO_STATUS
<add>### `ERR_HTTP2_INVALID_INFO_STATUS`
<ide>
<ide> An invalid HTTP informational status code has been specified. Informational
<ide> status codes must be an integer between `100` and `199` (inclusive).
<ide>
<ide> <a id="ERR_HTTP2_INVALID_ORIGIN"></a>
<del>### ERR_HTTP2_INVALID_ORIGIN
<add>### `ERR_HTTP2_INVALID_ORIGIN`
<ide>
<ide> HTTP/2 `ORIGIN` frames require a valid origin.
<ide>
<ide> <a id="ERR_HTTP2_INVALID_PACKED_SETTINGS_LENGTH"></a>
<del>### ERR_HTTP2_INVALID_PACKED_SETTINGS_LENGTH
<add>### `ERR_HTTP2_INVALID_PACKED_SETTINGS_LENGTH`
<ide>
<ide> Input `Buffer` and `Uint8Array` instances passed to the
<ide> `http2.getUnpackedSettings()` API must have a length that is a multiple of
<ide> six.
<ide>
<ide> <a id="ERR_HTTP2_INVALID_PSEUDOHEADER"></a>
<del>### ERR_HTTP2_INVALID_PSEUDOHEADER
<add>### `ERR_HTTP2_INVALID_PSEUDOHEADER`
<ide>
<ide> Only valid HTTP/2 pseudoheaders (`:status`, `:path`, `:authority`, `:scheme`,
<ide> and `:method`) may be used.
<ide>
<ide> <a id="ERR_HTTP2_INVALID_SESSION"></a>
<del>### ERR_HTTP2_INVALID_SESSION
<add>### `ERR_HTTP2_INVALID_SESSION`
<ide>
<ide> An action was performed on an `Http2Session` object that had already been
<ide> destroyed.
<ide>
<ide> <a id="ERR_HTTP2_INVALID_SETTING_VALUE"></a>
<del>### ERR_HTTP2_INVALID_SETTING_VALUE
<add>### `ERR_HTTP2_INVALID_SETTING_VALUE`
<ide>
<ide> An invalid value has been specified for an HTTP/2 setting.
<ide>
<ide> <a id="ERR_HTTP2_INVALID_STREAM"></a>
<del>### ERR_HTTP2_INVALID_STREAM
<add>### `ERR_HTTP2_INVALID_STREAM`
<ide>
<ide> An operation was performed on a stream that had already been destroyed.
<ide>
<ide> <a id="ERR_HTTP2_MAX_PENDING_SETTINGS_ACK"></a>
<del>### ERR_HTTP2_MAX_PENDING_SETTINGS_ACK
<add>### `ERR_HTTP2_MAX_PENDING_SETTINGS_ACK`
<ide>
<ide> Whenever an HTTP/2 `SETTINGS` frame is sent to a connected peer, the peer is
<ide> required to send an acknowledgment that it has received and applied the new
<ide> settings. By default, a maximum number of unacknowledged `SETTINGS` frames may
<ide> be sent at any given time. This error code is used when that limit has been
<ide> reached.
<ide>
<ide> <a id="ERR_HTTP2_NESTED_PUSH"></a>
<del>### ERR_HTTP2_NESTED_PUSH
<add>### `ERR_HTTP2_NESTED_PUSH`
<ide>
<ide> An attempt was made to initiate a new push stream from within a push stream.
<ide> Nested push streams are not permitted.
<ide>
<ide> <a id="ERR_HTTP2_NO_SOCKET_MANIPULATION"></a>
<del>### ERR_HTTP2_NO_SOCKET_MANIPULATION
<add>### `ERR_HTTP2_NO_SOCKET_MANIPULATION`
<ide>
<ide> An attempt was made to directly manipulate (read, write, pause, resume, etc.) a
<ide> socket attached to an `Http2Session`.
<ide>
<ide> <a id="ERR_HTTP2_ORIGIN_LENGTH"></a>
<del>### ERR_HTTP2_ORIGIN_LENGTH
<add>### `ERR_HTTP2_ORIGIN_LENGTH`
<ide>
<ide> HTTP/2 `ORIGIN` frames are limited to a length of 16382 bytes.
<ide>
<ide> <a id="ERR_HTTP2_OUT_OF_STREAMS"></a>
<del>### ERR_HTTP2_OUT_OF_STREAMS
<add>### `ERR_HTTP2_OUT_OF_STREAMS`
<ide>
<ide> The number of streams created on a single HTTP/2 session reached the maximum
<ide> limit.
<ide>
<ide> <a id="ERR_HTTP2_PAYLOAD_FORBIDDEN"></a>
<del>### ERR_HTTP2_PAYLOAD_FORBIDDEN
<add>### `ERR_HTTP2_PAYLOAD_FORBIDDEN`
<ide>
<ide> A message payload was specified for an HTTP response code for which a payload is
<ide> forbidden.
<ide>
<ide> <a id="ERR_HTTP2_PING_CANCEL"></a>
<del>### ERR_HTTP2_PING_CANCEL
<add>### `ERR_HTTP2_PING_CANCEL`
<ide>
<ide> An HTTP/2 ping was canceled.
<ide>
<ide> <a id="ERR_HTTP2_PING_LENGTH"></a>
<del>### ERR_HTTP2_PING_LENGTH
<add>### `ERR_HTTP2_PING_LENGTH`
<ide>
<ide> HTTP/2 ping payloads must be exactly 8 bytes in length.
<ide>
<ide> <a id="ERR_HTTP2_PSEUDOHEADER_NOT_ALLOWED"></a>
<del>### ERR_HTTP2_PSEUDOHEADER_NOT_ALLOWED
<add>### `ERR_HTTP2_PSEUDOHEADER_NOT_ALLOWED`
<ide>
<ide> An HTTP/2 pseudo-header has been used inappropriately. Pseudo-headers are header
<ide> key names that begin with the `:` prefix.
<ide>
<ide> <a id="ERR_HTTP2_PUSH_DISABLED"></a>
<del>### ERR_HTTP2_PUSH_DISABLED
<add>### `ERR_HTTP2_PUSH_DISABLED`
<ide>
<ide> An attempt was made to create a push stream, which had been disabled by the
<ide> client.
<ide>
<ide> <a id="ERR_HTTP2_SEND_FILE"></a>
<del>### ERR_HTTP2_SEND_FILE
<add>### `ERR_HTTP2_SEND_FILE`
<ide>
<ide> An attempt was made to use the `Http2Stream.prototype.respondWithFile()` API to
<ide> send a directory.
<ide>
<ide> <a id="ERR_HTTP2_SEND_FILE_NOSEEK"></a>
<del>### ERR_HTTP2_SEND_FILE_NOSEEK
<add>### `ERR_HTTP2_SEND_FILE_NOSEEK`
<ide>
<ide> An attempt was made to use the `Http2Stream.prototype.respondWithFile()` API to
<ide> send something other than a regular file, but `offset` or `length` options were
<ide> provided.
<ide>
<ide> <a id="ERR_HTTP2_SESSION_ERROR"></a>
<del>### ERR_HTTP2_SESSION_ERROR
<add>### `ERR_HTTP2_SESSION_ERROR`
<ide>
<ide> The `Http2Session` closed with a non-zero error code.
<ide>
<ide> <a id="ERR_HTTP2_SETTINGS_CANCEL"></a>
<del>### ERR_HTTP2_SETTINGS_CANCEL
<add>### `ERR_HTTP2_SETTINGS_CANCEL`
<ide>
<ide> The `Http2Session` settings were canceled.
<ide>
<ide> <a id="ERR_HTTP2_SOCKET_BOUND"></a>
<del>### ERR_HTTP2_SOCKET_BOUND
<add>### `ERR_HTTP2_SOCKET_BOUND`
<ide>
<ide> An attempt was made to connect an `Http2Session` object to a `net.Socket` or
<ide> `tls.TLSSocket` that had already been bound to another `Http2Session` object.
<ide>
<ide> <a id="ERR_HTTP2_SOCKET_UNBOUND"></a>
<del>### ERR_HTTP2_SOCKET_UNBOUND
<add>### `ERR_HTTP2_SOCKET_UNBOUND`
<ide>
<ide> An attempt was made to use the `socket` property of an `Http2Session` that
<ide> has already been closed.
<ide>
<ide> <a id="ERR_HTTP2_STATUS_101"></a>
<del>### ERR_HTTP2_STATUS_101
<add>### `ERR_HTTP2_STATUS_101`
<ide>
<ide> Use of the `101` Informational status code is forbidden in HTTP/2.
<ide>
<ide> <a id="ERR_HTTP2_STATUS_INVALID"></a>
<del>### ERR_HTTP2_STATUS_INVALID
<add>### `ERR_HTTP2_STATUS_INVALID`
<ide>
<ide> An invalid HTTP status code has been specified. Status codes must be an integer
<ide> between `100` and `599` (inclusive).
<ide>
<ide> <a id="ERR_HTTP2_STREAM_CANCEL"></a>
<del>### ERR_HTTP2_STREAM_CANCEL
<add>### `ERR_HTTP2_STREAM_CANCEL`
<ide>
<ide> An `Http2Stream` was destroyed before any data was transmitted to the connected
<ide> peer.
<ide>
<ide> <a id="ERR_HTTP2_STREAM_ERROR"></a>
<del>### ERR_HTTP2_STREAM_ERROR
<add>### `ERR_HTTP2_STREAM_ERROR`
<ide>
<ide> A non-zero error code was specified in an `RST_STREAM` frame.
<ide>
<ide> <a id="ERR_HTTP2_STREAM_SELF_DEPENDENCY"></a>
<del>### ERR_HTTP2_STREAM_SELF_DEPENDENCY
<add>### `ERR_HTTP2_STREAM_SELF_DEPENDENCY`
<ide>
<ide> When setting the priority for an HTTP/2 stream, the stream may be marked as
<ide> a dependency for a parent stream. This error code is used when an attempt is
<ide> made to mark a stream as a dependent of itself.
<ide>
<ide> <a id="ERR_HTTP2_TRAILERS_ALREADY_SENT"></a>
<del>### ERR_HTTP2_TRAILERS_ALREADY_SENT
<add>### `ERR_HTTP2_TRAILERS_ALREADY_SENT`
<ide>
<ide> Trailing headers have already been sent on the `Http2Stream`.
<ide>
<ide> <a id="ERR_HTTP2_TRAILERS_NOT_READY"></a>
<del>### ERR_HTTP2_TRAILERS_NOT_READY
<add>### `ERR_HTTP2_TRAILERS_NOT_READY`
<ide>
<ide> The `http2stream.sendTrailers()` method cannot be called until after the
<ide> `'wantTrailers'` event is emitted on an `Http2Stream` object. The
<ide> `'wantTrailers'` event will only be emitted if the `waitForTrailers` option
<ide> is set for the `Http2Stream`.
<ide>
<ide> <a id="ERR_HTTP2_UNSUPPORTED_PROTOCOL"></a>
<del>### ERR_HTTP2_UNSUPPORTED_PROTOCOL
<add>### `ERR_HTTP2_UNSUPPORTED_PROTOCOL`
<ide>
<ide> `http2.connect()` was passed a URL that uses any protocol other than `http:` or
<ide> `https:`.
<ide>
<ide> <a id="ERR_INTERNAL_ASSERTION"></a>
<del>### ERR_INTERNAL_ASSERTION
<add>### `ERR_INTERNAL_ASSERTION`
<ide>
<ide> There was a bug in Node.js or incorrect usage of Node.js internals.
<ide> To fix the error, open an issue at https://github.com/nodejs/node/issues.
<ide>
<ide> <a id="ERR_INCOMPATIBLE_OPTION_PAIR"></a>
<del>### ERR_INCOMPATIBLE_OPTION_PAIR
<add>### `ERR_INCOMPATIBLE_OPTION_PAIR`
<ide>
<ide> The options in a given option pair are incompatible with each other and cannot
<ide> be used at the same time.
<ide>
<ide> <a id="ERR_INPUT_TYPE_NOT_ALLOWED"></a>
<del>### ERR_INPUT_TYPE_NOT_ALLOWED
<add>### `ERR_INPUT_TYPE_NOT_ALLOWED`
<ide>
<ide> > Stability: 1 - Experimental
<ide>
<ide> The `--input-type` flag was used to attempt to execute a file. This flag can
<ide> only be used with input via `--eval`, `--print` or `STDIN`.
<ide>
<ide> <a id="ERR_INSPECTOR_ALREADY_CONNECTED"></a>
<del>### ERR_INSPECTOR_ALREADY_CONNECTED
<add>### `ERR_INSPECTOR_ALREADY_CONNECTED`
<ide>
<ide> While using the `inspector` module, an attempt was made to connect when the
<ide> inspector was already connected.
<ide>
<ide> <a id="ERR_INSPECTOR_CLOSED"></a>
<del>### ERR_INSPECTOR_CLOSED
<add>### `ERR_INSPECTOR_CLOSED`
<ide>
<ide> While using the `inspector` module, an attempt was made to use the inspector
<ide> after the session had already closed.
<ide>
<ide> <a id="ERR_INSPECTOR_COMMAND"></a>
<del>### ERR_INSPECTOR_COMMAND
<add>### `ERR_INSPECTOR_COMMAND`
<ide>
<ide> An error occurred while issuing a command via the `inspector` module.
<ide>
<ide> <a id="ERR_INSPECTOR_NOT_ACTIVE"></a>
<del>### ERR_INSPECTOR_NOT_ACTIVE
<add>### `ERR_INSPECTOR_NOT_ACTIVE`
<ide>
<ide> The `inspector` is not active when `inspector.waitForDebugger()` is called.
<ide>
<ide> <a id="ERR_INSPECTOR_NOT_AVAILABLE"></a>
<del>### ERR_INSPECTOR_NOT_AVAILABLE
<add>### `ERR_INSPECTOR_NOT_AVAILABLE`
<ide>
<ide> The `inspector` module is not available for use.
<ide>
<ide> <a id="ERR_INSPECTOR_NOT_CONNECTED"></a>
<del>### ERR_INSPECTOR_NOT_CONNECTED
<add>### `ERR_INSPECTOR_NOT_CONNECTED`
<ide>
<ide> While using the `inspector` module, an attempt was made to use the inspector
<ide> before it was connected.
<ide>
<ide> <a id="ERR_INSPECTOR_NOT_WORKER"></a>
<del>### ERR_INSPECTOR_NOT_WORKER
<add>### `ERR_INSPECTOR_NOT_WORKER`
<ide>
<ide> An API was called on the main thread that can only be used from
<ide> the worker thread.
<ide>
<ide> <a id="ERR_INVALID_ADDRESS_FAMILY"></a>
<del>### ERR_INVALID_ADDRESS_FAMILY
<add>### `ERR_INVALID_ADDRESS_FAMILY`
<ide>
<ide> The provided address family is not understood by the Node.js API.
<ide>
<ide> <a id="ERR_INVALID_ARG_TYPE"></a>
<del>### ERR_INVALID_ARG_TYPE
<add>### `ERR_INVALID_ARG_TYPE`
<ide>
<ide> An argument of the wrong type was passed to a Node.js API.
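<ide> 
<ide> For instance:
<ide> 
<ide> ```js
<ide> // Buffer.from() expects a string, Buffer, ArrayBuffer, Array, or Array-like:
<ide> Buffer.from(42); // Throws TypeError [ERR_INVALID_ARG_TYPE]
<ide> ```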
<ide>
<ide> <a id="ERR_INVALID_ARG_VALUE"></a>
<del>### ERR_INVALID_ARG_VALUE
<add>### `ERR_INVALID_ARG_VALUE`
<ide>
<ide> An invalid or unsupported value was passed for a given argument.
<ide>
<ide> <a id="ERR_INVALID_ASYNC_ID"></a>
<del>### ERR_INVALID_ASYNC_ID
<add>### `ERR_INVALID_ASYNC_ID`
<ide>
<ide> An invalid `asyncId` or `triggerAsyncId` was passed using `AsyncHooks`. An id
<ide> less than -1 should never happen.
<ide>
<ide> <a id="ERR_INVALID_BUFFER_SIZE"></a>
<del>### ERR_INVALID_BUFFER_SIZE
<add>### `ERR_INVALID_BUFFER_SIZE`
<ide>
<ide> A swap was performed on a `Buffer` but its size was not compatible with the
<ide> operation.
<ide>
<ide> <a id="ERR_INVALID_CALLBACK"></a>
<del>### ERR_INVALID_CALLBACK
<add>### `ERR_INVALID_CALLBACK`
<ide>
<ide> A callback function was required but was not provided to a Node.js API.
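<ide> 
<ide> A minimal sketch (the path is illustrative):
<ide> 
<ide> ```js
<ide> const fs = require('fs');
<ide> fs.readFile('/some/file'); // Throws TypeError [ERR_INVALID_CALLBACK]
<ide> ```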
<ide>
<ide> <a id="ERR_INVALID_CHAR"></a>
<del>### ERR_INVALID_CHAR
<add>### `ERR_INVALID_CHAR`
<ide>
<ide> Invalid characters were detected in headers.
<ide>
<ide> <a id="ERR_INVALID_CURSOR_POS"></a>
<del>### ERR_INVALID_CURSOR_POS
<add>### `ERR_INVALID_CURSOR_POS`
<ide>
<ide> A cursor on a given stream cannot be moved to a specified row without a
<ide> specified column.
<ide>
<ide> <a id="ERR_INVALID_FD"></a>
<del>### ERR_INVALID_FD
<add>### `ERR_INVALID_FD`
<ide>
<ide> A file descriptor ('fd') was not valid (e.g. it was a negative value).
<ide>
<ide> <a id="ERR_INVALID_FD_TYPE"></a>
<del>### ERR_INVALID_FD_TYPE
<add>### `ERR_INVALID_FD_TYPE`
<ide>
<ide> A file descriptor ('fd') type was not valid.
<ide>
<ide> <a id="ERR_INVALID_FILE_URL_HOST"></a>
<del>### ERR_INVALID_FILE_URL_HOST
<add>### `ERR_INVALID_FILE_URL_HOST`
<ide>
<ide> A Node.js API that consumes `file:` URLs (such as certain functions in the
<ide> [`fs`][] module) encountered a file URL with an incompatible host. This
<ide> situation can only occur on Unix-like systems where only `localhost` or an empty
<ide> host is supported.
<ide>
<ide> <a id="ERR_INVALID_FILE_URL_PATH"></a>
<del>### ERR_INVALID_FILE_URL_PATH
<add>### `ERR_INVALID_FILE_URL_PATH`
<ide>
<ide> A Node.js API that consumes `file:` URLs (such as certain functions in the
<ide> [`fs`][] module) encountered a file URL with an incompatible path. The exact
<ide> semantics for determining whether a path can be used are platform-dependent.
<ide>
<ide> <a id="ERR_INVALID_HANDLE_TYPE"></a>
<del>### ERR_INVALID_HANDLE_TYPE
<add>### `ERR_INVALID_HANDLE_TYPE`
<ide>
<ide> An attempt was made to send an unsupported "handle" over an IPC communication
<ide> channel to a child process. See [`subprocess.send()`][] and [`process.send()`][]
<ide> for more information.
<ide>
<ide> <a id="ERR_INVALID_HTTP_TOKEN"></a>
<del>### ERR_INVALID_HTTP_TOKEN
<add>### `ERR_INVALID_HTTP_TOKEN`
<ide>
<ide> An invalid HTTP token was supplied.
<ide>
<ide> <a id="ERR_INVALID_IP_ADDRESS"></a>
<del>### ERR_INVALID_IP_ADDRESS
<add>### `ERR_INVALID_IP_ADDRESS`
<ide>
<ide> An IP address is not valid.
<ide>
<ide> <a id="ERR_INVALID_OPT_VALUE"></a>
<del>### ERR_INVALID_OPT_VALUE
<add>### `ERR_INVALID_OPT_VALUE`
<ide>
<ide> An invalid or unexpected value was passed in an options object.
<ide>
<ide> <a id="ERR_INVALID_OPT_VALUE_ENCODING"></a>
<del>### ERR_INVALID_OPT_VALUE_ENCODING
<add>### `ERR_INVALID_OPT_VALUE_ENCODING`
<ide>
<ide> An invalid or unknown file encoding was passed.
<ide>
<ide> <a id="ERR_INVALID_PACKAGE_CONFIG"></a>
<del>### ERR_INVALID_PACKAGE_CONFIG
<add>### `ERR_INVALID_PACKAGE_CONFIG`
<ide>
<ide> An invalid `package.json` file was found which failed parsing.
<ide>
<ide> <a id="ERR_INVALID_PERFORMANCE_MARK"></a>
<del>### ERR_INVALID_PERFORMANCE_MARK
<add>### `ERR_INVALID_PERFORMANCE_MARK`
<ide>
<ide> While using the Performance Timing API (`perf_hooks`), a performance mark is
<ide> invalid.
<ide>
<ide> <a id="ERR_INVALID_PROTOCOL"></a>
<del>### ERR_INVALID_PROTOCOL
<add>### `ERR_INVALID_PROTOCOL`
<ide>
<ide> An invalid `options.protocol` was passed to `http.request()`.
<ide>
<ide> <a id="ERR_INVALID_REPL_EVAL_CONFIG"></a>
<del>### ERR_INVALID_REPL_EVAL_CONFIG
<add>### `ERR_INVALID_REPL_EVAL_CONFIG`
<ide>
<ide> Both `breakEvalOnSigint` and `eval` options were set in the [`REPL`][] config,
<ide> which is not supported.
<ide>
<ide> <a id="ERR_INVALID_REPL_INPUT"></a>
<del>### ERR_INVALID_REPL_INPUT
<add>### `ERR_INVALID_REPL_INPUT`
<ide>
<ide> The input may not be used in the [`REPL`][]. All prohibited inputs are
<ide> documented in the [`REPL`][]'s documentation.
<ide>
<ide> <a id="ERR_INVALID_RETURN_PROPERTY"></a>
<del>### ERR_INVALID_RETURN_PROPERTY
<add>### `ERR_INVALID_RETURN_PROPERTY`
<ide>
<ide> Thrown in case a function option does not provide a valid value for one of its
<ide> returned object properties on execution.
<ide>
<ide> <a id="ERR_INVALID_RETURN_PROPERTY_VALUE"></a>
<del>### ERR_INVALID_RETURN_PROPERTY_VALUE
<add>### `ERR_INVALID_RETURN_PROPERTY_VALUE`
<ide>
<ide> Thrown in case a function option does not provide an expected value
<ide> type for one of its returned object properties on execution.
<ide>
<ide> <a id="ERR_INVALID_RETURN_VALUE"></a>
<del>### ERR_INVALID_RETURN_VALUE
<add>### `ERR_INVALID_RETURN_VALUE`
<ide>
<ide> Thrown in case a function option does not return an expected value
<ide> type on execution, such as when a function is expected to return a promise.
<ide>
<ide> <a id="ERR_INVALID_SYNC_FORK_INPUT"></a>
<del>### ERR_INVALID_SYNC_FORK_INPUT
<add>### `ERR_INVALID_SYNC_FORK_INPUT`
<ide>
<ide> A `Buffer`, `TypedArray`, `DataView` or `string` was provided as stdio input to
<ide> an asynchronous fork. See the documentation for the [`child_process`][] module
<ide> for more information.
<ide>
<ide> <a id="ERR_INVALID_THIS"></a>
<del>### ERR_INVALID_THIS
<add>### `ERR_INVALID_THIS`
<ide>
<ide> A Node.js API function was called with an incompatible `this` value.
<ide>
<ide> 
<ide> ```js
<ide> const urlSearchParams = new URLSearchParams('foo=bar&baz=new');
<ide> const buf = Buffer.alloc(1);
<ide> urlSearchParams.has.call(buf, 'foo');
<ide> // Throws a TypeError with code 'ERR_INVALID_THIS'
<ide> ```
<ide>
<ide> <a id="ERR_INVALID_TRANSFER_OBJECT"></a>
<del>### ERR_INVALID_TRANSFER_OBJECT
<add>### `ERR_INVALID_TRANSFER_OBJECT`
<ide>
<ide> An invalid transfer object was passed to `postMessage()`.
<ide>
<ide> <a id="ERR_INVALID_TUPLE"></a>
<del>### ERR_INVALID_TUPLE
<add>### `ERR_INVALID_TUPLE`
<ide>
<ide> An element in the `iterable` provided to the [WHATWG][WHATWG URL API]
<ide> [`URLSearchParams` constructor][`new URLSearchParams(iterable)`] did not
<ide> represent a `[name, value]` tuple – that is, if an element is not iterable, or
<ide> does not consist of exactly two elements.
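<ide> 
<ide> For instance, a pair with only one element:
<ide> 
<ide> ```js
<ide> new URLSearchParams([['key']]); // Throws TypeError [ERR_INVALID_TUPLE]
<ide> ```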
<ide>
<ide> <a id="ERR_INVALID_URI"></a>
<del>### ERR_INVALID_URI
<add>### `ERR_INVALID_URI`
<ide>
<ide> An invalid URI was passed.
<ide>
<ide> <a id="ERR_INVALID_URL"></a>
<del>### ERR_INVALID_URL
<add>### `ERR_INVALID_URL`
<ide>
<ide> An invalid URL was passed to the [WHATWG][WHATWG URL API]
<ide> [`URL` constructor][`new URL(input)`] to be parsed. The thrown error object
<ide> typically has an additional property `'input'` that contains the URL that failed
<ide> to parse.
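<ide> 
<ide> For instance:
<ide> 
<ide> ```js
<ide> try {
<ide>   new URL('not a valid url');
<ide> } catch (err) {
<ide>   // err.code === 'ERR_INVALID_URL'; err.input === 'not a valid url'
<ide> }
<ide> ```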
<ide>
<ide> <a id="ERR_INVALID_URL_SCHEME"></a>
<del>### ERR_INVALID_URL_SCHEME
<add>### `ERR_INVALID_URL_SCHEME`
<ide>
<ide> An attempt was made to use a URL of an incompatible scheme (protocol) for a
<ide> specific purpose. It is only used in the [WHATWG URL API][] support in the
<ide> [`fs`][] module (which only accepts URLs with `'file'` scheme), but may be used
<ide> in other Node.js APIs as well in the future.
<ide>
<ide> <a id="ERR_IPC_CHANNEL_CLOSED"></a>
<del>### ERR_IPC_CHANNEL_CLOSED
<add>### `ERR_IPC_CHANNEL_CLOSED`
<ide>
<ide> An attempt was made to use an IPC communication channel that was already closed.
<ide>
<ide> <a id="ERR_IPC_DISCONNECTED"></a>
<del>### ERR_IPC_DISCONNECTED
<add>### `ERR_IPC_DISCONNECTED`
<ide>
<ide> An attempt was made to disconnect an IPC communication channel that was already
<ide> disconnected. See the documentation for the [`child_process`][] module
<ide> for more information.
<ide>
<ide> <a id="ERR_IPC_ONE_PIPE"></a>
<del>### ERR_IPC_ONE_PIPE
<add>### `ERR_IPC_ONE_PIPE`
<ide>
<ide> An attempt was made to create a child Node.js process using more than one IPC
<ide> communication channel. See the documentation for the [`child_process`][] module
<ide> for more information.
<ide>
<ide> <a id="ERR_IPC_SYNC_FORK"></a>
<del>### ERR_IPC_SYNC_FORK
<add>### `ERR_IPC_SYNC_FORK`
<ide>
<ide> An attempt was made to open an IPC communication channel with a synchronously
<ide> forked Node.js process. See the documentation for the [`child_process`][] module
<ide> for more information.
<ide>
<ide> <a id="ERR_MANIFEST_ASSERT_INTEGRITY"></a>
<del>### ERR_MANIFEST_ASSERT_INTEGRITY
<add>### `ERR_MANIFEST_ASSERT_INTEGRITY`
<ide>
<ide> An attempt was made to load a resource, but the resource did not match the
<ide> integrity defined by the policy manifest. See the documentation for [policy][]
<ide> manifests for more information.
<ide>
<ide> <a id="ERR_MANIFEST_DEPENDENCY_MISSING"></a>
<del>### ERR_MANIFEST_DEPENDENCY_MISSING
<add>### `ERR_MANIFEST_DEPENDENCY_MISSING`
<ide>
<ide> An attempt was made to load a resource, but the resource was not listed as a
<ide> dependency from the location that attempted to load it. See the documentation
<ide> for [policy][] manifests for more information.
<ide>
<ide> <a id="ERR_MANIFEST_INTEGRITY_MISMATCH"></a>
<del>### ERR_MANIFEST_INTEGRITY_MISMATCH
<add>### `ERR_MANIFEST_INTEGRITY_MISMATCH`
<ide>
<ide> An attempt was made to load a policy manifest, but the manifest had multiple
<ide> entries for a resource which did not match each other. Update the manifest
<ide> entries to match in order to resolve this error. See the documentation for
<ide> [policy][] manifests for more information.
<ide>
<ide> <a id="ERR_MANIFEST_INVALID_RESOURCE_FIELD"></a>
<del>### ERR_MANIFEST_INVALID_RESOURCE_FIELD
<add>### `ERR_MANIFEST_INVALID_RESOURCE_FIELD`
<ide>
<ide> A policy manifest resource had an invalid value for one of its fields. Update
<ide> the manifest entry to match in order to resolve this error. See the
<ide> documentation for [policy][] manifests for more information.
<ide>
<ide> <a id="ERR_MANIFEST_PARSE_POLICY"></a>
<del>### ERR_MANIFEST_PARSE_POLICY
<add>### `ERR_MANIFEST_PARSE_POLICY`
<ide>
<ide> An attempt was made to load a policy manifest, but the manifest was unable to
<ide> be parsed. See the documentation for [policy][] manifests for more information.
<ide>
<ide> <a id="ERR_MANIFEST_TDZ"></a>
<del>### ERR_MANIFEST_TDZ
<add>### `ERR_MANIFEST_TDZ`
<ide>
<ide> An attempt was made to read from a policy manifest, but the manifest
<ide> initialization has not yet taken place. This is likely a bug in Node.js.
<ide>
<ide> <a id="ERR_MANIFEST_UNKNOWN_ONERROR"></a>
<del>### ERR_MANIFEST_UNKNOWN_ONERROR
<add>### `ERR_MANIFEST_UNKNOWN_ONERROR`
<ide>
<ide> A policy manifest was loaded, but had an unknown value for its "onerror"
<ide> behavior. See the documentation for [policy][] manifests for more information.
<ide>
<ide> <a id="ERR_MEMORY_ALLOCATION_FAILED"></a>
<del>### ERR_MEMORY_ALLOCATION_FAILED
<add>### `ERR_MEMORY_ALLOCATION_FAILED`
<ide>
<ide> An attempt was made to allocate memory (usually in the C++ layer) but it
<ide> failed.
<ide>
<ide> <a id="ERR_METHOD_NOT_IMPLEMENTED"></a>
<del>### ERR_METHOD_NOT_IMPLEMENTED
<add>### `ERR_METHOD_NOT_IMPLEMENTED`
<ide>
<ide> A method is required but not implemented.
<ide>
<ide> <a id="ERR_MISSING_ARGS"></a>
<del>### ERR_MISSING_ARGS
<add>### `ERR_MISSING_ARGS`
<ide>
<ide> A required argument of a Node.js API was not passed. This is only used for
<ide> strict compliance with the API specification (which in some cases may accept
<ide> `func(undefined)` but not `func()`). In most native Node.js APIs,
<ide> `func(undefined)` and `func()` are treated identically, and the
<ide> [`ERR_INVALID_ARG_TYPE`][] error code may be used instead.
<ide>
<ide> <a id="ERR_MISSING_DYNAMIC_INSTANTIATE_HOOK"></a>
<del>### ERR_MISSING_DYNAMIC_INSTANTIATE_HOOK
<add>### `ERR_MISSING_DYNAMIC_INSTANTIATE_HOOK`
<ide>
<ide> > Stability: 1 - Experimental
<ide>
<ide> An [ES Module][] loader hook specified `format: 'dynamic'` but did not provide
<ide> a `dynamicInstantiate` hook.
<ide>
<ide> <a id="ERR_MISSING_MESSAGE_PORT_IN_TRANSFER_LIST"></a>
<del>### ERR_MISSING_MESSAGE_PORT_IN_TRANSFER_LIST
<add>### `ERR_MISSING_MESSAGE_PORT_IN_TRANSFER_LIST`
<ide>
<ide> A `MessagePort` was found in the object passed to a `postMessage()` call,
<ide> but not provided in the `transferList` for that call.
<ide>
<ide> <a id="ERR_MISSING_PASSPHRASE"></a>
<del>### ERR_MISSING_PASSPHRASE
<add>### `ERR_MISSING_PASSPHRASE`
<ide>
<ide> An attempt was made to read an encrypted key without specifying a passphrase.
<ide>
<ide> <a id="ERR_MISSING_PLATFORM_FOR_WORKER"></a>
<del>### ERR_MISSING_PLATFORM_FOR_WORKER
<add>### `ERR_MISSING_PLATFORM_FOR_WORKER`
<ide>
<ide> The V8 platform used by this instance of Node.js does not support creating
<ide> Workers. This is caused by lack of embedder support for Workers. In particular,
<ide> this error will not occur with standard builds of Node.js.
<ide>
<ide> <a id="ERR_MODULE_NOT_FOUND"></a>
<del>### ERR_MODULE_NOT_FOUND
<add>### `ERR_MODULE_NOT_FOUND`
<ide>
<ide> > Stability: 1 - Experimental
<ide>
<ide> An [ES Module][] could not be resolved.
<ide>
<ide> <a id="ERR_MULTIPLE_CALLBACK"></a>
<del>### ERR_MULTIPLE_CALLBACK
<add>### `ERR_MULTIPLE_CALLBACK`
<ide>
<ide> A callback was called more than once.
<ide>
<ide> A callback is almost always meant to only be called once as the query
<ide> can either be fulfilled or rejected but not both at the same time. The latter
<ide> would be possible by calling a callback more than once.
<ide>
<ide> <a id="ERR_NAPI_CONS_FUNCTION"></a>
<del>### ERR_NAPI_CONS_FUNCTION
<add>### `ERR_NAPI_CONS_FUNCTION`
<ide>
<ide> While using `N-API`, a constructor passed was not a function.
<ide>
<ide> <a id="ERR_NAPI_INVALID_DATAVIEW_ARGS"></a>
<del>### ERR_NAPI_INVALID_DATAVIEW_ARGS
<add>### `ERR_NAPI_INVALID_DATAVIEW_ARGS`
<ide>
<ide> While calling `napi_create_dataview()`, a given `offset` was outside the bounds
<ide> of the dataview, or `offset + length` was larger than the length of the given
<ide> `buffer`.
<ide>
<ide> <a id="ERR_NAPI_INVALID_TYPEDARRAY_ALIGNMENT"></a>
<del>### ERR_NAPI_INVALID_TYPEDARRAY_ALIGNMENT
<add>### `ERR_NAPI_INVALID_TYPEDARRAY_ALIGNMENT`
<ide>
<ide> While calling `napi_create_typedarray()`, the provided `offset` was not a
<ide> multiple of the element size.
<ide>
<ide> <a id="ERR_NAPI_INVALID_TYPEDARRAY_LENGTH"></a>
<del>### ERR_NAPI_INVALID_TYPEDARRAY_LENGTH
<add>### `ERR_NAPI_INVALID_TYPEDARRAY_LENGTH`
<ide>
<ide> While calling `napi_create_typedarray()`, `(length * size_of_element) +
<ide> byte_offset` was larger than the length of the given `buffer`.
<ide>
<ide> <a id="ERR_NAPI_TSFN_CALL_JS"></a>
<del>### ERR_NAPI_TSFN_CALL_JS
<add>### `ERR_NAPI_TSFN_CALL_JS`
<ide>
<ide> An error occurred while invoking the JavaScript portion of the thread-safe
<ide> function.
<ide>
<ide> <a id="ERR_NAPI_TSFN_GET_UNDEFINED"></a>
<del>### ERR_NAPI_TSFN_GET_UNDEFINED
<add>### `ERR_NAPI_TSFN_GET_UNDEFINED`
<ide>
<ide> An error occurred while attempting to retrieve the JavaScript `undefined`
<ide> value.
<ide>
<ide> <a id="ERR_NAPI_TSFN_START_IDLE_LOOP"></a>
<del>### ERR_NAPI_TSFN_START_IDLE_LOOP
<add>### `ERR_NAPI_TSFN_START_IDLE_LOOP`
<ide>
<ide> On the main thread, values are removed from the queue associated with the
<ide> thread-safe function in an idle loop. This error indicates that an error
<ide> has occurred when attempting to start the loop.
<ide>
<ide> <a id="ERR_NAPI_TSFN_STOP_IDLE_LOOP"></a>
<del>### ERR_NAPI_TSFN_STOP_IDLE_LOOP
<add>### `ERR_NAPI_TSFN_STOP_IDLE_LOOP`
<ide>
<ide> Once no more items are left in the queue, the idle loop must be suspended. This
<ide> error indicates that the idle loop has failed to stop.
<ide>
<ide> <a id="ERR_NO_CRYPTO"></a>
<del>### ERR_NO_CRYPTO
<add>### `ERR_NO_CRYPTO`
<ide>
<ide> An attempt was made to use crypto features while Node.js was not compiled with
<ide> OpenSSL crypto support.
<ide>
<ide> <a id="ERR_NO_ICU"></a>
<del>### ERR_NO_ICU
<add>### `ERR_NO_ICU`
<ide>
<ide> An attempt was made to use features that require [ICU][], but Node.js was not
<ide> compiled with ICU support.
<ide>
<ide> <a id="ERR_NON_CONTEXT_AWARE_DISABLED"></a>
<del>### ERR_NON_CONTEXT_AWARE_DISABLED
<add>### `ERR_NON_CONTEXT_AWARE_DISABLED`
<ide>
<ide> A non-context-aware native addon was loaded in a process that disallows them.
<ide>
<ide> <a id="ERR_OUT_OF_RANGE"></a>
<del>### ERR_OUT_OF_RANGE
<add>### `ERR_OUT_OF_RANGE`
<ide>
<ide> A given value is out of the accepted range.
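<ide> 
<ide> For instance, reading past the end of a `Buffer`:
<ide> 
<ide> ```js
<ide> // A 4-byte buffer can only satisfy readInt32BE() at offset 0:
<ide> Buffer.alloc(4).readInt32BE(2); // Throws RangeError [ERR_OUT_OF_RANGE]
<ide> ```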
<ide>
<ide> <a id="ERR_REQUIRE_ESM"></a>
<del>### ERR_REQUIRE_ESM
<add>### `ERR_REQUIRE_ESM`
<ide>
<ide> > Stability: 1 - Experimental
<ide>
<ide> An attempt was made to `require()` an [ES Module][].
<ide>
<ide> <a id="ERR_SCRIPT_EXECUTION_INTERRUPTED"></a>
<del>### ERR_SCRIPT_EXECUTION_INTERRUPTED
<add>### `ERR_SCRIPT_EXECUTION_INTERRUPTED`
<ide>
<ide> Script execution was interrupted by `SIGINT` (for example, when Ctrl+C was
<ide> pressed).
<ide>
<ide> <a id="ERR_SCRIPT_EXECUTION_TIMEOUT"></a>
<del>### ERR_SCRIPT_EXECUTION_TIMEOUT
<add>### `ERR_SCRIPT_EXECUTION_TIMEOUT`
<ide>
<ide> Script execution timed out, possibly due to bugs in the script being executed.
<ide>
<ide> <a id="ERR_SERVER_ALREADY_LISTEN"></a>
<del>### ERR_SERVER_ALREADY_LISTEN
<add>### `ERR_SERVER_ALREADY_LISTEN`
<ide>
<ide> The [`server.listen()`][] method was called while a `net.Server` was already
<ide> listening. This applies to all instances of `net.Server`, including HTTP, HTTPS,
<ide> and HTTP/2 `Server` instances.
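<ide> 
<ide> A minimal sketch:
<ide> 
<ide> ```js
<ide> const net = require('net');
<ide> const server = net.createServer();
<ide> server.listen(0, () => {
<ide>   server.listen(0); // Throws Error [ERR_SERVER_ALREADY_LISTEN]
<ide> });
<ide> ```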
<ide>
<ide> <a id="ERR_SERVER_NOT_RUNNING"></a>
<del>### ERR_SERVER_NOT_RUNNING
<add>### `ERR_SERVER_NOT_RUNNING`
<ide>
<ide> The [`server.close()`][] method was called when a `net.Server` was not
<ide> running. This applies to all instances of `net.Server`, including HTTP, HTTPS,
<ide> and HTTP/2 `Server` instances.
<ide>
<ide> <a id="ERR_SOCKET_ALREADY_BOUND"></a>
<del>### ERR_SOCKET_ALREADY_BOUND
<add>### `ERR_SOCKET_ALREADY_BOUND`
<ide>
<ide> An attempt was made to bind a socket that has already been bound.
<ide>
<ide> <a id="ERR_SOCKET_BAD_BUFFER_SIZE"></a>
<del>### ERR_SOCKET_BAD_BUFFER_SIZE
<add>### `ERR_SOCKET_BAD_BUFFER_SIZE`
<ide>
<ide> An invalid (negative) size was passed for either the `recvBufferSize` or
<ide> `sendBufferSize` options in [`dgram.createSocket()`][].
<ide>
<ide> <a id="ERR_SOCKET_BAD_PORT"></a>
<del>### ERR_SOCKET_BAD_PORT
<add>### `ERR_SOCKET_BAD_PORT`
<ide>
<ide> An API function expecting a port >= 0 and < 65536 received an invalid value.
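<ide> 
<ide> For instance:
<ide> 
<ide> ```js
<ide> const net = require('net');
<ide> net.createServer().listen(65536); // Throws RangeError [ERR_SOCKET_BAD_PORT]
<ide> ```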
<ide>
<ide> <a id="ERR_SOCKET_BAD_TYPE"></a>
<del>### ERR_SOCKET_BAD_TYPE
<add>### `ERR_SOCKET_BAD_TYPE`
<ide>
<ide> An API function expecting a socket type (`udp4` or `udp6`) received an invalid
<ide> value.
<ide>
<ide> <a id="ERR_SOCKET_BUFFER_SIZE"></a>
<del>### ERR_SOCKET_BUFFER_SIZE
<add>### `ERR_SOCKET_BUFFER_SIZE`
<ide>
<ide> While using [`dgram.createSocket()`][], the size of the receive or send `Buffer`
<ide> could not be determined.
<ide>
<ide> <a id="ERR_SOCKET_CANNOT_SEND"></a>
<del>### ERR_SOCKET_CANNOT_SEND
<add>### `ERR_SOCKET_CANNOT_SEND`
<ide>
<ide> Data could not be sent on a socket.
<ide>
<ide> <a id="ERR_SOCKET_CLOSED"></a>
<del>### ERR_SOCKET_CLOSED
<add>### `ERR_SOCKET_CLOSED`
<ide>
<ide> An attempt was made to operate on an already closed socket.
<ide>
<ide> <a id="ERR_SOCKET_DGRAM_IS_CONNECTED"></a>
<del>### ERR_SOCKET_DGRAM_IS_CONNECTED
<add>### `ERR_SOCKET_DGRAM_IS_CONNECTED`
<ide>
<ide> A [`dgram.connect()`][] call was made on an already connected socket.
<ide>
<ide> <a id="ERR_SOCKET_DGRAM_NOT_CONNECTED"></a>
<del>### ERR_SOCKET_DGRAM_NOT_CONNECTED
<add>### `ERR_SOCKET_DGRAM_NOT_CONNECTED`
<ide>
<ide> A [`dgram.disconnect()`][] or [`dgram.remoteAddress()`][] call was made on a
<ide> disconnected socket.
<ide>
<ide> <a id="ERR_SOCKET_DGRAM_NOT_RUNNING"></a>
<del>### ERR_SOCKET_DGRAM_NOT_RUNNING
<add>### `ERR_SOCKET_DGRAM_NOT_RUNNING`
<ide>
<ide> A call was made and the UDP subsystem was not running.
<ide>
<ide> <a id="ERR_SRI_PARSE"></a>
<del>### ERR_SRI_PARSE
<add>### `ERR_SRI_PARSE`
<ide>
<ide> A string was provided for a Subresource Integrity check, but was unable to be
<ide> parsed. Check the format of integrity attributes by looking at the
<ide> [Subresource Integrity specification][].
<ide>
<ide> <a id="ERR_STREAM_CANNOT_PIPE"></a>
<del>### ERR_STREAM_CANNOT_PIPE
<add>### `ERR_STREAM_CANNOT_PIPE`
<ide>
<ide> An attempt was made to call [`stream.pipe()`][] on a [`Writable`][] stream.
<ide>
<ide> <a id="ERR_STREAM_DESTROYED"></a>
<del>### ERR_STREAM_DESTROYED
<add>### `ERR_STREAM_DESTROYED`
<ide>
<ide> A stream method was called that cannot complete because the stream was
<ide> destroyed using `stream.destroy()`.
<ide>
<ide> <a id="ERR_STREAM_ALREADY_FINISHED"></a>
<del>### ERR_STREAM_ALREADY_FINISHED
<add>### `ERR_STREAM_ALREADY_FINISHED`
<ide>
<ide> A stream method was called that cannot complete because the stream was
<ide> finished.
<ide>
<ide> <a id="ERR_STREAM_NULL_VALUES"></a>
<del>### ERR_STREAM_NULL_VALUES
<add>### `ERR_STREAM_NULL_VALUES`
<ide>
<ide> An attempt was made to call [`stream.write()`][] with a `null` chunk.
<ide>
<ide> <a id="ERR_STREAM_PREMATURE_CLOSE"></a>
<del>### ERR_STREAM_PREMATURE_CLOSE
<add>### `ERR_STREAM_PREMATURE_CLOSE`
<ide>
<ide> An error returned by `stream.finished()` and `stream.pipeline()`, when a stream
<ide> or a pipeline ends non-gracefully with no explicit error.
<ide>
<ide> <a id="ERR_STREAM_PUSH_AFTER_EOF"></a>
<del>### ERR_STREAM_PUSH_AFTER_EOF
<add>### `ERR_STREAM_PUSH_AFTER_EOF`
<ide>
<ide> An attempt was made to call [`stream.push()`][] after a `null` (EOF) had been
<ide> pushed to the stream.
<ide>
<ide> <a id="ERR_STREAM_UNSHIFT_AFTER_END_EVENT"></a>
<del>### ERR_STREAM_UNSHIFT_AFTER_END_EVENT
<add>### `ERR_STREAM_UNSHIFT_AFTER_END_EVENT`
<ide>
<ide> An attempt was made to call [`stream.unshift()`][] after the `'end'` event was
<ide> emitted.
<ide>
<ide> <a id="ERR_STREAM_WRAP"></a>
<del>### ERR_STREAM_WRAP
<add>### `ERR_STREAM_WRAP`
<ide>
<ide> Prevents an abort if a string decoder was set on the Socket or if the decoder
<ide> is in `objectMode`.
<ide> 
<ide> ```js
<ide> const Socket = require('net').Socket;
<ide> const instance = new Socket();
<ide> 
<ide> instance.setEncoding('utf8');
<ide> ```
<ide>
<ide> <a id="ERR_STREAM_WRITE_AFTER_END"></a>
<del>### ERR_STREAM_WRITE_AFTER_END
<add>### `ERR_STREAM_WRITE_AFTER_END`
<ide>
<ide> An attempt was made to call [`stream.write()`][] after `stream.end()` has been
<ide> called.
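<ide> 
<ide> A minimal sketch; the error is delivered asynchronously via the `'error'`
<ide> event (and the `write()` callback, if one is provided):
<ide> 
<ide> ```js
<ide> const { PassThrough } = require('stream');
<ide> const stream = new PassThrough();
<ide> stream.on('error', (err) => {
<ide>   // err.code === 'ERR_STREAM_WRITE_AFTER_END'
<ide> });
<ide> stream.end();
<ide> stream.write('too late');
<ide> ```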
<ide>
<ide> <a id="ERR_STRING_TOO_LONG"></a>
<del>### ERR_STRING_TOO_LONG
<add>### `ERR_STRING_TOO_LONG`
<ide>
<ide> An attempt has been made to create a string longer than the maximum allowed
<ide> length.
<ide>
<ide> <a id="ERR_SYNTHETIC"></a>
<del>### ERR_SYNTHETIC
<add>### `ERR_SYNTHETIC`
<ide>
<ide> An artificial error object used to capture the call stack for diagnostic
<ide> reports.
<ide>
<ide> <a id="ERR_SYSTEM_ERROR"></a>
<del>### ERR_SYSTEM_ERROR
<add>### `ERR_SYSTEM_ERROR`
<ide>
<ide> An unspecified or non-specific system error has occurred within the Node.js
<ide> process. The error object will have an `err.info` object property with
<ide> additional details.
<ide>
<ide> <a id="ERR_TLS_CERT_ALTNAME_INVALID"></a>
<del>### ERR_TLS_CERT_ALTNAME_INVALID
<add>### `ERR_TLS_CERT_ALTNAME_INVALID`
<ide>
<ide> While using TLS, the hostname/IP of the peer did not match any of the
<ide> `subjectAltNames` in its certificate.
<ide>
<ide> <a id="ERR_TLS_DH_PARAM_SIZE"></a>
<del>### ERR_TLS_DH_PARAM_SIZE
<add>### `ERR_TLS_DH_PARAM_SIZE`
<ide>
<ide> While using TLS, the parameter offered for the Diffie-Hellman (`DH`)
<ide> key-agreement protocol is too small. By default, the key length must be greater
<ide> than or equal to 1024 bits to avoid vulnerabilities, even though it is strongly
<ide> recommended to use 2048 bits or larger for stronger security.
<ide>
<ide> <a id="ERR_TLS_HANDSHAKE_TIMEOUT"></a>
<del>### ERR_TLS_HANDSHAKE_TIMEOUT
<add>### `ERR_TLS_HANDSHAKE_TIMEOUT`
<ide>
<ide> A TLS/SSL handshake timed out. In this case, the server must also abort the
<ide> connection.
<ide>
<ide> <a id="ERR_TLS_INVALID_CONTEXT">
<del>### ERR_TLS_INVALID_CONTEXT
<add>### `ERR_TLS_INVALID_CONTEXT`
<ide> <!-- YAML
<ide> added: v13.3.0
<ide> -->
<ide>
<ide> The context must be a `SecureContext`.
<ide>
<ide> <a id="ERR_TLS_INVALID_PROTOCOL_METHOD"></a>
<del>### ERR_TLS_INVALID_PROTOCOL_METHOD
<add>### `ERR_TLS_INVALID_PROTOCOL_METHOD`
<ide>
<ide> The specified `secureProtocol` method is invalid. It is either unknown, or
<ide> disabled because it is insecure.
<ide>
<ide> <a id="ERR_TLS_INVALID_PROTOCOL_VERSION"></a>
<del>### ERR_TLS_INVALID_PROTOCOL_VERSION
<add>### `ERR_TLS_INVALID_PROTOCOL_VERSION`
<ide>
<ide> Valid TLS protocol versions are `'TLSv1'`, `'TLSv1.1'`, or `'TLSv1.2'`.
<ide>
<ide> <a id="ERR_TLS_PROTOCOL_VERSION_CONFLICT"></a>
<del>### ERR_TLS_PROTOCOL_VERSION_CONFLICT
<add>### `ERR_TLS_PROTOCOL_VERSION_CONFLICT`
<ide>
<ide> Attempting to set a TLS protocol `minVersion` or `maxVersion` conflicts with an
<ide> attempt to set the `secureProtocol` explicitly. Use one mechanism or the other.
<ide>
<ide> <a id="ERR_TLS_RENEGOTIATION_DISABLED"></a>
<del>### ERR_TLS_RENEGOTIATION_DISABLED
<add>### `ERR_TLS_RENEGOTIATION_DISABLED`
<ide>
<ide> An attempt was made to renegotiate TLS on a socket instance with TLS disabled.
<ide>
<ide> <a id="ERR_TLS_REQUIRED_SERVER_NAME"></a>
<del>### ERR_TLS_REQUIRED_SERVER_NAME
<add>### `ERR_TLS_REQUIRED_SERVER_NAME`
<ide>
<ide> While using TLS, the `server.addContext()` method was called without providing
<ide> a hostname in the first parameter.
<ide>
<ide> <a id="ERR_TLS_SESSION_ATTACK"></a>
<del>### ERR_TLS_SESSION_ATTACK
<add>### `ERR_TLS_SESSION_ATTACK`
<ide>
<ide> An excessive number of TLS renegotiations was detected, which is a potential
<ide> vector for denial-of-service attacks.
<ide>
<ide> <a id="ERR_TLS_SNI_FROM_SERVER"></a>
<del>### ERR_TLS_SNI_FROM_SERVER
<add>### `ERR_TLS_SNI_FROM_SERVER`
<ide>
<ide> An attempt was made to issue Server Name Indication from a TLS server-side
<ide> socket, which is only valid from a client.
<ide> 
<ide> <a id="ERR_TLS_PSK_SET_IDENTIY_HINT_FAILED"></a>
<del>### ERR_TLS_PSK_SET_IDENTIY_HINT_FAILED
<add>### `ERR_TLS_PSK_SET_IDENTIY_HINT_FAILED`
<ide> 
<ide> Failed to set PSK identity hint. Hint may be too long.
<ide>
<ide> <a id="ERR_TRACE_EVENTS_CATEGORY_REQUIRED"></a>
<del>### ERR_TRACE_EVENTS_CATEGORY_REQUIRED
<add>### `ERR_TRACE_EVENTS_CATEGORY_REQUIRED`
<ide>
<ide> The `trace_events.createTracing()` method requires at least one trace event
<ide> category.
<ide>
<ide> <a id="ERR_TRACE_EVENTS_UNAVAILABLE"></a>
<del>### ERR_TRACE_EVENTS_UNAVAILABLE
<add>### `ERR_TRACE_EVENTS_UNAVAILABLE`
<ide>
<ide> The `trace_events` module could not be loaded because Node.js was compiled with
<ide> the `--without-v8-platform` flag.
<ide>
<ide> <a id="ERR_TRANSFERRING_EXTERNALIZED_SHAREDARRAYBUFFER"></a>
<del>### ERR_TRANSFERRING_EXTERNALIZED_SHAREDARRAYBUFFER
<add>### `ERR_TRANSFERRING_EXTERNALIZED_SHAREDARRAYBUFFER`
<ide>
<ide> A `SharedArrayBuffer` whose memory is not managed by the JavaScript engine
<ide> or by Node.js was encountered during serialization. Such a `SharedArrayBuffer`
<ide> cannot be serialized.
<ide> 
<ide> This can only happen when native addons create `SharedArrayBuffer`s in
<ide> "externalized" mode, or put an existing `SharedArrayBuffer` into externalized
<ide> mode.
<ide>
<ide> <a id="ERR_TRANSFORM_ALREADY_TRANSFORMING"></a>
<del>### ERR_TRANSFORM_ALREADY_TRANSFORMING
<add>### `ERR_TRANSFORM_ALREADY_TRANSFORMING`
<ide>
<ide> A `Transform` stream finished while it was still transforming.
<ide>
<ide> <a id="ERR_TRANSFORM_WITH_LENGTH_0"></a>
<del>### ERR_TRANSFORM_WITH_LENGTH_0
<add>### `ERR_TRANSFORM_WITH_LENGTH_0`
<ide>
<ide> A `Transform` stream finished with data still in the write buffer.
<ide>
<ide> <a id="ERR_TTY_INIT_FAILED"></a>
<del>### ERR_TTY_INIT_FAILED
<add>### `ERR_TTY_INIT_FAILED`
<ide>
<ide> The initialization of a TTY failed due to a system error.
<ide>
<ide> <a id="ERR_UNCAUGHT_EXCEPTION_CAPTURE_ALREADY_SET"></a>
<del>### ERR_UNCAUGHT_EXCEPTION_CAPTURE_ALREADY_SET
<add>### `ERR_UNCAUGHT_EXCEPTION_CAPTURE_ALREADY_SET`
<ide>
<ide> [`process.setUncaughtExceptionCaptureCallback()`][] was called twice,
<ide> without first resetting the callback to `null`.
<ide> This error is designed to prevent accidentally overwriting a callback registered
<ide> from another module.
<ide>
<ide> <a id="ERR_UNESCAPED_CHARACTERS"></a>
<del>### ERR_UNESCAPED_CHARACTERS
<add>### `ERR_UNESCAPED_CHARACTERS`
<ide>
<ide> A string that contained unescaped characters was received.
<ide>
<ide> <a id="ERR_UNHANDLED_ERROR"></a>
<del>### ERR_UNHANDLED_ERROR
<add>### `ERR_UNHANDLED_ERROR`
<ide>
<ide> An unhandled error occurred (for instance, when an `'error'` event is emitted
<ide> by an [`EventEmitter`][] but an `'error'` handler is not registered).
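<ide> 
<ide> For instance, emitting a non-`Error` value with no `'error'` listener attached
<ide> (an `Error` value would be thrown as-is instead):
<ide> 
<ide> ```js
<ide> const EventEmitter = require('events');
<ide> new EventEmitter().emit('error', 'oops'); // Throws Error [ERR_UNHANDLED_ERROR]
<ide> ```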
<ide>
<ide> <a id="ERR_UNKNOWN_BUILTIN_MODULE"></a>
<del>### ERR_UNKNOWN_BUILTIN_MODULE
<add>### `ERR_UNKNOWN_BUILTIN_MODULE`
<ide>
<ide> Used to identify a specific kind of internal Node.js error that should not
<ide> typically be triggered by user code. Instances of this error point to an
<ide> internal bug within the Node.js binary itself.
<ide>
<ide> <a id="ERR_UNKNOWN_CREDENTIAL"></a>
<del>### ERR_UNKNOWN_CREDENTIAL
<add>### `ERR_UNKNOWN_CREDENTIAL`
<ide>
<ide> A Unix group or user identifier that does not exist was passed.
<ide>
<ide> <a id="ERR_UNKNOWN_ENCODING"></a>
<del>### ERR_UNKNOWN_ENCODING
<add>### `ERR_UNKNOWN_ENCODING`
<ide>
<ide> An invalid or unknown encoding option was passed to an API.
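<ide> 
<ide> For instance (the encoding name is deliberately invalid):
<ide> 
<ide> ```js
<ide> Buffer.from('abc').toString('bogus'); // Throws TypeError [ERR_UNKNOWN_ENCODING]
<ide> ```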
<ide>
<ide> <a id="ERR_UNKNOWN_FILE_EXTENSION"></a>
<del>### ERR_UNKNOWN_FILE_EXTENSION
<add>### `ERR_UNKNOWN_FILE_EXTENSION`
<ide>
<ide> > Stability: 1 - Experimental
<ide>
<ide> An attempt was made to load a module with an unknown or unsupported file
<ide> extension.
<ide>
<ide> <a id="ERR_UNKNOWN_MODULE_FORMAT"></a>
<del>### ERR_UNKNOWN_MODULE_FORMAT
<add>### `ERR_UNKNOWN_MODULE_FORMAT`
<ide>
<ide> > Stability: 1 - Experimental
<ide>
<ide> An attempt was made to load a module with an unknown or unsupported format.
<ide>
<ide> <a id="ERR_UNKNOWN_SIGNAL"></a>
<del>### ERR_UNKNOWN_SIGNAL
<add>### `ERR_UNKNOWN_SIGNAL`
<ide>
<ide> An invalid or unknown process signal was passed to an API expecting a valid
<ide> signal (such as [`subprocess.kill()`][]).
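<ide> 
<ide> For instance (the signal name is deliberately invalid):
<ide> 
<ide> ```js
<ide> process.kill(process.pid, 'SIGNOTREAL'); // Throws TypeError [ERR_UNKNOWN_SIGNAL]
<ide> ```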
<ide>
<ide> <a id="ERR_V8BREAKITERATOR"></a>
<del>### ERR_V8BREAKITERATOR
<add>### `ERR_V8BREAKITERATOR`
<ide>
<ide> The V8 `BreakIterator` API was used but the full ICU data set is not installed.
<ide>
<ide> <a id="ERR_VALID_PERFORMANCE_ENTRY_TYPE"></a>
<del>### ERR_VALID_PERFORMANCE_ENTRY_TYPE
<add>### `ERR_VALID_PERFORMANCE_ENTRY_TYPE`
<ide>
<ide> While using the Performance Timing API (`perf_hooks`), no valid performance
<ide> entry types were found.
<ide>
<ide> <a id="ERR_VM_DYNAMIC_IMPORT_CALLBACK_MISSING"></a>
<del>### ERR_VM_DYNAMIC_IMPORT_CALLBACK_MISSING
<add>### `ERR_VM_DYNAMIC_IMPORT_CALLBACK_MISSING`
<ide>
<ide> A dynamic import callback was not specified.
<ide>
<ide> <a id="ERR_VM_MODULE_ALREADY_LINKED"></a>
<del>### ERR_VM_MODULE_ALREADY_LINKED
<add>### `ERR_VM_MODULE_ALREADY_LINKED`
<ide>
<ide> The module attempted to be linked is not eligible for linking, because of one of
<ide> the following reasons:
<ide> 
<ide> * It has already been linked (`linkingStatus` is `'linked'`)
<ide> * It is being linked (`linkingStatus` is `'linking'`)
<ide> * Linking has failed for this module (`linkingStatus` is `'errored'`)
<ide>
<ide> <a id="ERR_VM_MODULE_DIFFERENT_CONTEXT"></a>
<del>### ERR_VM_MODULE_DIFFERENT_CONTEXT
<add>### `ERR_VM_MODULE_DIFFERENT_CONTEXT`
<ide>
<ide> The module being returned from the linker function is from a different context
<ide> than the parent module. Linked modules must share the same context.
<ide>
<ide> <a id="ERR_VM_MODULE_LINKING_ERRORED"></a>
<del>### ERR_VM_MODULE_LINKING_ERRORED
<add>### `ERR_VM_MODULE_LINKING_ERRORED`
<ide>
<ide> The linker function returned a module for which linking has failed.
<ide>
<ide> <a id="ERR_VM_MODULE_NOT_MODULE"></a>
<del>### ERR_VM_MODULE_NOT_MODULE
<add>### `ERR_VM_MODULE_NOT_MODULE`
<ide>
<ide> The fulfilled value of a linking promise is not a `vm.Module` object.
<ide>
<ide> <a id="ERR_VM_MODULE_STATUS"></a>
<del>### ERR_VM_MODULE_STATUS
<add>### `ERR_VM_MODULE_STATUS`
<ide>
<ide> The current module's status does not allow for this operation. The specific
<ide> meaning of the error depends on the specific function.
<ide>
<ide> <a id="ERR_WASI_ALREADY_STARTED"></a>
<del>### ERR_WASI_ALREADY_STARTED
<add>### `ERR_WASI_ALREADY_STARTED`
<ide>
<ide> The WASI instance has already started.
<ide>
<ide> <a id="ERR_WORKER_INVALID_EXEC_ARGV"></a>
<del>### ERR_WORKER_INVALID_EXEC_ARGV
<add>### `ERR_WORKER_INVALID_EXEC_ARGV`
<ide>
<ide> The `execArgv` option passed to the `Worker` constructor contains
<ide> invalid flags.
<ide>
<ide> <a id="ERR_WORKER_OUT_OF_MEMORY"></a>
<del>### ERR_WORKER_OUT_OF_MEMORY
<add>### `ERR_WORKER_OUT_OF_MEMORY`
<ide>
<ide> The `Worker` instance terminated because it reached its memory limit.
<ide>
<ide> <a id="ERR_WORKER_PATH"></a>
<del>### ERR_WORKER_PATH
<add>### `ERR_WORKER_PATH`
<ide>
<ide> The path for the main script of a worker is neither an absolute path
<ide> nor a relative path starting with `./` or `../`.
<ide>
<ide> <a id="ERR_WORKER_UNSERIALIZABLE_ERROR"></a>
<del>### ERR_WORKER_UNSERIALIZABLE_ERROR
<add>### `ERR_WORKER_UNSERIALIZABLE_ERROR`
<ide>
<ide> All attempts at serializing an uncaught exception from a worker thread failed.
<ide>
<ide> <a id="ERR_WORKER_UNSUPPORTED_EXTENSION"></a>
<del>### ERR_WORKER_UNSUPPORTED_EXTENSION
<add>### `ERR_WORKER_UNSUPPORTED_EXTENSION`
<ide>
<ide> The pathname used for the main script of a worker has an
<ide> unknown file extension.
<ide>
<ide> <a id="ERR_WORKER_UNSUPPORTED_OPERATION"></a>
<del>### ERR_WORKER_UNSUPPORTED_OPERATION
<add>### `ERR_WORKER_UNSUPPORTED_OPERATION`
<ide>
<ide> The requested functionality is not supported in worker threads.
<ide>
<ide> <a id="ERR_ZLIB_INITIALIZATION_FAILED"></a>
<del>### ERR_ZLIB_INITIALIZATION_FAILED
<add>### `ERR_ZLIB_INITIALIZATION_FAILED`
<ide>
<ide> Creation of a [`zlib`][] object failed due to incorrect configuration.
<ide>
<ide> <a id="HPE_HEADER_OVERFLOW"></a>
<del>### HPE_HEADER_OVERFLOW
<add>### `HPE_HEADER_OVERFLOW`
<ide> <!-- YAML
<ide> changes:
<ide> - version: v11.4.0
<ide> -->
<ide> 
<ide> HTTP parsing will abort without a request or response object being created, and
<ide> an `Error` with this code will be emitted.
<ide>
<ide> <a id="MODULE_NOT_FOUND"></a>
<del>### MODULE_NOT_FOUND
<add>### `MODULE_NOT_FOUND`
<ide> <!-- YAML
<ide> changes:
<ide> - version: v12.0.0
<ide> -->
<ide> 
<ide> A module file could not be resolved while attempting a [`require()`][] or
<ide> `import` operation.
<ide> 
<ide> ## Legacy Node.js error codes
<ide> 
<ide> > Stability: 0 - Deprecated. These error codes are either inconsistent, or have
<ide> > been removed.
<ide>
<ide> <a id="ERR_CANNOT_TRANSFER_OBJECT"></a>
<del>### ERR_CANNOT_TRANSFER_OBJECT
<add>### `ERR_CANNOT_TRANSFER_OBJECT`
<ide> <!--
<ide> added: v10.5.0
<ide> removed: v12.5.0
<ide> -->
<ide> 
<ide> The value passed to `postMessage()` contained an object that is not supported
<ide> for transferring.
<ide>
<ide> <a id="ERR_CLOSED_MESSAGE_PORT"></a>
<del>### ERR_CLOSED_MESSAGE_PORT
<add>### `ERR_CLOSED_MESSAGE_PORT`
<ide> <!-- YAML
<ide> added: v10.5.0
<ide> removed: v11.12.0
<ide> -->
<ide> 
<ide> There was an attempt to use a `MessagePort` instance in a closed
<ide> state, usually after `.close()` has been called.
<ide>
<ide> <a id="ERR_CRYPTO_HASH_DIGEST_NO_UTF16"></a>
<del>### ERR_CRYPTO_HASH_DIGEST_NO_UTF16
<add>### `ERR_CRYPTO_HASH_DIGEST_NO_UTF16`
<ide> <!-- YAML
<ide> added: v9.0.0
<ide> removed: v12.12.0
<ide> -->
<ide> 
<ide> The UTF-16 encoding was used with [`hash.digest()`][]. While the
<ide> `hash.digest()` method does allow an `encoding` argument to be passed in,
<ide> causing the method to return a string rather than a `Buffer`, the UTF-16
<ide> encoding (e.g. `ucs` or `utf16le`) is not supported.
<ide>
<ide> <a id="ERR_HTTP2_FRAME_ERROR"></a>
<del>### ERR_HTTP2_FRAME_ERROR
<add>### `ERR_HTTP2_FRAME_ERROR`
<ide> <!-- YAML
<ide> added: v9.0.0
<ide> removed: v10.0.0
<ide> -->
<ide> 
<ide> Used when a failure occurs sending an individual frame on the HTTP/2
<ide> session.
<ide>
<ide> <a id="ERR_HTTP2_HEADERS_OBJECT"></a>
<del>### ERR_HTTP2_HEADERS_OBJECT
<add>### `ERR_HTTP2_HEADERS_OBJECT`
<ide> <!-- YAML
<ide> added: v9.0.0
<ide> removed: v10.0.0
<ide> -->
<ide> 
<ide> Used when an HTTP/2 Headers Object is expected.
<ide>
<ide> <a id="ERR_HTTP2_HEADER_REQUIRED"></a>
<del>### ERR_HTTP2_HEADER_REQUIRED
<add>### `ERR_HTTP2_HEADER_REQUIRED`
<ide> <!-- YAML
<ide> added: v9.0.0
<ide> removed: v10.0.0
<ide> -->
<ide> 
<ide> Used when a required header is missing in an HTTP/2 message.
<ide>
<ide> <a id="ERR_HTTP2_INFO_HEADERS_AFTER_RESPOND"></a>
<del>### ERR_HTTP2_INFO_HEADERS_AFTER_RESPOND
<add>### `ERR_HTTP2_INFO_HEADERS_AFTER_RESPOND`
<ide> <!-- YAML
<ide> added: v9.0.0
<ide> removed: v10.0.0
<ide> -->
<ide> 
<ide> HTTP/2 informational headers must only be sent *prior* to calling the
<ide> `Http2Stream.prototype.respond()` method.
<ide>
<ide> <a id="ERR_HTTP2_STREAM_CLOSED"></a>
<del>### ERR_HTTP2_STREAM_CLOSED
<add>### `ERR_HTTP2_STREAM_CLOSED`
<ide> <!-- YAML
<ide> added: v9.0.0
<ide> removed: v10.0.0
<ide> -->
<ide> 
<ide> Used when an action has been performed on an HTTP/2 Stream that has already
<ide> been closed.
<ide>
<ide> <a id="ERR_HTTP_INVALID_CHAR"></a>
<del>### ERR_HTTP_INVALID_CHAR
<add>### `ERR_HTTP_INVALID_CHAR`
<ide> <!-- YAML
<ide> added: v9.0.0
<ide> removed: v10.0.0
<ide> -->
<ide> 
<ide> Used when an invalid character is found in an HTTP response status message
<ide> (reason phrase).
<ide>
<ide> <a id="ERR_INDEX_OUT_OF_RANGE"></a>
<del>### ERR_INDEX_OUT_OF_RANGE
<add>### `ERR_INDEX_OUT_OF_RANGE`
<ide> <!-- YAML
<ide> added: v10.0.0
<ide> removed: v11.0.0
<ide> -->
<ide> A given index was out of the accepted range (e.g. negative offsets).
<ide>
<ide> <a id="ERR_NAPI_CONS_PROTOTYPE_OBJECT"></a>
<del>### ERR_NAPI_CONS_PROTOTYPE_OBJECT
<add>### `ERR_NAPI_CONS_PROTOTYPE_OBJECT`
<ide> <!-- YAML
<ide> added: v9.0.0
<ide> removed: v10.0.0
<ide> removed: v10.0.0
<ide> Used by the `N-API` when `Constructor.prototype` is not an object.
<ide>
<ide> <a id="ERR_NO_LONGER_SUPPORTED"></a>
<del>### ERR_NO_LONGER_SUPPORTED
<add>### `ERR_NO_LONGER_SUPPORTED`
<ide>
<ide> A Node.js API was called in an unsupported manner, such as
<ide> `Buffer.write(string, encoding, offset[, length])`.
<ide>
<ide> <a id="ERR_OUTOFMEMORY"></a>
<del>### ERR_OUTOFMEMORY
<add>### `ERR_OUTOFMEMORY`
<ide> <!-- YAML
<ide> added: v9.0.0
<ide> removed: v10.0.0
<ide> Used generically to identify that an operation caused an out of memory
<ide> condition.
<ide>
<ide> <a id="ERR_PARSE_HISTORY_DATA"></a>
<del>### ERR_PARSE_HISTORY_DATA
<add>### `ERR_PARSE_HISTORY_DATA`
<ide> <!-- YAML
<ide> added: v9.0.0
<ide> removed: v10.0.0
<ide> removed: v10.0.0
<ide> The `repl` module was unable to parse data from the REPL history file.
<ide>
<ide> <a id="ERR_STDERR_CLOSE"></a>
<del>### ERR_STDERR_CLOSE
<add>### `ERR_STDERR_CLOSE`
<ide> <!-- YAML
<ide> removed: v10.12.0
<ide> changes:
<ide> An attempt was made to close the `process.stderr` stream. By design, Node.js
<ide> does not allow `stdout` or `stderr` streams to be closed by user code.
<ide>
<ide> <a id="ERR_STDOUT_CLOSE"></a>
<del>### ERR_STDOUT_CLOSE
<add>### `ERR_STDOUT_CLOSE`
<ide> <!-- YAML
<ide> removed: v10.12.0
<ide> changes:
<ide> An attempt was made to close the `process.stdout` stream. By design, Node.js
<ide> does not allow `stdout` or `stderr` streams to be closed by user code.
<ide>
<ide> <a id="ERR_STREAM_READ_NOT_IMPLEMENTED"></a>
<del>### ERR_STREAM_READ_NOT_IMPLEMENTED
<add>### `ERR_STREAM_READ_NOT_IMPLEMENTED`
<ide> <!-- YAML
<ide> added: v9.0.0
<ide> removed: v10.0.0
<ide> Used when an attempt is made to use a readable stream that has not implemented
<ide> [`readable._read()`][].
<ide>
<ide> <a id="ERR_TLS_RENEGOTIATION_FAILED"></a>
<del>### ERR_TLS_RENEGOTIATION_FAILED
<add>### `ERR_TLS_RENEGOTIATION_FAILED`
<ide> <!-- YAML
<ide> added: v9.0.0
<ide> removed: v10.0.0
<ide> removed: v10.0.0
<ide> Used when a TLS renegotiation request has failed in a non-specific way.
<ide>
<ide> <a id="ERR_UNKNOWN_BUILTIN_MODULE"></a>
<del>### ERR_UNKNOWN_BUILTIN_MODULE
<add>### `ERR_UNKNOWN_BUILTIN_MODULE`
<ide> <!-- YAML
<ide> added: v8.0.0
<ide> removed: v9.0.0
<ide> code. Instances of this error point to an internal bug within the Node.js
<ide> binary itself.
<ide>
<ide> <a id="ERR_UNKNOWN_STDIN_TYPE"></a>
<del>### ERR_UNKNOWN_STDIN_TYPE
<add>### `ERR_UNKNOWN_STDIN_TYPE`
<ide> <!-- YAML
<ide> added: v8.0.0
<ide> removed: v11.7.0
<ide> type. This error is usually an indication of a bug within Node.js itself,
<ide> although it is possible for user code to trigger it.
<ide>
<ide> <a id="ERR_UNKNOWN_STREAM_TYPE"></a>
<del>### ERR_UNKNOWN_STREAM_TYPE
<add>### `ERR_UNKNOWN_STREAM_TYPE`
<ide> <!-- YAML
<ide> added: v8.0.0
<ide> removed: v11.7.0
<ide> An attempt was made to launch a Node.js process with an unknown `stdout` or
<ide> itself, although it is possible for user code to trigger it.
<ide>
<ide> <a id="ERR_VALUE_OUT_OF_RANGE"></a>
<del>### ERR_VALUE_OUT_OF_RANGE
<add>### `ERR_VALUE_OUT_OF_RANGE`
<ide> <!-- YAML
<ide> added: v9.0.0
<ide> removed: v10.0.0
<ide> removed: v10.0.0
<ide> Used when a given value is out of the accepted range.
<ide>
<ide> <a id="ERR_VM_MODULE_NOT_LINKED"></a>
<del>### ERR_VM_MODULE_NOT_LINKED
<add>### `ERR_VM_MODULE_NOT_LINKED`
<ide>
<ide> The module must be successfully linked before instantiation.
<ide>
<ide> <a id="ERR_ZLIB_BINDING_CLOSED"></a>
<del>### ERR_ZLIB_BINDING_CLOSED
<add>### `ERR_ZLIB_BINDING_CLOSED`
<ide> <!-- YAML
<ide> added: v9.0.0
<ide> removed: v10.0.0
<ide> These errors have never been released, but had been present on master between
<ide> releases.
<ide>
<ide> <a id="ERR_ENTRY_TYPE_MISMATCH"></a>
<del>#### ERR_ENTRY_TYPE_MISMATCH
<add>#### `ERR_ENTRY_TYPE_MISMATCH`
<ide>
<ide> > Stability: 1 - Experimental
<ide>
<ide> a `.js` file where the nearest parent `package.json` either lacks a `"type"`
<ide> field or contains `"type": "commonjs"`.
<ide>
<ide> <a id="ERR_FS_WATCHER_ALREADY_STARTED"></a>
<del>#### ERR_FS_WATCHER_ALREADY_STARTED
<add>#### `ERR_FS_WATCHER_ALREADY_STARTED`
<ide>
<ide> An attempt was made to start a watcher returned by `fs.watch()` that has
<ide> already been started.
<ide>
<ide> <a id="ERR_FS_WATCHER_NOT_STARTED"></a>
<del>#### ERR_FS_WATCHER_NOT_STARTED
<add>#### `ERR_FS_WATCHER_NOT_STARTED`
<ide>
<ide> An attempt was made to initiate operations on a watcher returned by
<ide> `fs.watch()` that has not yet been started.
<ide>
<ide> <a id="ERR_HTTP2_ALREADY_SHUTDOWN"></a>
<del>#### ERR_HTTP2_ALREADY_SHUTDOWN
<add>#### `ERR_HTTP2_ALREADY_SHUTDOWN`
<ide>
<ide> Occurs with multiple attempts to shutdown an HTTP/2 session.
<ide>
<ide> <a id="ERR_HTTP2_ERROR"></a>
<del>#### ERR_HTTP2_ERROR
<add>#### `ERR_HTTP2_ERROR`
<ide>
<ide> A non-specific HTTP/2 error has occurred.
<ide>
<ide> <a id="ERR_INVALID_REPL_HISTORY"></a>
<del>#### ERR_INVALID_REPL_HISTORY
<add>#### `ERR_INVALID_REPL_HISTORY`
<ide>
<ide> Used in the `repl` in case the old history file is used and an error occurred
<ide> while trying to read and parse it.
<ide>
<ide> <a id="ERR_INVALID_REPL_TYPE"></a>
<del>#### ERR_INVALID_REPL_TYPE
<add>#### `ERR_INVALID_REPL_TYPE`
<ide>
<ide> > Stability: 1 - Experimental
<ide>
<ide> The `--entry-type=...` flag is not compatible with the Node.js REPL.
<ide>
<ide> <a id="ERR_MISSING_DYNAMIC_INSTANTIATE_HOOK"></a>
<del>#### ERR_MISSING_DYNAMIC_INSTANTIATE_HOOK
<add>#### `ERR_MISSING_DYNAMIC_INSTANTIATE_HOOK`
<ide>
<ide> Used when an [ES Module][] loader hook specifies `format: 'dynamic'` but does
<ide> not provide a `dynamicInstantiate` hook.
<ide>
<ide> <a id="ERR_STREAM_HAS_STRINGDECODER"></a>
<del>#### ERR_STREAM_HAS_STRINGDECODER
<add>#### `ERR_STREAM_HAS_STRINGDECODER`
<ide>
<ide> Used to prevent an abort if a string decoder was set on the Socket.
<ide>
<ide> instance.setEncoding('utf8');
<ide> ```
<ide>
<ide> <a id="ERR_STRING_TOO_LARGE"></a>
<del>#### ERR_STRING_TOO_LARGE
<add>#### `ERR_STRING_TOO_LARGE`
<ide>
<ide> An attempt has been made to create a string larger than the maximum allowed
<ide> size.
<ide>
<ide> <a id="ERR_TTY_WRITABLE_NOT_READABLE"></a>
<del>#### ERR_TTY_WRITABLE_NOT_READABLE
<add>#### `ERR_TTY_WRITABLE_NOT_READABLE`
<ide>
<ide> This `Error` is thrown when a read is attempted on a TTY `WriteStream`,
<ide> such as `process.stdout.on('data')`.
<ide><path>tools/eslint-rules/documented-errors.js
<ide> const doc = fs.readFileSync(path.resolve(__dirname, '../../doc/api/errors.md'),
<ide> 'utf8');
<ide>
<ide> function isInDoc(code) {
<del> return doc.match(`### ${code}`) != null;
<add> return doc.includes(`### \`${code}\``);
<ide> }
<ide>
<ide> function includesAnchor(code) {
<del> return doc.match(`<a id="${code}"></a>`) != null;
<add> return doc.includes(`<a id="${code}"></a>`);
<ide> }
<ide>
<ide> function errorForNode(node) {
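Note on the lint-rule change: `String.prototype.match` compiles a string argument into a regular expression, so any regex metacharacters in the looked-up text would silently change the pattern, while `includes` is a literal substring test. A minimal Python sketch of the same distinction, using a hypothetical `ERR_EXAMPLE` code:

```python
import re

doc = '### `ERR_EXAMPLE`\n<a id="ERR_EXAMPLE"></a>'
code = "ERR_EXAMPLE"

# Regex lookup: backticks happen to be literal, but a "(" or "+" in the
# code name would change the meaning of the pattern.
pattern_hit = re.search(f"### `{code}`", doc) is not None

# Plain substring test (like String.prototype.includes): matches literally,
# no escaping concerns.
substring_hit = f"### `{code}`" in doc

assert pattern_hit and substring_hit
```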
| 2
|
Text
|
Text
|
fix firefox bug
|
fff3d5dfb44b0fd326c65266d32a4ae7b1dc375d
|
<ide><path>docs/QuickStart-GettingStarted.md
<ide> function display(type, value) {
<ide> var container = document.getElementsByTagName('block')[0].parentNode;
<ide> container.className = 'display-' + type + '-' + value + ' ' +
<ide> container.className.replace(RegExp('display-' + type + '-[a-z]+ ?'), '');
<del> event && event.preventDefault();
<ide> }
<ide>
<ide> // If we are coming to the page with a hash in it (i.e. from a search, for example), try to get
| 1
|
Python
|
Python
|
replace `assert` with `valueerror`
|
04cddaf402591e9f5bdb5f116a111d829a0ce4f4
|
<ide><path>src/transformers/models/bert_generation/modeling_bert_generation.py
<ide> def load_tf_weights_in_bert_generation(
<ide> else:
<ide> model_pointer = model_pointer.weight
<ide>
<del> try:
<del> assert (
<del> model_pointer.shape == array.shape
<del> ), f"Pointer shape {model_pointer.shape} and array shape {array.shape} mismatched"
<del> except AssertionError as e:
<del> e.args += (model_pointer.shape, array.shape)
<del> raise
<add> if model_pointer.shape != array.shape:
<add> raise ValueError(f"Pointer shape {model_pointer.shape} and array shape {array.shape} mismatched")
<ide> logger.info(f"Initialize PyTorch weight {key}")
<ide>
<ide> model_pointer.data = torch.from_numpy(array.astype(np.float32))
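The replacement matters beyond style: `assert` statements are stripped when Python runs with `-O`, so a shape check that must always fire belongs in an explicit exception. A minimal sketch of the pattern, with a hypothetical `check_shapes` helper:

```python
def check_shapes(pointer_shape, array_shape):
    # Unlike an assert, this check survives `python -O`.
    if pointer_shape != array_shape:
        raise ValueError(
            f"Pointer shape {pointer_shape} and array shape {array_shape} mismatched"
        )

check_shapes((3, 4), (3, 4))     # passes silently
try:
    check_shapes((3, 4), (4, 3))
except ValueError as err:
    print(err)  # Pointer shape (3, 4) and array shape (4, 3) mismatched
```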
| 1
|
PHP
|
PHP
|
fix code to not be a breaking chage
|
30c4e946033db1df58b2f11d4d8f6885a356c3b8
|
<ide><path>src/Illuminate/Database/Query/Builder.php
<ide> protected function restoreFieldsForCount()
<ide> */
<ide> public function exists()
<ide> {
<del> return $this->limit(1)->count() > 0;
<add> $limit = $this->limit;
<add>
<add> $result = $this->limit(1)->count() > 0;
<add>
<add> $this->limit($limit);
<add>
<add> return $result;
<ide> }
<ide>
<ide> /**
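The fix saves and restores the builder's limit around the internal `limit(1)->count()` probe, so `exists()` no longer clobbers a limit the caller had already set. A framework-free Python sketch of that save/restore discipline (the `Builder` class here is illustrative, not Laravel's):

```python
class Builder:
    def __init__(self, rows):
        self.rows = rows
        self.limit_value = None

    def limit(self, n):
        self.limit_value = n
        return self

    def count(self):
        rows = self.rows if self.limit_value is None else self.rows[: self.limit_value]
        return len(rows)

    def exists(self):
        previous = self.limit_value          # remember the caller's limit
        result = self.limit(1).count() > 0   # cheap existence probe
        self.limit_value = previous          # restore it before returning
        return result

b = Builder([1, 2, 3]).limit(2)
assert b.exists() and b.limit_value == 2     # limit survives the exists() call
```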
| 1
|
Javascript
|
Javascript
|
pass systrace and refresh as globals
|
306edff62c5af8997a9760c2b7b7f600ced5510e
|
<ide><path>Libraries/Core/setUpReactRefresh.js
<ide> if (__DEV__) {
<ide> },
<ide> };
<ide>
<del> (require: any).Refresh = Refresh;
<add> // The metro require polyfill can not have dependencies (applies for all polyfills).
<add> // Expose `Refresh` by assigning it to global to make it available in the polyfill.
<add> global[(global.__METRO_GLOBAL_PREFIX__ || '') + '__ReactRefresh'] = Refresh;
<ide> }
<ide><path>Libraries/Performance/Systrace.js
<ide> const Systrace = {
<ide> };
<ide>
<ide> if (__DEV__) {
<del> // This is needed, because require callis in polyfills are not processed as
<del> // other files. Therefore, calls to `require('moduleId')` are not replaced
<del> // with numeric IDs
<del> // TODO(davidaurelio) Scan polyfills for dependencies, too (t9759686)
<del> (require: $FlowFixMe).Systrace = Systrace;
<add> // The metro require polyfill can not have dependencies (true for all polyfills).
<add> // Ensure that `Systrace` is available in polyfill by exposing it globally.
<add> global[(global.__METRO_GLOBAL_PREFIX__ || '') + '__SYSTRACE'] = Systrace;
<ide> }
<ide>
<ide> module.exports = Systrace;
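Both changes follow the same rule: code that runs before the module system is up (a polyfill) cannot `require` anything, so shared services are parked on a prefixed global instead. A minimal Python analogue using `builtins` as the global namespace (the prefix name mirrors the patch; the helpers are hypothetical):

```python
import builtins

PREFIX = getattr(builtins, "__METRO_GLOBAL_PREFIX__", "")

def expose(name, service):
    # Publisher side: park the service on a prefixed global.
    setattr(builtins, PREFIX + name, service)

def lookup(name):
    # Polyfill side: no imports available, so resolve by global name.
    return getattr(builtins, PREFIX + name, None)

expose("__SYSTRACE", {"isEnabled": lambda: False})
assert lookup("__SYSTRACE") is not None
```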
| 2
|
Ruby
|
Ruby
|
fix odebug calls
|
a6643b4adf13f7bb5b7ba7f771241a471189d946
|
<ide><path>Library/Homebrew/cask/audit.rb
<ide> def run!
<ide> check_bitbucket_repository
<ide> self
<ide> rescue => e
<del> odebug "#{e.message}\n#{e.backtrace.join("\n")}"
<add> odebug e, e.backtrace
<ide> add_error "exception while auditing #{cask}: #{e.message}"
<ide> self
<ide> end
<ide><path>Library/Homebrew/formula_versions.rb
<ide> def formula_at_revision(rev)
<ide> rescue *IGNORED_EXCEPTIONS => e
<ide> # We rescue these so that we can skip bad versions and
<ide> # continue walking the history
<del> odebug "#{e} in #{name} at revision #{rev}", e.backtrace if debug?
<add> odebug "#{e} in #{name} at revision #{rev}", e.backtrace
<ide> rescue FormulaUnavailableError
<ide> nil
<ide> ensure
<ide> def bottle_version_map(branch)
<ide> end
<ide> return map if versions_seen > MAX_VERSIONS_DEPTH
<ide> rescue MacOSVersionError => e
<del> odebug "#{e} in #{name} at revision #{rev}" if debug?
<add> odebug "#{e} in #{name} at revision #{rev}"
<ide> break
<ide> end
<ide> map
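The `if debug?` guards can be dropped because `odebug` itself is expected to check verbosity before printing. A minimal Python sketch of a logger with that contract (names are illustrative):

```python
import sys
import traceback

DEBUG = False  # imagine a --debug flag toggles this

def odebug(*messages):
    # The helper owns the verbosity check, so call sites need no guard.
    if DEBUG:
        print(*messages, sep="\n", file=sys.stderr)

try:
    raise RuntimeError("boom")
except RuntimeError as exc:
    odebug(exc, traceback.format_exc())  # silent unless DEBUG is True
```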
| 2
|
Javascript
|
Javascript
|
use default value for undefined props
|
58b3ae3136e959350ece18a753392ab23ab177b6
|
<ide><path>src/core/ReactCompositeComponent.js
<ide> var ReactCompositeComponentMixin = {
<ide> var propName;
<ide> var defaultProps = this._defaultProps;
<ide> for (propName in defaultProps) {
<del> if (!(propName in props)) {
<add> if (typeof props[propName] === 'undefined') {
<ide> props[propName] = defaultProps[propName];
<ide> }
<ide> }
<ide><path>src/core/__tests__/ReactCompositeComponent-test.js
<ide> describe('ReactCompositeComponent', function() {
<ide>
<ide> });
<ide>
<add> it('should use default values for undefined props', function() {
<add> var Component = React.createClass({
<add> getDefaultProps: function() {
<add> return {key: 'testKey'};
<add> },
<add> render: function() {
<add> return <span />;
<add> }
<add> });
<add>
<add> var instance1 = <Component />;
<add> ReactTestUtils.renderIntoDocument(instance1);
<add> reactComponentExpect(instance1).scalarPropsEqual({key: 'testKey'});
<add>
<add> var instance2 = <Component key={undefined} />;
<add> ReactTestUtils.renderIntoDocument(instance2);
<add> reactComponentExpect(instance2).scalarPropsEqual({key: 'testKey'});
<add>
<add> var instance3 = <Component key={null} />;
<add> ReactTestUtils.renderIntoDocument(instance3);
<add> reactComponentExpect(instance3).scalarPropsEqual({key: null});
<add> });
<add>
<ide> it('should normalize props with default values', function() {
<ide> var Component = React.createClass({
<ide> propTypes: {key: ReactPropTypes.string.isRequired},
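The behavioral point of the patch: a prop explicitly set to `null` keeps that value, while one that is missing or set to `undefined` falls back to the default. Python has no `undefined`, but a sentinel object models the same three-way distinction:

```python
_UNSET = object()  # stands in for JS `undefined`

def merge_defaults(props, defaults):
    merged = dict(props)
    for name, value in defaults.items():
        # Apply the default only when the prop is absent or unset;
        # an explicit None (JS null) is preserved.
        if merged.get(name, _UNSET) is _UNSET:
            merged[name] = value
    return merged

assert merge_defaults({}, {"key": "testKey"}) == {"key": "testKey"}
assert merge_defaults({"key": _UNSET}, {"key": "testKey"}) == {"key": "testKey"}
assert merge_defaults({"key": None}, {"key": "testKey"}) == {"key": None}
```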
| 2
|
Ruby
|
Ruby
|
remove ancient todos [ci skip]
|
85261a5e96ca4b7a03899faf8e04a3e0cb5567e2
|
<ide><path>actionpack/test/controller/output_escaping_test.rb
<ide> class OutputEscapingTest < ActiveSupport::TestCase
<ide> end
<ide>
<ide> test "escapeHTML shouldn't touch explicitly safe strings" do
<del> # TODO this seems easier to compose and reason about, but
<del> # this should be verified
<ide> assert_equal "<", ERB::Util.h("<".html_safe)
<ide> end
<ide>
<ide><path>actionpack/test/template/url_helper_test.rb
<ide> def test_url_for_with_back_and_no_referer
<ide> assert_equal 'javascript:history.back()', url_for(:back)
<ide> end
<ide>
<del> # TODO: missing test cases
<ide> def test_button_to_with_straight_url
<ide> assert_dom_equal %{<form method="post" action="http://www.example.com" class="button_to"><div><input type="submit" value="Hello" /></div></form>}, button_to("Hello", "http://www.example.com")
<ide> end
<ide><path>activerecord/test/cases/locking_test.rb
<ide> def counter_test(model, expected_count)
<ide> # is so cumbersome. Will deadlock Ruby threads if the underlying db.execute
<ide> # blocks, so separate script called by Kernel#system is needed.
<ide> # (See exec vs. async_exec in the PostgreSQL adapter.)
<del>
<del># TODO: The Sybase, and OpenBase adapters currently have no support for pessimistic locking
<del>
<ide> unless current_adapter?(:SybaseAdapter, :OpenBaseAdapter) || in_memory_db?
<ide> class PessimisticLockingTest < ActiveRecord::TestCase
<ide> self.use_transactional_fixtures = false
| 3
|
Python
|
Python
|
patch albert with heads in tensorflow
|
1abd53b1aa2f15953bbbbbfefda885d1d9c9d94b
|
<ide><path>src/transformers/modeling_tf_albert.py
<ide> logger = logging.getLogger(__name__)
<ide>
<ide> TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP = {
<del> "albert-base-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-v1-tf_model.h5",
<del> "albert-large-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-v1-tf_model.h5",
<del> "albert-xlarge-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-v1-tf_model.h5",
<del> "albert-xxlarge-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-v1-tf_model.h5",
<del> "albert-base-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-v2-tf_model.h5",
<del> "albert-large-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-v2-tf_model.h5",
<del> "albert-xlarge-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-v2-tf_model.h5",
<del> "albert-xxlarge-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-v2-tf_model.h5",
<add> "albert-base-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-v1-with-prefix-tf_model.h5",
<add> "albert-large-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-v1-with-prefix-tf_model.h5",
<add> "albert-xlarge-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-v1-with-prefix-tf_model.h5",
<add> "albert-xxlarge-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-v1-with-prefix-tf_model.h5",
<add> "albert-base-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-v2-with-prefix-tf_model.h5",
<add> "albert-large-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-v2-with-prefix-tf_model.h5",
<add> "albert-xlarge-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-v2-with-prefix-tf_model.h5",
<add> "albert-xxlarge-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-v2-with-prefix-tf_model.h5",
<ide> }
<ide>
<ide>
<ide> def call(self, hidden_states):
<ide> return hidden_states
<ide>
<ide>
<del>ALBERT_START_DOCSTRING = r"""
<del> This model is a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ sub-class.
<del> Use it as a regular TF 2.0 Keras Model and
<del> refer to the TF 2.0 documentation for all matter related to general usage and behavior.
<del>
<del> .. _`ALBERT: A Lite BERT for Self-supervised Learning of Language Representations`:
<del> https://arxiv.org/abs/1909.11942
<del>
<del> .. _`tf.keras.Model`:
<del> https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/Model
<del>
<del> .. note::
<del>
<del> TF 2.0 models accepts two formats as inputs:
<del>
<del> - having all inputs as keyword arguments (like PyTorch models), or
<del> - having all inputs as a list, tuple or dict in the first positional arguments.
<del>
<del> This second option is useful when using :obj:`tf.keras.Model.fit()` method which currently requires having
<del> all the tensors in the first argument of the model call function: :obj:`model(inputs)`.
<del>
<del> If you choose this second option, there are three possibilities you can use to gather all the input Tensors
<del> in the first positional argument :
<del>
<del> - a single Tensor with input_ids only and nothing else: :obj:`model(inputs_ids)`
<del> - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
<del> :obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`
<del> - a dictionary with one or several input Tensors associated to the input names given in the docstring:
<del> :obj:`model({'input_ids': input_ids, 'token_type_ids': token_type_ids})`
<del>
<del> Args:
<del> config (:class:`~transformers.AlbertConfig`): Model configuration class with all the parameters of the model.
<del> Initializing with a config file does not load the weights associated with the model, only the configuration.
<del> Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
<del>"""
<del>
<del>ALBERT_INPUTS_DOCSTRING = r"""
<del> Args:
<del> input_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):
<del> Indices of input sequence tokens in the vocabulary.
<del>
<del> Indices can be obtained using :class:`transformers.AlbertTokenizer`.
<del> See :func:`transformers.PreTrainedTokenizer.encode` and
<del> :func:`transformers.PreTrainedTokenizer.encode_plus` for details.
<del>
<del> `What are input IDs? <../glossary.html#input-ids>`__
<del> attention_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional, defaults to :obj:`None`):
<del> Mask to avoid performing attention on padding token indices.
<del> Mask values selected in ``[0, 1]``:
<del> ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
<del>
<del> `What are attention masks? <../glossary.html#attention-mask>`__
<del> token_type_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
<del> Segment token indices to indicate first and second portions of the inputs.
<del> Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
<del> corresponds to a `sentence B` token
<del>
<del> `What are token type IDs? <../glossary.html#token-type-ids>`_
<del> position_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
<del> Indices of positions of each input sequence tokens in the position embeddings.
<del> Selected in the range ``[0, config.max_position_embeddings - 1]``.
<del>
<del> `What are position IDs? <../glossary.html#position-ids>`_
<del> head_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
<del> Mask to nullify selected heads of the self-attention modules.
<del> Mask values selected in ``[0, 1]``:
<del> ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
<del> input_embeds (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
<del> Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
<del> This is useful if you want more control over how to convert `input_ids` indices into associated vectors
<del> than the model's internal embedding lookup matrix.
<del> training (:obj:`boolean`, `optional`, defaults to :obj:`False`):
<del> Whether to activate dropout modules (if set to :obj:`True`) during training or to de-activate them
<del> (if set to :obj:`False`) for evaluation.
<del>"""
<del>
<del>
<del>@add_start_docstrings(
<del> "The bare Albert Model transformer outputing raw hidden-states without any specific head on top.",
<del> ALBERT_START_DOCSTRING,
<del>)
<del>class TFAlbertModel(TFAlbertPreTrainedModel):
<add>class TFAlbertMainLayer(tf.keras.layers.Layer):
<ide> def __init__(self, config, **kwargs):
<ide> super().__init__(config, **kwargs)
<ide> self.num_hidden_layers = config.num_hidden_layers
<ide> def _prune_heads(self, heads_to_prune):
<ide> """
<ide> raise NotImplementedError
<ide>
<del> @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)
<ide> def call(
<del> self,
<del> inputs,
<del> attention_mask=None,
<del> token_type_ids=None,
<del> position_ids=None,
<del> head_mask=None,
<del> inputs_embeds=None,
<del> training=False,
<add> self,
<add> inputs,
<add> attention_mask=None,
<add> token_type_ids=None,
<add> position_ids=None,
<add> head_mask=None,
<add> inputs_embeds=None,
<add> training=False,
<ide> ):
<del> r"""
<del> Returns:
<del> :obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.AlbertConfig`) and inputs:
<del> last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
<del> Sequence of hidden-states at the output of the last layer of the model.
<del> pooler_output (:obj:`tf.Tensor` of shape :obj:`(batch_size, hidden_size)`):
<del> Last layer hidden-state of the first token of the sequence (classification token)
<del> further processed by a Linear layer and a Tanh activation function. The Linear
<del> layer weights are trained from the next sentence prediction (classification)
<del> objective during Albert pretraining. This output is usually *not* a good summary
<del> of the semantic content of the input, you're often better with averaging or pooling
<del> the sequence of hidden-states for the whole input sequence.
<del> hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when :obj:`config.output_hidden_states=True`):
<del> tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)
<del> of shape :obj:`(batch_size, sequence_length, hidden_size)`.
<del>
<del> Hidden-states of the model at the output of each layer plus the initial embedding outputs.
<del> attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
<del> tuple of :obj:`tf.Tensor` (one for each layer) of shape
<del> :obj:`(batch_size, num_heads, sequence_length, sequence_length)`:
<del>
<del> Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
<del>
<del> Examples::
<del>
<del> import tensorflow as tf
<del> from transformers import AlbertTokenizer, TFAlbertModel
<del>
<del> tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
<del> model = TFAlbertModel.from_pretrained('albert-base-v2')
<del> input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
<del> outputs = model(input_ids)
<del> last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
<del>
<del> """
<ide> if isinstance(inputs, (tuple, list)):
<ide> input_ids = inputs[0]
<ide> attention_mask = inputs[1] if len(inputs) > 1 else attention_mask
<ide> def call(
<ide> return outputs
<ide>
<ide>
<add>ALBERT_START_DOCSTRING = r"""
<add> This model is a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ sub-class.
<add> Use it as a regular TF 2.0 Keras Model and
<add> refer to the TF 2.0 documentation for all matter related to general usage and behavior.
<add>
<add> .. _`ALBERT: A Lite BERT for Self-supervised Learning of Language Representations`:
<add> https://arxiv.org/abs/1909.11942
<add>
<add> .. _`tf.keras.Model`:
<add> https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/Model
<add>
<add> .. note::
<add>
<add> TF 2.0 models accepts two formats as inputs:
<add>
<add> - having all inputs as keyword arguments (like PyTorch models), or
<add> - having all inputs as a list, tuple or dict in the first positional arguments.
<add>
<add> This second option is useful when using :obj:`tf.keras.Model.fit()` method which currently requires having
<add> all the tensors in the first argument of the model call function: :obj:`model(inputs)`.
<add>
<add> If you choose this second option, there are three possibilities you can use to gather all the input Tensors
<add> in the first positional argument :
<add>
<add> - a single Tensor with input_ids only and nothing else: :obj:`model(inputs_ids)`
<add> - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
<add> :obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`
<add> - a dictionary with one or several input Tensors associated to the input names given in the docstring:
<add> :obj:`model({'input_ids': input_ids, 'token_type_ids': token_type_ids})`
<add>
<add> Args:
<add> config (:class:`~transformers.AlbertConfig`): Model configuration class with all the parameters of the model.
<add> Initializing with a config file does not load the weights associated with the model, only the configuration.
<add> Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
<add>"""
<add>
<add>ALBERT_INPUTS_DOCSTRING = r"""
<add> Args:
<add> input_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):
<add> Indices of input sequence tokens in the vocabulary.
<add>
<add> Indices can be obtained using :class:`transformers.AlbertTokenizer`.
<add> See :func:`transformers.PreTrainedTokenizer.encode` and
<add> :func:`transformers.PreTrainedTokenizer.encode_plus` for details.
<add>
<add> `What are input IDs? <../glossary.html#input-ids>`__
<add> attention_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional, defaults to :obj:`None`):
<add> Mask to avoid performing attention on padding token indices.
<add> Mask values selected in ``[0, 1]``:
<add> ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
<add>
<add> `What are attention masks? <../glossary.html#attention-mask>`__
<add> token_type_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
<add> Segment token indices to indicate first and second portions of the inputs.
<add> Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
<add> corresponds to a `sentence B` token
<add>
<add> `What are token type IDs? <../glossary.html#token-type-ids>`_
<add> position_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
<add> Indices of positions of each input sequence tokens in the position embeddings.
<add> Selected in the range ``[0, config.max_position_embeddings - 1]``.
<add>
<add> `What are position IDs? <../glossary.html#position-ids>`_
<add> head_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
<add> Mask to nullify selected heads of the self-attention modules.
<add> Mask values selected in ``[0, 1]``:
<add> ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
<add>        inputs_embeds (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
<add> Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
<add> This is useful if you want more control over how to convert `input_ids` indices into associated vectors
<add> than the model's internal embedding lookup matrix.
<add> training (:obj:`boolean`, `optional`, defaults to :obj:`False`):
<add> Whether to activate dropout modules (if set to :obj:`True`) during training or to de-activate them
<add> (if set to :obj:`False`) for evaluation.
<add>"""
<add>
<add>
<add>@add_start_docstrings(
<add>    "The bare Albert Model transformer outputting raw hidden-states without any specific head on top.",
<add> ALBERT_START_DOCSTRING,
<add>)
<add>class TFAlbertModel(TFAlbertPreTrainedModel):
<add> def __init__(self, config, *inputs, **kwargs):
<add> super().__init__(config, *inputs, **kwargs)
<add> self.albert = TFAlbertMainLayer(config, name="albert")
<add>
<add> @add_start_docstrings_to_callable(ALBERT_INPUTS_DOCSTRING)
<add> def call(self, inputs, **kwargs):
<add> r"""
<add> Returns:
<add> :obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.AlbertConfig`) and inputs:
<add> last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
<add> Sequence of hidden-states at the output of the last layer of the model.
<add> pooler_output (:obj:`tf.Tensor` of shape :obj:`(batch_size, hidden_size)`):
<add> Last layer hidden-state of the first token of the sequence (classification token)
<add> further processed by a Linear layer and a Tanh activation function. The Linear
<add> layer weights are trained from the next sentence prediction (classification)
<add> objective during Albert pretraining. This output is usually *not* a good summary
<add> of the semantic content of the input, you're often better with averaging or pooling
<add> the sequence of hidden-states for the whole input sequence.
<add> hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when :obj:`config.output_hidden_states=True`):
<add> tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)
<add> of shape :obj:`(batch_size, sequence_length, hidden_size)`.
<add>
<add> Hidden-states of the model at the output of each layer plus the initial embedding outputs.
<add> attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
<add> tuple of :obj:`tf.Tensor` (one for each layer) of shape
<add> :obj:`(batch_size, num_heads, sequence_length, sequence_length)`:
<add>
<add> Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
<add>
<add> Examples::
<add>
<add> import tensorflow as tf
<add> from transformers import AlbertTokenizer, TFAlbertModel
<add>
<add> tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
<add> model = TFAlbertModel.from_pretrained('albert-base-v2')
<add> input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
<add> outputs = model(input_ids)
<add> last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
<add>
<add> """
<add> outputs = self.albert(inputs, **kwargs)
<add> return outputs
<add>
<add>
<ide> @add_start_docstrings("""Albert Model with a `language modeling` head on top. """, ALBERT_START_DOCSTRING)
<ide> class TFAlbertForMaskedLM(TFAlbertPreTrainedModel):
<ide> def __init__(self, config, *inputs, **kwargs):
<ide> super(TFAlbertForMaskedLM, self).__init__(config, *inputs, **kwargs)
<ide>
<del> self.albert = TFAlbertModel(config, name="albert")
<add> self.albert = TFAlbertMainLayer(config, name="albert")
<ide> self.predictions = TFAlbertMLMHead(config, self.albert.embeddings, name="predictions")
<ide>
<ide> def get_output_embeddings(self):
<ide> def __init__(self, config, *inputs, **kwargs):
<ide> super(TFAlbertForSequenceClassification, self).__init__(config, *inputs, **kwargs)
<ide> self.num_labels = config.num_labels
<ide>
<del> self.albert = TFAlbertModel(config, name="albert")
<add> self.albert = TFAlbertMainLayer(config, name="albert")
<ide> self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
<ide> self.classifier = tf.keras.layers.Dense(
<ide> config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
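The refactor extracts the backbone into `TFAlbertMainLayer` so each head (`TFAlbertModel`, `TFAlbertForMaskedLM`, `TFAlbertForSequenceClassification`) composes the same layer rather than nesting a full top-level model inside another. A framework-free Python sketch of the shape of that composition:

```python
class AlbertMainLayer:            # shared backbone (stand-in for the Keras layer)
    def __call__(self, inputs):
        return f"hidden_states({inputs})"

class AlbertModel:                # bare model: backbone only
    def __init__(self):
        self.albert = AlbertMainLayer()
    def __call__(self, inputs):
        return self.albert(inputs)

class AlbertForMaskedLM:          # task head composes the same backbone type
    def __init__(self):
        self.albert = AlbertMainLayer()
    def __call__(self, inputs):
        return f"mlm_logits({self.albert(inputs)})"

print(AlbertModel()("ids"))       # hidden_states(ids)
print(AlbertForMaskedLM()("ids")) # mlm_logits(hidden_states(ids))
```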
| 1
|
PHP
|
PHP
|
fix condition for skipping 32 bit systems
|
2b931983c225a8305f046f526b308b879673f3dd
|
<ide><path>tests/TestCase/Database/TypeTest.php
<ide> public function testIntegerToStatement() {
<ide> */
<ide> public function testBigintegerToPHP() {
<ide> $this->skipIf(
<del> isset($_SERVER['PROCESSOR_ARCHITECTURE']) && $_SERVER['PROCESSOR_ARCHITECTURE'] === 'x86',
<add> PHP_INT_SIZE === 4,
<ide> 'This test requires a php version compiled for 64 bits'
<ide> );
<ide> $type = Type::build('biginteger');
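`PHP_INT_SIZE === 4` inspects the interpreter build itself instead of an environment variable that may be absent or wrong. The Python equivalent of that direct probe is `sys.maxsize`:

```python
import sys

# sys.maxsize reflects the native integer width of the interpreter build,
# analogous to PHP_INT_SIZE (4 bytes on 32-bit builds, 8 on 64-bit).
is_64_bit = sys.maxsize > 2**32

if not is_64_bit:
    print("skip: this test requires a 64-bit build")
```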
| 1
|
Javascript
|
Javascript
|
set _csrf to be httponly
|
3361419e87947327f2de886eb1d0cd49a1f42510
|
<ide><path>api-server/src/server/middlewares/csurf.js
<ide> export const csrfOptions = {
<ide>
<ide> export default function getCsurf() {
<ide> const protection = csurf({
<del> cookie: csrfOptions
<add> cookie: { ...csrfOptions, httpOnly: true }
<ide> });
<ide> return function csrf(req, res, next) {
<ide> const { path } = req;
<ide><path>client/gatsby-browser.js
<ide> export const wrapPageElement = layoutSelector;
<ide> export const disableCorePrefetching = () => true;
<ide>
<ide> export const onClientEntry = () => {
<del> // purge the csrf cookies, rather than relying what the browser decides a
<del> // Session duration is
<del> cookies.erase('_csrf');
<ide> // the token must be erased since it is only valid for the old _csrf secret
<ide> cookies.erase('csrf_token');
<ide> };
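Marking the `_csrf` secret cookie `HttpOnly` puts it out of reach of client-side JavaScript, which is also why the client-side `cookies.erase('_csrf')` call becomes both impossible and unnecessary. A minimal Python sketch of emitting such a cookie with the standard library:

```python
from http.cookies import SimpleCookie

cookie = SimpleCookie()
cookie["_csrf"] = "secret-value"
cookie["_csrf"]["httponly"] = True   # invisible to document.cookie
cookie["_csrf"]["path"] = "/"

# Produces a Set-Cookie header carrying the HttpOnly attribute.
print(cookie.output())
```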
| 2
|
Python
|
Python
|
add t5 to pipeline(task='summarization')
|
9c683ef01e19c4dc1216dcd1ae3c8e7c44d7b2b9
|
<ide><path>src/transformers/configuration_utils.py
<ide> def to_json_file(self, json_file_path):
<ide> """
<ide> with open(json_file_path, "w", encoding="utf-8") as writer:
<ide> writer.write(self.to_json_string())
<add>
<add> def update(self, config_dict: Dict):
<add> """
<add> Updates attributes of this class
<add> with attributes from `config_dict`.
<add>
<add> Args:
<add>            config_dict (:obj:`Dict[str, any]`): Dictionary of attributes that shall be updated for this class.
<add> """
<add> for key, value in config_dict.items():
<add> setattr(self, key, value)
<ide><path>src/transformers/modeling_tf_utils.py
<ide> def _generate_beam_search(
<ide> # set eos token prob to zero if min_length is not reached
<ide> if eos_token_id is not None and cur_len < min_length:
<ide> # create eos_token_id boolean mask
<add> num_batch_hypotheses = batch_size * num_beams
<add>
<ide> is_token_logit_eos_token = tf.convert_to_tensor(
<ide> [True if token is eos_token_id else False for token in range(vocab_size)], dtype=tf.bool
<ide> )
<del> eos_token_indices_mask = tf.broadcast_to(is_token_logit_eos_token, [batch_size, vocab_size])
<add> eos_token_indices_mask = tf.broadcast_to(is_token_logit_eos_token, [num_batch_hypotheses, vocab_size])
<ide>
<ide> scores = set_tensor_by_indices_to_value(scores, eos_token_indices_mask, -float("inf"))
<ide>
<ide><path>src/transformers/pipelines.py
<ide> from .configuration_bart import BartConfig
<ide> from .configuration_distilbert import DistilBertConfig
<ide> from .configuration_roberta import RobertaConfig
<add>from .configuration_t5 import T5Config
<ide> from .configuration_utils import PretrainedConfig
<ide> from .configuration_xlm import XLMConfig
<ide> from .data import SquadExample, squad_convert_examples_to_features
<ide> AutoModelForTokenClassification,
<ide> AutoModelWithLMHead,
<ide> )
<del> from .modeling_bart import BartForConditionalGeneration
<ide>
<ide>
<ide> logger = logging.getLogger(__name__)
<ide> def __init__(
<ide> tokenizer: PreTrainedTokenizer,
<ide> modelcard: Optional[ModelCard] = None,
<ide> framework: Optional[str] = None,
<add> task: str = "",
<ide> args_parser: ArgumentHandler = None,
<ide> device: int = -1,
<ide> binary_output: bool = False,
<ide> def __init__(
<ide> if self.framework == "pt" and self.device.type == "cuda":
<ide> self.model = self.model.to(self.device)
<ide>
<add> # Update config with task specific parameters
<add> task_specific_params = self.model.config.task_specific_params
<add> if task_specific_params is not None and task in task_specific_params:
<add> self.model.config.update(task_specific_params.get(task))
<add>
<ide> def save_pretrained(self, save_directory):
<ide> """
<ide> Save the pipeline's model and tokenizer to the specified save_directory
<ide> def inputs_for_model(self, features: Union[dict, List[dict]]) -> Dict:
<ide> """
<ide> args = ["input_ids", "attention_mask"]
<ide>
<del> if not isinstance(self.model.config, (DistilBertConfig, XLMConfig, RobertaConfig, BartConfig)):
<add> if not isinstance(self.model.config, (DistilBertConfig, XLMConfig, RobertaConfig, BartConfig, T5Config)):
<ide> args += ["token_type_ids"]
<ide>
<ide> # PR #1548 (CLI) There is an issue with attention_mask
<ide> def inputs_for_model(self, features: Union[dict, List[dict]]) -> Dict:
<ide> else:
<ide> return {k: [feature[k] for feature in features] for k in args}
<ide>
<del> def _parse_and_tokenize(self, *texts, **kwargs):
<add> def _parse_and_tokenize(self, *texts, pad_to_max_length=False, **kwargs):
<ide> """
<ide> Parse arguments and tokenize
<ide> """
<ide> # Parse arguments
<ide> inputs = self._args_parser(*texts, **kwargs)
<ide> inputs = self.tokenizer.batch_encode_plus(
<del> inputs, add_special_tokens=True, return_tensors=self.framework, max_length=self.tokenizer.max_len
<add> inputs,
<add> add_special_tokens=True,
<add> return_tensors=self.framework,
<add> max_length=self.tokenizer.max_len,
<add> pad_to_max_length=pad_to_max_length,
<ide> )
<ide>
<ide> # Filter out features not available on specific models
<ide> def __init__(
<ide> framework: Optional[str] = None,
<ide> args_parser: ArgumentHandler = None,
<ide> device: int = -1,
<add> task: str = "",
<ide> ):
<ide> super().__init__(
<ide> model=model,
<ide> def __init__(
<ide> args_parser=args_parser,
<ide> device=device,
<ide> binary_output=True,
<add> task=task,
<ide> )
<ide>
<ide> def __call__(self, *args, **kwargs):
<ide> def __init__(
<ide> args_parser: ArgumentHandler = None,
<ide> device: int = -1,
<ide> topk=5,
<add> task: str = "",
<ide> ):
<ide> super().__init__(
<ide> model=model,
<ide> def __init__(
<ide> args_parser=args_parser,
<ide> device=device,
<ide> binary_output=True,
<add> task=task,
<ide> )
<ide>
<ide> self.topk = topk
<ide> def __init__(
<ide> device: int = -1,
<ide> binary_output: bool = False,
<ide> ignore_labels=["O"],
<add> task: str = "",
<ide> ):
<ide> super().__init__(
<ide> model=model,
<ide> def __init__(
<ide> args_parser=args_parser,
<ide> device=device,
<ide> binary_output=binary_output,
<add> task=task,
<ide> )
<ide>
<ide> self._basic_tokenizer = BasicTokenizer(do_lower_case=False)
<ide> def __init__(
<ide> modelcard: Optional[ModelCard] = None,
<ide> framework: Optional[str] = None,
<ide> device: int = -1,
<add> task: str = "",
<ide> **kwargs
<ide> ):
<ide> super().__init__(
<ide> def __init__(
<ide> framework=framework,
<ide> args_parser=QuestionAnsweringArgumentHandler(),
<ide> device=device,
<add> task=task,
<ide> **kwargs,
<ide> )
<ide>
<ide> class SummarizationPipeline(Pipeline):
<ide>
<ide> Usage::
<ide>
<add> # use bart in pytorch
<ide> summarizer = pipeline("summarization")
<del> summarizer("Sam Shleifer writes the best docstring examples in the whole world.")
<add> summarizer("Sam Shleifer writes the best docstring examples in the whole world.", min_length=5, max_length=20)
<add>
<add> # use t5 in tf
<add> summarizer = pipeline("summarization", model="t5-base", tokenizer="t5-base", framework="tf")
<add> summarizer("Sam Shleifer writes the best docstring examples in the whole world.", min_length=5, max_length=20)
<ide>
<ide> Supported Models:
<del> The models that this pipeline can use are models that have been fine-tuned on a summarization task, which is
<del> currently only ``BartForConditionalGeneration.from_pretrained('bart-large-cnn')``
<add>    The models that this pipeline can use are models that have been fine-tuned on a summarization task, which are currently '`bart-large-cnn`', '`t5-small`', '`t5-base`', '`t5-large`', '`t5-3b`', '`t5-11b`'.
<ide>
<ide> Arguments:
<ide> model (:obj:`str` or :obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`, `optional`, defaults to :obj:`None`):
<ide> class SummarizationPipeline(Pipeline):
<ide> on the associated CUDA device id.
<ide> """
<ide>
<del> task = "summarization"
<del>
<ide> def __call__(
<del> self,
<del> *documents,
<del> return_tensors=False,
<del> return_text=True,
<del> max_length=142,
<del> min_length=21,
<del> clean_up_tokenization_spaces=False,
<del> **generate_kwargs
<add> self, *documents, return_tensors=False, return_text=True, clean_up_tokenization_spaces=False, **generate_kwargs
<ide> ):
<ide> r"""
<ide> Args:
<ide> *documents: (list of strings) articles to be summarized
<ide> return_text: (bool, default=True) whether to add a decoded "summary_text" to each result
<ide> return_tensors: (bool, default=False) whether to return the raw "summary_token_ids" to each result
<ide>
<del> max_length: (`optional`) int
<del> The max length of the sequence to be generated. Does not include tokens in input_ids.
<del> min_len: (`optional`) int
<del> no_repeat_ngram_size: (`optional`) int. ban ngrams of this length from being repeated in the generated text
<ide> clean_up_tokenization_spaces: (`optional`) bool whether to include extra spaces in the output
<ide> **generate_kwargs: extra kwargs passed to `self.model.generate`_
<ide>
<ide> def __call__(
<ide>
<ide> """
<ide> assert return_tensors or return_text, "You must specify return_tensors=True or return_text=True"
<del> if self.framework == "tf":
<del> raise NotImplementedError("Tensorflow not supported")
<add> assert len(documents) > 0, "Please provide a document to summarize"
<add>
<add> if self.framework == "tf" and "BartForConditionalGeneration" in self.model.__class__.__name__:
<add> raise NotImplementedError(
<add> "Tensorflow is not yet supported for Bart. Please consider using T5, e.g. `t5-base`"
<add> )
<add>
<add> prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
<add>
<add> if isinstance(documents[0], list):
<add> assert (
<add> self.tokenizer.pad_token_id is not None
<add> ), "Please make sure that the tokenizer has a pad_token_id when using a batch input"
<add>
<add> documents = ([prefix + document for document in documents[0]],)
<add> pad_to_max_length = True
<add>
<add> elif isinstance(documents[0], str):
<add> documents = (prefix + documents[0],)
<add> pad_to_max_length = False
<add> else:
<add> raise ValueError(
<add>                " `documents[0]`: {} has the wrong format. It should be either of type `str` or type `list`".format(
<add> documents[0]
<add> )
<add> )
<add>
<ide> with self.device_placement():
<del> inputs = self._parse_and_tokenize(*documents)
<del> inputs = self.ensure_tensor_on_device(**inputs)
<add> inputs = self._parse_and_tokenize(*documents, pad_to_max_length=pad_to_max_length)
<add>
<add> if self.framework == "pt":
<add> inputs = self.ensure_tensor_on_device(**inputs)
<add> input_length = inputs["input_ids"].shape[-1]
<add> elif self.framework == "tf":
<add> input_length = tf.shape(inputs["input_ids"])[-1]
<add>
<add> if input_length < self.model.config.min_length // 2:
<add> logger.warning(
<add>                    "Your min_length is set to {}, but your input_length is only {}. You might consider decreasing min_length in the config and inserting the config manually".format(
<add> self.model.config.min_length, input_length
<add> )
<add> )
<add>
<add> if input_length < self.model.config.max_length:
<add> logger.warning(
<add>                    "Your max_length is set to {}, but your input_length is only {}. You might consider decreasing max_length in the config and inserting the config manually".format(
<add> self.model.config.max_length, input_length
<add> )
<add> )
<add>
<ide> summaries = self.model.generate(
<del> inputs["input_ids"],
<del> attention_mask=inputs["attention_mask"],
<del> max_length=max_length,
<del> min_length=min_length,
<del> do_sample=False,
<del> **generate_kwargs,
<add> inputs["input_ids"], attention_mask=inputs["attention_mask"], **generate_kwargs,
<ide> )
<add>
<ide> results = []
<ide> for summary in summaries:
<ide> record = {}
<ide> def __call__(
<ide> },
<ide> "summarization": {
<ide> "impl": SummarizationPipeline,
<del> "pt": BartForConditionalGeneration if is_torch_available() else None,
<del> "tf": None,
<add> "tf": TFAutoModelWithLMHead if is_tf_available() else None,
<add> "pt": AutoModelWithLMHead if is_torch_available() else None,
<ide> "default": {
<ide> "model": {"pt": "bart-large-cnn", "tf": None},
<ide> "config": None,
<ide> def pipeline(
<ide> framework = framework or get_framework(model)
<ide>
<ide> targeted_task = SUPPORTED_TASKS[task]
<del> task, model_class = targeted_task["impl"], targeted_task[framework]
<add> task_class, model_class = targeted_task["impl"], targeted_task[framework]
<ide>
<ide> # Use default model/config/tokenizer for the task if no model is provided
<ide> if model is None:
<ide> def pipeline(
<ide> )
<ide> model = model_class.from_pretrained(model, config=config, **model_kwargs)
<ide>
<del> return task(model=model, tokenizer=tokenizer, modelcard=modelcard, framework=framework, **kwargs)
<add> return task_class(model=model, tokenizer=tokenizer, modelcard=modelcard, framework=framework, task=task, **kwargs)
<ide><path>tests/test_pipelines.py
<ide> (("distilroberta-base", {"use_fast": False}), "distilroberta-base", None),
<ide> ]
<ide>
<add>SUMMARIZATION_FINETUNED_MODELS = {("bart-large-cnn", "bart-large-cnn"), ("t5-small", "t5-small")}
<add>TF_SUMMARIZATION_FINETUNED_MODELS = {("t5-small", "t5-small")}
<add>
<ide>
<ide> class MonoColumnInputTestCase(unittest.TestCase):
<ide> def _test_mono_column_pipeline(
<ide> def test_summarization(self):
<ide> valid_inputs = ["A string like this", ["list of strings entry 1", "list of strings v2"]]
<ide> invalid_inputs = [4, "<mask>"]
<ide> mandatory_keys = ["summary_text"]
<del> nlp = pipeline(task="summarization")
<del> self._test_mono_column_pipeline(
<del> nlp, valid_inputs, invalid_inputs, mandatory_keys,
<del> )
<add> for model, tokenizer in SUMMARIZATION_FINETUNED_MODELS:
<add> nlp = pipeline(task="summarization", model=model, tokenizer=tokenizer)
<add> self._test_mono_column_pipeline(
<add> nlp, valid_inputs, invalid_inputs, mandatory_keys,
<add> )
<add>
<add> @require_tf
<add> def test_tf_summarization(self):
<add> valid_inputs = ["A string like this", ["list of strings entry 1", "list of strings v2"]]
<add> invalid_inputs = [4, "<mask>"]
<add> mandatory_keys = ["summary_text"]
<add> for model, tokenizer in TF_SUMMARIZATION_FINETUNED_MODELS:
<add> nlp = pipeline(task="summarization", model=model, tokenizer=tokenizer, framework="tf")
<add> self._test_mono_column_pipeline(
<add> nlp, valid_inputs, invalid_inputs, mandatory_keys,
<add> )
<ide>
<ide>
<ide> class MultiColumnInputTestCase(unittest.TestCase):
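The plumbing that lets one pipeline serve both Bart and T5 is the `task_specific_params` lookup: the pipeline passes its task name down, and any matching overrides (generation lengths, the `summarize: ` input prefix, ...) are merged into the model config. A self-contained sketch of that merge (the `Config` class is a stand-in for `PretrainedConfig`):

```python
class Config:
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    def update(self, config_dict):
        # Mirrors the PretrainedConfig.update() method added in the patch.
        for key, value in config_dict.items():
            setattr(self, key, value)

config = Config(
    max_length=20,
    prefix=None,
    task_specific_params={"summarization": {"max_length": 142, "prefix": "summarize: "}},
)

task = "summarization"
overrides = (config.task_specific_params or {}).get(task)
if overrides is not None:
    config.update(overrides)

assert config.max_length == 142 and config.prefix == "summarize: "
```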
| 4
|
Mixed
|
Go
|
remove multiple interface in an endpoint
|
a5bd12b9631607e5701665c43df12ff94aa0eadf
|
<ide><path>libnetwork/cmd/ovrouter/ovrouter.go
<ide> type endpoint struct {
<ide> addr net.IPNet
<ide> mac net.HardwareAddr
<ide> name string
<del> id int
<ide> }
<ide>
<ide> func (r *router) RegisterDriver(name string, driver driverapi.Driver, c driverapi.Capability) error {
<ide> r.d = driver
<ide> return nil
<ide> }
<ide>
<del>func (ep *endpoint) Interfaces() []driverapi.InterfaceInfo {
<add>func (ep *endpoint) Interface() driverapi.InterfaceInfo {
<ide> return nil
<ide> }
<ide>
<del>func (ep *endpoint) AddInterface(ID int, mac net.HardwareAddr, ipv4 net.IPNet, ipv6 net.IPNet) error {
<del> ep.id = ID
<add>func (ep *endpoint) AddInterface(mac net.HardwareAddr, ipv4 net.IPNet, ipv6 net.IPNet) error {
<ide> ep.addr = ipv4
<ide> ep.mac = mac
<ide> return nil
<ide> }
<ide>
<del>func (ep *endpoint) InterfaceNames() []driverapi.InterfaceNameInfo {
<del> return []driverapi.InterfaceNameInfo{ep}
<del>
<add>func (ep *endpoint) InterfaceName() driverapi.InterfaceNameInfo {
<add> return ep
<ide> }
<ide>
<ide> func (ep *endpoint) SetNames(srcName, dstPrefix string) error {
<ide> ep.name = srcName
<ide> return nil
<ide> }
<ide>
<del>func (ep *endpoint) ID() int {
<del> return ep.id
<del>}
<del>
<ide> func (ep *endpoint) SetGateway(net.IP) error {
<ide> return nil
<ide> }
<ide> func (ep *endpoint) SetGatewayIPv6(net.IP) error {
<ide> }
<ide>
<ide> func (ep *endpoint) AddStaticRoute(destination *net.IPNet, routeType int,
<del> nextHop net.IP, interfaceID int) error {
<add> nextHop net.IP) error {
<ide> return nil
<ide> }
<ide>
<ide><path>libnetwork/docs/design.md
<ide> Libnetwork implements Container Network Model (CNM) which formalizes the steps r
<ide> **Sandbox**
<ide>
<ide> A Sandbox contains the configuration of a container's network stack.
<del>This includes management of the container's interfaces, routing table and DNS settings.
<add>This includes management of the container's interfaces, routing table and DNS settings.
<ide> An implementation of a Sandbox could be a Linux Network Namespace, a FreeBSD Jail or other similar concept.
<ide> A Sandbox may contain *many* endpoints from *multiple* networks.
<ide>
<ide> Consumers of the CNM, like Docker for example, interact through the CNM Objects
<ide>
<ide> 7. `endpoint.Delete()` is used to delete an endpoint from a network. This results in deleting an endpoint and cleaning up the cached `sandbox.Info`.
<ide>
<del>8. `network.Delete()` is used to delete a network. LibNetwork will not allow the delete to proceed if there are any existing endpoints attached to the Network.
<add>8. `network.Delete()` is used to delete a network. LibNetwork will not allow the delete to proceed if there are any existing endpoints attached to the Network.
<ide>
<ide>
<ide> ## Implementation Details
<ide> LibNetwork's Network and Endpoint APIs are primarily for managing the correspond
<ide>
<ide> ### Sandbox
<ide>
<del>Libnetwork provides a framework to implement of a Sandbox in multiple operating systems. Currently we have implemented Sandbox for Linux using `namespace_linux.go` and `configure_linux.go` in `sandbox` package
<add>Libnetwork provides a framework to implement a Sandbox in multiple operating systems. Currently we have implemented a Sandbox for Linux using `namespace_linux.go` and `configure_linux.go` in the `sandbox` package.
<ide> This creates a Network Namespace for each sandbox which is uniquely identified by a path on the host filesystem.
<ide> Netlink calls are used to move interfaces from the global namespace to the Sandbox namespace.
<ide> Netlink is also used to manage the routing table in the namespace.
<ide> Drivers are essentially an extension of libnetwork and provides the actual imple
<ide> * `driver.CreateEndpoint`
<ide> * `driver.DeleteEndpoint`
<ide> * `driver.Join`
<del>* `driver.Leave`
<add>* `driver.Leave`
<ide>
<ide> These Driver facing APIs makes use of unique identifiers (`networkid`,`endpointid`,...) instead of names (as seen in user-facing APIs).
<ide>
<ide> The APIs are still work in progress and there can be changes to these based on t
<ide>
<ide> * `Driver.CreateEndpoint`
<ide>
<del>This method is passed an interface `EndpointInfo`, with methods `Interfaces` and `AddInterface`.
<add>This method is passed an interface `EndpointInfo`, with methods `Interface` and `AddInterface`.
<ide>
<del>If the slice returned by `Interfaces` is non-empty, the driver is expected to make use of the interface information therein (e.g., treating the address or addresses as statically supplied), and must return an error if it cannot. If the slice is empty, the driver should allocate zero or more _fresh_ interfaces, and use `AddInterface` to record them; or return an error if it cannot.
<add>If the value returned by `Interface` is non-nil, the driver is expected to make use of the interface information therein (e.g., treating the address or addresses as statically supplied), and must return an error if it cannot. If the value is `nil`, the driver should allocate exactly one _fresh_ interface, and use `AddInterface` to record it; or return an error if it cannot.
<ide>
<del>It is forbidden to use `AddInterface` if `Interfaces` is non-empty.
<add>It is forbidden to use `AddInterface` if `Interface` is non-nil.
<ide>
<ide> ## Implementations
<ide>
<ide> For more details on its design, please see the [Overlay Driver Design](overlay.m
<ide> The `remote` package does not provide a driver, but provides a means of supporting drivers over a remote transport.
<ide> This allows a driver to be written in a language of your choice.
<ide> For further details, please see the [Remote Driver Design](remote.md).
<del>
<ide><path>libnetwork/docs/remote.md
<ide> If the remote process cannot decode, or otherwise detects a syntactic problem wi
<ide> If the remote process can decode the request, but cannot complete the operation, it must send a response in the form
<ide>
<ide> {
<del> "Err": string
<add> "Err": string
<ide> }
<ide>
<ide> The string value supplied may appear in logs, so should not include confidential information.
<ide> The string value supplied may appear in logs, so should not include confidential
<ide> When loaded, a remote driver process receives an HTTP POST on the URL `/Plugin.Activate` with no payload. It must respond with a manifest of the form
<ide>
<ide> {
<del> "Implements": ["NetworkDriver"]
<add> "Implements": ["NetworkDriver"]
<ide> }
<ide>
<ide> Other entries in the list value are allowed; `"NetworkDriver"` indicates that the plugin should be registered with LibNetwork as a driver.
<ide> Other entries in the list value are allowed; `"NetworkDriver"` indicates that th
<ide> When the proxy is asked to create a network, the remote process shall receive a POST to the URL `/NetworkDriver.CreateNetwork` of the form
<ide>
<ide> {
<del> "NetworkID": string,
<del> "Options": {
<del> ...
<del> }
<add> "NetworkID": string,
<add> "Options": {
<add> ...
<add> }
<ide> }
<ide>
<ide> The `NetworkID` value is generated by LibNetwork. The `Options` value is the arbitrary map given to the proxy by LibNetwork.
<ide> The response indicating success is empty:
<ide> When a network owned by the remote driver is deleted, the remote process shall receive a POST to the URL `/NetworkDriver.DeleteNetwork` of the form
<ide>
<ide> {
<del> "NetworkID": string
<add> "NetworkID": string
<ide> }
<ide>
<ide> The success response is empty:
<ide> The success response is empty:
<ide> When the proxy is asked to create an endpoint, the remote process shall receive a POST to the URL `/NetworkDriver.CreateEndpoint` of the form
<ide>
<ide> {
<del> "NetworkID": string,
<del> "EndpointID": string,
<del> "Options": {
<del> ...
<del> },
<del> "Interfaces": [{
<del> "ID": int,
<del> "Address": string,
<del> "AddressIPv6": string,
<del> "MacAddress": string
<del> }, ...]
<add> "NetworkID": string,
<add> "EndpointID": string,
<add> "Options": {
<add> ...
<add> },
<add> "Interface": {
<add> "Address": string,
<add> "AddressIPv6": string,
<add> "MacAddress": string
<add> }
<ide> }
<ide>
<ide> The `NetworkID` is the generated identifier for the network to which the endpoint belongs; the `EndpointID` is a generated identifier for the endpoint.
<ide>
<ide> `Options` is an arbitrary map as supplied to the proxy.
<ide>
<del>The `Interfaces` value is a list with values of the form given. The fields in the `Interfaces` entries may be empty; and the `Interfaces` list itself may be empty. If supplied, `Address` is an IPv4 address and subnet in CIDR notation; e.g., `"192.168.34.12/16"`. If supplied, `AddressIPv6` is an IPv6 address and subnet in CIDR notation. `MacAddress` is a MAC address as a string; e.g., `"6e:75:32:60:44:c9"`.
<add>The `Interface` value is of the form given above. The fields of the `Interface` may be empty, and the `Interface` itself may be empty. If supplied, `Address` is an IPv4 address and subnet in CIDR notation; e.g., `"192.168.34.12/16"`. If supplied, `AddressIPv6` is an IPv6 address and subnet in CIDR notation. `MacAddress` is a MAC address as a string; e.g., `"6e:75:32:60:44:c9"`.
<ide>
<ide> A success response is of the form
<ide>
<ide> {
<del> "Interfaces": [{
<del> "ID": int,
<del> "Address": string,
<del> "AddressIPv6": string,
<del> "MacAddress": string
<del> }, ...]
<add> "Interface": {
<add> "Address": string,
<add> "AddressIPv6": string,
<add> "MacAddress": string
<add> }
<ide> }
<ide>
<del>with values in the `Interfaces` entries as above. For each entry, an `ID` and `MacAddress` and either or both of `Address` and `AddressIPv6` must be given. The `ID` is arbitrary but must differ among entries. It is used to identify, within the scope of the endpoint, an individual interface during a `Join` call.
<add>with values in the `Interface` as above. If a non-empty `Interface` value is returned, it must include `MacAddress` and either or both of `Address` and `AddressIPv6`.
<ide>
<del>If the remote process was supplied entries in `Interfaces`, it must respond with an empty `Interfaces` list. LibNetwork will treat it as an error if it supplies a non-empty list and receives a non-empty list back, and roll back the operation.
<add>If the remote process was supplied a non-empty value in `Interface`, it must respond with an empty `Interface` value. LibNetwork will treat it as an error if it supplies a non-empty value and receives a non-empty value back, and roll back the operation.
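<add>
<add>For illustration (the identifiers are invented; the address and MAC values reuse the examples above), a driver that allocates resources itself might receive
<add>
<add> {
<add> "NetworkID": "dbe12672b7f64b5aa86ac412e0f893e4",
<add> "EndpointID": "ef6b3f0a44d04c4690d9b9c0ed8b5d45",
<add> "Options": {},
<add> "Interface": {}
<add> }
<add>
<add>and reply with
<add>
<add> {
<add> "Interface": {
<add> "Address": "192.168.34.12/16",
<add> "MacAddress": "6e:75:32:60:44:c9"
<add> }
<add> }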
<ide>
<ide> ### Endpoint operational info
<ide>
<ide> The proxy may be asked for "operational info" on an endpoint. When this happens, the remote process shall receive a POST to `/NetworkDriver.EndpointOperInfo` of the form
<ide>
<ide> {
<del> "NetworkID": string,
<del> "EndpointID": string
<add> "NetworkID": string,
<add> "EndpointID": string
<ide> }
<ide>
<ide> where `NetworkID` and `EndpointID` have meanings as above. It must send a response of the form
<ide>
<ide> {
<del> "Value": { ... }
<add> "Value": { ... }
<ide> }
<ide>
<ide> where the value of the `Value` field is an arbitrary (possibly empty) map.
<ide> where the value of the `Value` field is an arbitrary (possibly empty) map.
<ide> When an endpoint is deleted, the remote process shall receive a POST to the URL `/NetworkDriver.DeleteEndpoint` with a body of the form
<ide>
<ide> {
<del> "NetworkID": string,
<del> "EndpointID": string
<add> "NetworkID": string,
<add> "EndpointID": string
<ide> }
<ide>
<ide> where `NetworkID` and `EndpointID` have meanings as above. A success response is empty:
<ide> where `NetworkID` and `EndpointID` have meanings as above. A success response is
<ide> When a sandbox is given an endpoint, the remote process shall receive a POST to the URL `NetworkDriver.Join` of the form
<ide>
<ide> {
<del> "NetworkID": string,
<del> "EndpointID": string,
<del> "SandboxKey": string,
<del> "Options": { ... }
<add> "NetworkID": string,
<add> "EndpointID": string,
<add> "SandboxKey": string,
<add> "Options": { ... }
<ide> }
<ide>
<ide> The `NetworkID` and `EndpointID` have meanings as above. The `SandboxKey` identifies the sandbox. `Options` is an arbitrary map as supplied to the proxy.
<ide>
<ide> The response must have the form
<ide>
<ide> {
<del> "InterfaceNames": [{
<del> SrcName: string,
<del> DstPrefix: string
<del> }, ...],
<del> "Gateway": string,
<del> "GatewayIPv6": string,
<del> "StaticRoutes": [{
<del> "Destination": string,
<del> "RouteType": int,
<del> "NextHop": string,
<del> "InterfaceID": int
<del> }, ...]
<del> "HostsPath": string,
<del> "ResolvConfPath": string
<add> "InterfaceName": {
<add> "SrcName": string,
<add> "DstPrefix": string
<add> },
<add> "Gateway": string,
<add> "GatewayIPv6": string,
<add> "StaticRoutes": [{
<add> "Destination": string,
<add> "RouteType": int,
<add> "NextHop": string,
<add> }, ...]
<ide> }
<ide>
<del>`Gateway` is optional and if supplied is an IP address as a string; e.g., `"192.168.0.1"`. `GatewayIPv6` is optional and if supplied is an IPv6 address as a string; e.g., `"fe80::7809:baff:fec6:7744"`. `HostsPath` is optional, as is `ResolvConfPath`.
<add>`Gateway` is optional and if supplied is an IP address as a string; e.g., `"192.168.0.1"`. `GatewayIPv6` is optional and if supplied is an IPv6 address as a string; e.g., `"fe80::7809:baff:fec6:7744"`.
<ide>
<del>The entries in `InterfaceNames` represent veths that should be moved by LibNetwork into the sandbox; the `SrcName` is the name of the veth that the remote process created, and the `DstPrefix` is a prefix for the name the veth should have after it has been moved into the sandbox (LibNetwork will append an index to make sure the actual name does not collide with others).
<add>The `InterfaceName` entry represents an actual OS level interface that should be moved by LibNetwork into the sandbox; the `SrcName` is the name of the OS level interface that the remote process created, and the `DstPrefix` is a prefix for the name the OS level interface should have after it has been moved into the sandbox (LibNetwork will append an index to make sure the actual name does not collide with others).
<ide>
<del>The position of the entries in the list must correspond to the interface IDs given in the response to `/NetworkDriver.CreateEndpoint` as described above. For example, if there were two `Interfaces` in the create endpoint response, with IDs `0` and `1`, then the `InterfaceNames` list would have the interface names respectively in positions `0` and `1`of the list. (For this reason it is recommended that interfaces are given sequential IDs starting with `0`.)
<del>
<del>The entries in `"StaticRoutes"` represent routes that should be added to an interface once it has been moved into the sandbox. Since there may be zero or more routes for an interface, unlike the interface names they can be supplied in any order, and are marked with the `InterfaceID` of the corresponding interface.
<add>The entries in `"StaticRoutes"` represent routes that should be added to an interface once it has been moved into the sandbox. Since there may be zero or more routes for an interface, unlike the interface name they can be supplied in any order.
<ide>
<ide> Routes are either given a `RouteType` of `0` and a value for `NextHop`; or, a `RouteType` of `1` and no value for `NextHop`, meaning a connected route.
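<add>
<add>As a hypothetical example (the interface and route values are invented; only the shape follows the schema above), a response that moves one interface into the sandbox with one next-hop route and one connected route might be
<add>
<add> {
<add> "InterfaceName": {
<add> "SrcName": "veth0ab3c21",
<add> "DstPrefix": "eth"
<add> },
<add> "Gateway": "192.168.0.1",
<add> "StaticRoutes": [{
<add> "Destination": "10.2.0.0/16",
<add> "RouteType": 0,
<add> "NextHop": "192.168.0.254"
<add> }, {
<add> "Destination": "10.3.0.0/16",
<add> "RouteType": 1
<add> }]
<add> }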
<ide>
<ide> Routes are either given a `RouteType` of `0` and a value for `NextHop`; or, a `R
<ide> If the proxy is asked to remove an endpoint from a sandbox, the remote process shall receive a POST to the URL `/NetworkDriver.Leave` of the form
<ide>
<ide> {
<del> "NetworkID": string,
<del> "EndpointID": string
<add> "NetworkID": string,
<add> "EndpointID": string
<ide> }
<ide>
<ide> where `NetworkID` and `EndpointID` have meanings as above. The success response is empty:
<ide><path>libnetwork/driverapi/driverapi.go
<ide> type Driver interface {
<ide>
<ide> // EndpointInfo provides a go interface to fetch or populate endpoint assigned network resources.
<ide> type EndpointInfo interface {
<del> // Interfaces returns a list of interfaces bound to the endpoint.
<del> // If the list is not empty the driver is only expected to consume the interfaces.
<del> // It is an error to try to add interfaces to a non-empty list.
<del> // If the list is empty the driver is expected to populate with 0 or more interfaces.
<del> Interfaces() []InterfaceInfo
<del>
<del> // AddInterface is used by the driver to add an interface to the interface list.
<del> // This method will return an error if the driver attempts to add interfaces
<del> // if the Interfaces() method returned a non-empty list.
<del> // ID field need only have significance within the endpoint so it can be a simple
<del> // monotonically increasing number
<del> AddInterface(ID int, mac net.HardwareAddr, ipv4 net.IPNet, ipv6 net.IPNet) error
<add> // Interface returns the interface bound to the endpoint.
<add> // If the value is not nil, the driver is only expected to consume the interface.
<add> // It is an error to try to add an interface if the passed down value is non-nil.
<add> // If the value is nil, the driver is expected to add an interface.
<add> Interface() InterfaceInfo
<add>
<add> // AddInterface is used by the driver to add an interface for the endpoint.
<add> // This method will return an error if the driver attempts to add an interface
<add> // when the Interface() method has returned a non-nil value.
<add> AddInterface(mac net.HardwareAddr, ipv4 net.IPNet, ipv6 net.IPNet) error
<ide> }
<ide>
<ide> // InterfaceInfo provides a go interface for drivers to retrieve
<ide> type InterfaceInfo interface {
<ide>
<ide> // AddressIPv6 returns the IPv6 address.
<ide> AddressIPv6() net.IPNet
<del>
<del> // ID returns the numerical id of the interface and has significance only within
<del> // the endpoint.
<del> ID() int
<ide> }
<ide>
<ide> // InterfaceNameInfo provides a go interface for the drivers to assign names
<ide> // to interfaces.
<ide> type InterfaceNameInfo interface {
<ide> // SetNames method assigns the srcName and dstPrefix for the interface.
<ide> SetNames(srcName, dstPrefix string) error
<del>
<del> // ID returns the numerical id that was assigned to the interface by the driver
<del> // CreateEndpoint.
<del> ID() int
<ide> }
<ide>
<ide> // JoinInfo represents a set of resources that the driver has the ability to provide during
<ide> // join time.
<ide> type JoinInfo interface {
<del> // InterfaceNames returns a list of InterfaceNameInfo go interface to facilitate
<del> // setting the names for the interfaces.
<del> InterfaceNames() []InterfaceNameInfo
<add> // InterfaceName returns a InterfaceNameInfo go interface to facilitate
<add> // setting the names for the interface.
<add> InterfaceName() InterfaceNameInfo
<ide>
<ide> // SetGateway sets the default IPv4 gateway when a container joins the endpoint.
<ide> SetGateway(net.IP) error
<ide> type JoinInfo interface {
<ide>
<ide> // AddStaticRoute adds a routes to the sandbox.
<ide> // It may be used in addition to or instead of a default gateway (as above).
<del> AddStaticRoute(destination *net.IPNet, routeType int, nextHop net.IP, interfaceID int) error
<add> AddStaticRoute(destination *net.IPNet, routeType int, nextHop net.IP) error
<ide> }
<ide>
<ide> // DriverCallback provides a Callback interface for Drivers into LibNetwork
<ide><path>libnetwork/drivers/bridge/bridge.go
<ide> const (
<ide> vethLen = 7
<ide> containerVethPrefix = "eth"
<ide> maxAllocatePortAttempts = 10
<del> ifaceID = 1
<ide> )
<ide>
<ide> var (
<ide> func (d *driver) CreateEndpoint(nid, eid string, epInfo driverapi.EndpointInfo,
<ide> return errors.New("invalid endpoint info passed")
<ide> }
<ide>
<del> if len(epInfo.Interfaces()) != 0 {
<del> return errors.New("non empty interface list passed to bridge(local) driver")
<add> if epInfo.Interface() != nil {
<add> return errors.New("non-nil interface passed to bridge(local) driver")
<ide> }
<ide>
<ide> // Get the network handler and make sure it exists
<ide> func (d *driver) CreateEndpoint(nid, eid string, epInfo driverapi.EndpointInfo,
<ide> endpoint.addrv6 = ipv6Addr
<ide> }
<ide>
<del> err = epInfo.AddInterface(ifaceID, endpoint.macAddress, *ipv4Addr, *ipv6Addr)
<add> err = epInfo.AddInterface(endpoint.macAddress, *ipv4Addr, *ipv6Addr)
<ide> if err != nil {
<ide> return err
<ide> }
<ide> func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo,
<ide> return EndpointNotFoundError(eid)
<ide> }
<ide>
<del> for _, iNames := range jinfo.InterfaceNames() {
<del> // Make sure to set names on the correct interface ID.
<del> if iNames.ID() == ifaceID {
<del> err = iNames.SetNames(endpoint.srcName, containerVethPrefix)
<del> if err != nil {
<del> return err
<del> }
<del> }
<add> iNames := jinfo.InterfaceName()
<add> err = iNames.SetNames(endpoint.srcName, containerVethPrefix)
<add> if err != nil {
<add> return err
<ide> }
<ide>
<ide> err = jinfo.SetGateway(network.bridge.gatewayIPv4)
<ide><path>libnetwork/drivers/bridge/bridge_test.go
<ide> func TestCreateFullOptions(t *testing.T) {
<ide>
<ide> // Verify the IP address allocated for the endpoint belongs to the container network
<ide> epOptions := make(map[string]interface{})
<del> te := &testEndpoint{ifaces: []*testInterface{}}
<add> te := &testEndpoint{}
<ide> err = d.CreateEndpoint("dummy", "ep1", te, epOptions)
<ide> if err != nil {
<ide> t.Fatalf("Failed to create an endpoint : %s", err.Error())
<ide> }
<del> if !cnw.Contains(te.Interfaces()[0].Address().IP) {
<del> t.Fatalf("endpoint got assigned address outside of container network(%s): %s", cnw.String(), te.Interfaces()[0].Address())
<add>
<add> if !cnw.Contains(te.Interface().Address().IP) {
<add> t.Fatalf("endpoint got assigned address outside of container network(%s): %s", cnw.String(), te.Interface().Address())
<ide> }
<ide> }
<ide>
<ide> func verifyV4INCEntries(networks map[string]*bridgeNetwork, numEntries int, t *t
<ide> }
<ide>
<ide> type testInterface struct {
<del> id int
<ide> mac net.HardwareAddr
<ide> addr net.IPNet
<ide> addrv6 net.IPNet
<ide> type testInterface struct {
<ide> }
<ide>
<ide> type testEndpoint struct {
<del> ifaces []*testInterface
<add> iface *testInterface
<ide> gw net.IP
<ide> gw6 net.IP
<ide> hostsPath string
<ide> resolvConfPath string
<ide> routes []types.StaticRoute
<ide> }
<ide>
<del>func (te *testEndpoint) Interfaces() []driverapi.InterfaceInfo {
<del> iList := make([]driverapi.InterfaceInfo, len(te.ifaces))
<del>
<del> for i, iface := range te.ifaces {
<del> iList[i] = iface
<add>func (te *testEndpoint) Interface() driverapi.InterfaceInfo {
<add> if te.iface != nil {
<add> return te.iface
<ide> }
<ide>
<del> return iList
<del>}
<del>
<del>func (te *testEndpoint) AddInterface(id int, mac net.HardwareAddr, ipv4 net.IPNet, ipv6 net.IPNet) error {
<del> iface := &testInterface{id: id, addr: ipv4, addrv6: ipv6}
<del> te.ifaces = append(te.ifaces, iface)
<ide> return nil
<ide> }
<ide>
<del>func (i *testInterface) ID() int {
<del> return i.id
<add>func (te *testEndpoint) AddInterface(mac net.HardwareAddr, ipv4 net.IPNet, ipv6 net.IPNet) error {
<add> iface := &testInterface{addr: ipv4, addrv6: ipv6}
<add> te.iface = iface
<add> return nil
<ide> }
<ide>
<ide> func (i *testInterface) MacAddress() net.HardwareAddr {
<ide> func (i *testInterface) SetNames(srcName string, dstName string) error {
<ide> return nil
<ide> }
<ide>
<del>func (te *testEndpoint) InterfaceNames() []driverapi.InterfaceNameInfo {
<del> iList := make([]driverapi.InterfaceNameInfo, len(te.ifaces))
<del>
<del> for i, iface := range te.ifaces {
<del> iList[i] = iface
<add>func (te *testEndpoint) InterfaceName() driverapi.InterfaceNameInfo {
<add> if te.iface != nil {
<add> return te.iface
<ide> }
<ide>
<del> return iList
<add> return nil
<ide> }
<ide>
<ide> func (te *testEndpoint) SetGateway(gw net.IP) error {
<ide> func (te *testEndpoint) SetGatewayIPv6(gw6 net.IP) error {
<ide> return nil
<ide> }
<ide>
<del>func (te *testEndpoint) AddStaticRoute(destination *net.IPNet, routeType int, nextHop net.IP, interfaceID int) error {
<del> te.routes = append(te.routes, types.StaticRoute{Destination: destination, RouteType: routeType, NextHop: nextHop, InterfaceID: interfaceID})
<add>func (te *testEndpoint) AddStaticRoute(destination *net.IPNet, routeType int, nextHop net.IP) error {
<add> te.routes = append(te.routes, types.StaticRoute{Destination: destination, RouteType: routeType, NextHop: nextHop})
<ide> return nil
<ide> }
<ide>
<ide> func testQueryEndpointInfo(t *testing.T, ulPxyEnabled bool) {
<ide> epOptions := make(map[string]interface{})
<ide> epOptions[netlabel.PortMap] = portMappings
<ide>
<del> te := &testEndpoint{ifaces: []*testInterface{}}
<add> te := &testEndpoint{}
<ide> err = d.CreateEndpoint("net1", "ep1", te, epOptions)
<ide> if err != nil {
<ide> t.Fatalf("Failed to create an endpoint : %s", err.Error())
<ide> func TestCreateLinkWithOptions(t *testing.T) {
<ide> epOptions := make(map[string]interface{})
<ide> epOptions[netlabel.MacAddress] = mac
<ide>
<del> te := &testEndpoint{ifaces: []*testInterface{}}
<add> te := &testEndpoint{}
<ide> err = d.CreateEndpoint("net1", "ep", te, epOptions)
<ide> if err != nil {
<ide> t.Fatalf("Failed to create an endpoint: %s", err.Error())
<ide> func TestCreateLinkWithOptions(t *testing.T) {
<ide> t.Fatalf("Failed to join the endpoint: %v", err)
<ide> }
<ide>
<del> ifaceName := te.ifaces[0].srcName
<add> ifaceName := te.iface.srcName
<ide> veth, err := netlink.LinkByName(ifaceName)
<ide> if err != nil {
<ide> t.Fatal(err)
<ide> func TestLinkContainers(t *testing.T) {
<ide> epOptions := make(map[string]interface{})
<ide> epOptions[netlabel.ExposedPorts] = exposedPorts
<ide>
<del> te1 := &testEndpoint{ifaces: []*testInterface{}}
<add> te1 := &testEndpoint{}
<ide> err = d.CreateEndpoint("net1", "ep1", te1, epOptions)
<ide> if err != nil {
<ide> t.Fatalf("Failed to create an endpoint : %s", err.Error())
<ide> }
<ide>
<del> addr1 := te1.ifaces[0].addr
<add> addr1 := te1.iface.addr
<ide> if addr1.IP.To4() == nil {
<ide> t.Fatalf("No Ipv4 address assigned to the endpoint: ep1")
<ide> }
<ide>
<del> te2 := &testEndpoint{ifaces: []*testInterface{}}
<add> te2 := &testEndpoint{}
<ide> err = d.CreateEndpoint("net1", "ep2", te2, nil)
<ide> if err != nil {
<ide> t.Fatalf("Failed to create an endpoint : %s", err.Error())
<ide> }
<ide>
<del> addr2 := te2.ifaces[0].addr
<add> addr2 := te2.iface.addr
<ide> if addr2.IP.To4() == nil {
<ide> t.Fatalf("No Ipv4 address assigned to the endpoint: ep2")
<ide> }
<ide> func TestSetDefaultGw(t *testing.T) {
<ide> t.Fatalf("Failed to create bridge: %v", err)
<ide> }
<ide>
<del> te := &testEndpoint{ifaces: []*testInterface{}}
<add> te := &testEndpoint{}
<ide> err = d.CreateEndpoint("dummy", "ep", te, nil)
<ide> if err != nil {
<ide> t.Fatalf("Failed to create endpoint: %v", err)
<ide><path>libnetwork/drivers/bridge/network_test.go
<ide> func TestLinkCreate(t *testing.T) {
<ide> t.Fatalf("Failed to create bridge: %v", err)
<ide> }
<ide>
<del> te := &testEndpoint{ifaces: []*testInterface{}}
<add> te := &testEndpoint{}
<ide> err = d.CreateEndpoint("dummy", "", te, nil)
<ide> if err != nil {
<ide> if _, ok := err.(InvalidEndpointIDError); !ok {
<ide> func TestLinkCreate(t *testing.T) {
<ide> }
<ide>
<ide> // Verify sbox endoint interface inherited MTU value from bridge config
<del> sboxLnk, err := netlink.LinkByName(te.ifaces[0].srcName)
<add> sboxLnk, err := netlink.LinkByName(te.iface.srcName)
<ide> if err != nil {
<ide> t.Fatal(err)
<ide> }
<ide> func TestLinkCreate(t *testing.T) {
<ide> // TODO: if we could get peer name from (sboxLnk.(*netlink.Veth)).PeerName
<ide> // then we could check the MTU on hostLnk as well.
<ide>
<del> te1 := &testEndpoint{ifaces: []*testInterface{}}
<add> te1 := &testEndpoint{iface: &testInterface{}}
<ide> err = d.CreateEndpoint("dummy", "ep", te1, nil)
<ide> if err == nil {
<ide> t.Fatalf("Failed to detect duplicate endpoint id on same network")
<ide> }
<ide>
<del> if len(te.ifaces) != 1 {
<del> t.Fatalf("Expected exactly one interface. Instead got %d interface(s)", len(te.ifaces))
<del> }
<del>
<del> if te.ifaces[0].dstName == "" {
<add> if te.iface.dstName == "" {
<ide> t.Fatal("Invalid Dstname returned")
<ide> }
<ide>
<del> _, err = netlink.LinkByName(te.ifaces[0].srcName)
<add> _, err = netlink.LinkByName(te.iface.srcName)
<ide> if err != nil {
<del> t.Fatalf("Could not find source link %s: %v", te.ifaces[0].srcName, err)
<add> t.Fatalf("Could not find source link %s: %v", te.iface.srcName, err)
<ide> }
<ide>
<ide> n, ok := dr.networks["dummy"]
<ide> if !ok {
<ide> t.Fatalf("Cannot find network %s inside driver", "dummy")
<ide> }
<del> ip := te.ifaces[0].addr.IP
<add> ip := te.iface.addr.IP
<ide> if !n.bridge.bridgeIPv4.Contains(ip) {
<ide> t.Fatalf("IP %s is not a valid ip in the subnet %s", ip.String(), n.bridge.bridgeIPv4.String())
<ide> }
<ide>
<del> ip6 := te.ifaces[0].addrv6.IP
<add> ip6 := te.iface.addrv6.IP
<ide> if !n.bridge.bridgeIPv6.Contains(ip6) {
<ide> t.Fatalf("IP %s is not a valid ip in the subnet %s", ip6.String(), bridgeIPv6.String())
<ide> }
<ide> func TestLinkCreateTwo(t *testing.T) {
<ide> t.Fatalf("Failed to create bridge: %v", err)
<ide> }
<ide>
<del> te1 := &testEndpoint{ifaces: []*testInterface{}}
<add> te1 := &testEndpoint{}
<ide> err = d.CreateEndpoint("dummy", "ep", te1, nil)
<ide> if err != nil {
<ide> t.Fatalf("Failed to create a link: %s", err.Error())
<ide> }
<ide>
<del> te2 := &testEndpoint{ifaces: []*testInterface{}}
<add> te2 := &testEndpoint{}
<ide> err = d.CreateEndpoint("dummy", "ep", te2, nil)
<ide> if err != nil {
<ide> if _, ok := err.(driverapi.ErrEndpointExists); !ok {
<ide> func TestLinkCreateNoEnableIPv6(t *testing.T) {
<ide> t.Fatalf("Failed to create bridge: %v", err)
<ide> }
<ide>
<del> te := &testEndpoint{ifaces: []*testInterface{}}
<add> te := &testEndpoint{}
<ide> err = d.CreateEndpoint("dummy", "ep", te, nil)
<ide> if err != nil {
<ide> t.Fatalf("Failed to create a link: %s", err.Error())
<ide> }
<ide>
<del> interfaces := te.ifaces
<del> if interfaces[0].addrv6.IP.To16() != nil {
<del> t.Fatalf("Expectd IPv6 address to be nil when IPv6 is not enabled. Got IPv6 = %s", interfaces[0].addrv6.String())
<add> iface := te.iface
<add> if iface.addrv6.IP.To16() != nil {
<add> t.Fatalf("Expectd IPv6 address to be nil when IPv6 is not enabled. Got IPv6 = %s", iface.addrv6.String())
<ide> }
<ide>
<ide> if te.gw6.To16() != nil {
<ide> func TestLinkDelete(t *testing.T) {
<ide> t.Fatalf("Failed to create bridge: %v", err)
<ide> }
<ide>
<del> te := &testEndpoint{ifaces: []*testInterface{}}
<add> te := &testEndpoint{}
<ide> err = d.CreateEndpoint("dummy", "ep1", te, nil)
<ide> if err != nil {
<ide> t.Fatalf("Failed to create a link: %s", err.Error())
<ide><path>libnetwork/drivers/bridge/port_mapping_test.go
<ide> func TestPortMappingConfig(t *testing.T) {
<ide> t.Fatalf("Failed to create bridge: %v", err)
<ide> }
<ide>
<del> te := &testEndpoint{ifaces: []*testInterface{}}
<add> te := &testEndpoint{}
<ide> err = d.CreateEndpoint("dummy", "ep1", te, epOptions)
<ide> if err != nil {
<ide> t.Fatalf("Failed to create the endpoint: %s", err.Error())
<ide><path>libnetwork/drivers/overlay/joinleave.go
<ide> func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo,
<ide> return fmt.Errorf("could not set mac address to the container interface: %v", err)
<ide> }
<ide>
<del> for _, iNames := range jinfo.InterfaceNames() {
<del> // Make sure to set names on the correct interface ID.
<del> if iNames.ID() == 1 {
<del> err = iNames.SetNames(name2, "eth")
<del> if err != nil {
<del> return err
<del> }
<add> if iNames := jinfo.InterfaceName(); iNames != nil {
<add> err = iNames.SetNames(name2, "eth")
<add> if err != nil {
<add> return err
<ide> }
<ide> }
<ide>
<ide><path>libnetwork/drivers/overlay/ov_endpoint.go
<ide> func (d *driver) CreateEndpoint(nid, eid string, epInfo driverapi.EndpointInfo,
<ide> id: eid,
<ide> }
<ide>
<del> if epInfo != nil && (len(epInfo.Interfaces()) > 0) {
<del> addr := epInfo.Interfaces()[0].Address()
<add> if epInfo != nil && epInfo.Interface() != nil {
<add> addr := epInfo.Interface().Address()
<ide> ep.addr = &addr
<del> ep.mac = epInfo.Interfaces()[0].MacAddress()
<add> ep.mac = epInfo.Interface().MacAddress()
<ide> n.addEndpoint(ep)
<ide> return nil
<ide> }
<ide> func (d *driver) CreateEndpoint(nid, eid string, epInfo driverapi.EndpointInfo,
<ide>
<ide> ep.mac = netutils.GenerateMACFromIP(ep.addr.IP)
<ide>
<del> err = epInfo.AddInterface(1, ep.mac, *ep.addr, net.IPNet{})
<add> err = epInfo.AddInterface(ep.mac, *ep.addr, net.IPNet{})
<ide> if err != nil {
<ide> return fmt.Errorf("could not add interface to endpoint info: %v", err)
<ide> }
<ide><path>libnetwork/drivers/remote/api/api.go
<ide> type CreateEndpointRequest struct {
<ide> NetworkID string
<ide> // The ID of the endpoint for later reference.
<ide> EndpointID string
<del> Interfaces []*EndpointInterface
<add> Interface *EndpointInterface
<ide> Options map[string]interface{}
<ide> }
<ide>
<ide> // EndpointInterface represents an interface endpoint.
<ide> type EndpointInterface struct {
<del> ID int
<ide> Address string
<ide> AddressIPv6 string
<ide> MacAddress string
<ide> type EndpointInterface struct {
<ide> // CreateEndpointResponse is the response to the CreateEndpoint action.
<ide> type CreateEndpointResponse struct {
<ide> Response
<del> Interfaces []*EndpointInterface
<add> Interface *EndpointInterface
<ide> }
<ide>
<ide> // Interface is the representation of a linux interface.
<ide> type Interface struct {
<del> ID int
<ide> Address *net.IPNet
<ide> AddressIPv6 *net.IPNet
<ide> MacAddress net.HardwareAddr
<ide> type StaticRoute struct {
<ide> Destination string
<ide> RouteType int
<ide> NextHop string
<del> InterfaceID int
<ide> }
<ide>
<ide> // JoinResponse is the response to a JoinRequest.
<ide> type JoinResponse struct {
<ide> Response
<del> InterfaceNames []*InterfaceName
<del> Gateway string
<del> GatewayIPv6 string
<del> StaticRoutes []StaticRoute
<add> InterfaceName *InterfaceName
<add> Gateway string
<add> GatewayIPv6 string
<add> StaticRoutes []StaticRoute
<ide> }
<ide>
<ide> // LeaveRequest describes the API for detaching an endpoint from a sandbox.
<ide><path>libnetwork/drivers/remote/driver.go
<ide> func (d *driver) DeleteNetwork(nid string) error {
<ide> }
<ide>
<ide> func (d *driver) CreateEndpoint(nid, eid string, epInfo driverapi.EndpointInfo, epOptions map[string]interface{}) error {
<add> var reqIface *api.EndpointInterface
<add>
<ide> if epInfo == nil {
<ide> return fmt.Errorf("must not be called with nil EndpointInfo")
<ide> }
<ide>
<del> reqIfaces := make([]*api.EndpointInterface, len(epInfo.Interfaces()))
<del> for i, iface := range epInfo.Interfaces() {
<add> iface := epInfo.Interface()
<add> if iface != nil {
<ide> addr4 := iface.Address()
<ide> addr6 := iface.AddressIPv6()
<del> reqIfaces[i] = &api.EndpointInterface{
<del> ID: iface.ID(),
<add> reqIface = &api.EndpointInterface{
<ide> Address: addr4.String(),
<ide> AddressIPv6: addr6.String(),
<ide> MacAddress: iface.MacAddress().String(),
<ide> func (d *driver) CreateEndpoint(nid, eid string, epInfo driverapi.EndpointInfo,
<ide> create := &api.CreateEndpointRequest{
<ide> NetworkID: nid,
<ide> EndpointID: eid,
<del> Interfaces: reqIfaces,
<add> Interface: reqIface,
<ide> Options: epOptions,
<ide> }
<ide> var res api.CreateEndpointResponse
<ide> if err := d.call("CreateEndpoint", create, &res); err != nil {
<ide> return err
<ide> }
<ide>
<del> ifaces, err := parseInterfaces(res)
<add> inIface, err := parseInterface(res)
<ide> if err != nil {
<ide> return err
<ide> }
<del> if len(reqIfaces) > 0 && len(ifaces) > 0 {
<del> // We're not supposed to add interfaces if there already are
<del> // some. Attempt to roll back
<del> return errorWithRollback("driver attempted to add more interfaces", d.DeleteEndpoint(nid, eid))
<add> if reqIface != nil && inIface != nil {
<add> // We're not supposed to add an interface if there is already
<add> // one. Attempt to roll back.
<add> return errorWithRollback("driver attempted to add an interface, ignoring the one provided", d.DeleteEndpoint(nid, eid))
<ide> }
<del> for _, iface := range ifaces {
<add>
<add> if inIface != nil {
<ide> var addr4, addr6 net.IPNet
<del> if iface.Address != nil {
<del> addr4 = *(iface.Address)
<add> if inIface.Address != nil {
<add> addr4 = *(inIface.Address)
<ide> }
<del> if iface.AddressIPv6 != nil {
<del> addr6 = *(iface.AddressIPv6)
<add> if inIface.AddressIPv6 != nil {
<add> addr6 = *(inIface.AddressIPv6)
<ide> }
<del> if err := epInfo.AddInterface(iface.ID, iface.MacAddress, addr4, addr6); err != nil {
<del> return errorWithRollback(fmt.Sprintf("failed to AddInterface %v: %s", iface, err), d.DeleteEndpoint(nid, eid))
<add> if err := epInfo.AddInterface(inIface.MacAddress, addr4, addr6); err != nil {
<add> return errorWithRollback(fmt.Sprintf("failed to AddInterface %v: %s", inIface, err), d.DeleteEndpoint(nid, eid))
<ide> }
<ide> }
<ide> return nil
<ide> func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo,
<ide> return err
<ide> }
<ide>
<del> // Expect each interface ID given by CreateEndpoint to have an
<del> // entry at that index in the names supplied here. In other words,
<del> // if you supply 0..n interfaces with IDs 0..n above, you should
<del> // supply the names in the same order.
<del> ifaceNames := res.InterfaceNames
<del> for _, iface := range jinfo.InterfaceNames() {
<del> i := iface.ID()
<del> if i >= len(ifaceNames) || i < 0 {
<del> return fmt.Errorf("no correlating interface %d in supplied interface names", i)
<del> }
<del> supplied := ifaceNames[i]
<del> if err := iface.SetNames(supplied.SrcName, supplied.DstPrefix); err != nil {
<add> ifaceName := res.InterfaceName
<add> if ifaceName == nil {
<add> return fmt.Errorf("no interface name information received")
<add> }
<add>
<add> if iface := jinfo.InterfaceName(); iface != nil {
<add> if err := iface.SetNames(ifaceName.SrcName, ifaceName.DstPrefix); err != nil {
<ide> return errorWithRollback(fmt.Sprintf("failed to set interface name: %s", err), d.Leave(nid, eid))
<ide> }
<ide> }
<ide> func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo,
<ide> return err
<ide> }
<ide> for _, route := range routes {
<del> if jinfo.AddStaticRoute(route.Destination, route.RouteType, route.NextHop, route.InterfaceID) != nil {
<add> if jinfo.AddStaticRoute(route.Destination, route.RouteType, route.NextHop) != nil {
<ide> return errorWithRollback(fmt.Sprintf("failed to set static route: %v", route), d.Leave(nid, eid))
<ide> }
<ide> }
<ide> func parseStaticRoutes(r api.JoinResponse) ([]*types.StaticRoute, error) {
<ide> var routes = make([]*types.StaticRoute, len(r.StaticRoutes))
<ide> for i, inRoute := range r.StaticRoutes {
<ide> var err error
<del> outRoute := &types.StaticRoute{InterfaceID: inRoute.InterfaceID, RouteType: inRoute.RouteType}
<add> outRoute := &types.StaticRoute{RouteType: inRoute.RouteType}
<ide>
<ide> if inRoute.Destination != "" {
<ide> if outRoute.Destination, err = toAddr(inRoute.Destination); err != nil {
<ide> func parseStaticRoutes(r api.JoinResponse) ([]*types.StaticRoute, error) {
<ide> }
<ide>
<del>// parseInterfaces validates all the parameters of an Interface and returns them.
<add>// parseInterface validates all the parameters of an Interface and returns them.
<del>func parseInterfaces(r api.CreateEndpointResponse) ([]*api.Interface, error) {
<del> var (
<del> Interfaces = make([]*api.Interface, len(r.Interfaces))
<del> )
<del> for i, inIf := range r.Interfaces {
<add>func parseInterface(r api.CreateEndpointResponse) (*api.Interface, error) {
<add> var outIf *api.Interface
<add>
<add> inIf := r.Interface
<add> if inIf != nil {
<ide> var err error
<del> outIf := &api.Interface{ID: inIf.ID}
<add> outIf = &api.Interface{}
<ide> if inIf.Address != "" {
<ide> if outIf.Address, err = toAddr(inIf.Address); err != nil {
<ide> return nil, err
<ide> func parseInterfaces(r api.CreateEndpointResponse) ([]*api.Interface, error) {
<ide> return nil, err
<ide> }
<ide> }
<del> Interfaces[i] = outIf
<ide> }
<del> return Interfaces, nil
<add>
<add> return outIf, nil
<ide> }
<ide>
<ide> func toAddr(ipAddr string) (*net.IPNet, error) {
<ide><path>libnetwork/drivers/remote/driver_test.go
<ide> func setupPlugin(t *testing.T, name string, mux *http.ServeMux) func() {
<ide>
<ide> type testEndpoint struct {
<ide> t *testing.T
<del> id int
<ide> src string
<ide> dst string
<ide> address string
<ide> type testEndpoint struct {
<ide> routeType int
<ide> }
<ide>
<del>func (test *testEndpoint) Interfaces() []driverapi.InterfaceInfo {
<del> // return an empty one so we don't trip the check for existing
<del> // interfaces; we don't care about this after that
<del> return []driverapi.InterfaceInfo{}
<add>func (test *testEndpoint) Interface() driverapi.InterfaceInfo {
<add> return nil
<ide> }
<ide>
<del>func (test *testEndpoint) AddInterface(ID int, mac net.HardwareAddr, ipv4 net.IPNet, ipv6 net.IPNet) error {
<del> if ID != test.id {
<del> test.t.Fatalf("Wrong ID passed to AddInterface: %d", ID)
<del> }
<add>func (test *testEndpoint) AddInterface(mac net.HardwareAddr, ipv4 net.IPNet, ipv6 net.IPNet) error {
<ide> ip4, net4, _ := net.ParseCIDR(test.address)
<ide> ip6, net6, _ := net.ParseCIDR(test.addressIPv6)
<ide> if ip4 != nil {
<ide> func (test *testEndpoint) AddInterface(ID int, mac net.HardwareAddr, ipv4 net.IP
<ide> return nil
<ide> }
<ide>
<del>func (test *testEndpoint) InterfaceNames() []driverapi.InterfaceNameInfo {
<del> return []driverapi.InterfaceNameInfo{test}
<add>func (test *testEndpoint) InterfaceName() driverapi.InterfaceNameInfo {
<add> return test
<ide> }
<ide>
<ide> func compareIPs(t *testing.T, kind string, shouldBe string, supplied net.IP) {
<ide> func (test *testEndpoint) SetNames(src string, dst string) error {
<ide> return nil
<ide> }
<ide>
<del>func (test *testEndpoint) AddStaticRoute(destination *net.IPNet, routeType int, nextHop net.IP, interfaceID int) error {
<add>func (test *testEndpoint) AddStaticRoute(destination *net.IPNet, routeType int, nextHop net.IP) error {
<ide> compareIPNets(test.t, "Destination", test.destination, *destination)
<ide> compareIPs(test.t, "NextHop", test.nextHop, nextHop)
<ide>
<ide> if test.routeType != routeType {
<ide> test.t.Fatalf(`Wrong RouteType; expected "%d", got "%d"`, test.routeType, routeType)
<ide> }
<ide>
<del> if test.id != interfaceID {
<del> test.t.Fatalf(`Wrong InterfaceID; expected "%d", got "%d"`, test.id, interfaceID)
<del> }
<del>
<ide> return nil
<ide> }
<ide>
<del>func (test *testEndpoint) ID() int {
<del> return test.id
<del>}
<del>
<ide> func TestRemoteDriver(t *testing.T) {
<ide> var plugin = "test-net-driver"
<ide>
<ide> func TestRemoteDriver(t *testing.T) {
<ide> })
<ide> handle(t, mux, "CreateEndpoint", func(msg map[string]interface{}) interface{} {
<ide> iface := map[string]interface{}{
<del> "ID": ep.id,
<ide> "Address": ep.address,
<ide> "AddressIPv6": ep.addressIPv6,
<ide> "MacAddress": ep.macAddress,
<ide> }
<ide> return map[string]interface{}{
<del> "Interfaces": []interface{}{iface},
<add> "Interface": iface,
<ide> }
<ide> })
<ide> handle(t, mux, "Join", func(msg map[string]interface{}) interface{} {
<ide> func TestRemoteDriver(t *testing.T) {
<ide> "GatewayIPv6": ep.gatewayIPv6,
<ide> "HostsPath": ep.hostsPath,
<ide> "ResolvConfPath": ep.resolvConfPath,
<del> "InterfaceNames": []map[string]interface{}{
<del> map[string]interface{}{
<del> "SrcName": ep.src,
<del> "DstPrefix": ep.dst,
<del> },
<add> "InterfaceName": map[string]interface{}{
<add> "SrcName": ep.src,
<add> "DstPrefix": ep.dst,
<ide> },
<ide> "StaticRoutes": []map[string]interface{}{
<ide> map[string]interface{}{
<ide> "Destination": ep.destination,
<ide> "RouteType": ep.routeType,
<del> "InterfaceID": ep.id,
<ide> "NextHop": ep.nextHop,
<ide> },
<ide> },
<ide> func TestMissingValues(t *testing.T) {
<ide> defer setupPlugin(t, plugin, mux)()
<ide>
<ide> ep := &testEndpoint{
<del> t: t,
<del> id: 0,
<add> t: t,
<ide> }
<ide>
<ide> handle(t, mux, "CreateEndpoint", func(msg map[string]interface{}) interface{} {
<ide> iface := map[string]interface{}{
<del> "ID": ep.id,
<ide> "Address": ep.address,
<ide> "AddressIPv6": ep.addressIPv6,
<ide> "MacAddress": ep.macAddress,
<ide> func TestMissingValues(t *testing.T) {
<ide> type rollbackEndpoint struct {
<ide> }
<ide>
<del>func (r *rollbackEndpoint) Interfaces() []driverapi.InterfaceInfo {
<del> return []driverapi.InterfaceInfo{}
<add>func (r *rollbackEndpoint) Interface() driverapi.InterfaceInfo {
<add> return nil
<ide> }
<ide>
<del>func (r *rollbackEndpoint) AddInterface(_ int, _ net.HardwareAddr, _ net.IPNet, _ net.IPNet) error {
<add>func (r *rollbackEndpoint) AddInterface(_ net.HardwareAddr, _ net.IPNet, _ net.IPNet) error {
<ide> return fmt.Errorf("fail this to trigger a rollback")
<ide> }
<ide>
<ide> func TestRollback(t *testing.T) {
<ide>
<ide> handle(t, mux, "CreateEndpoint", func(msg map[string]interface{}) interface{} {
<ide> iface := map[string]interface{}{
<del> "ID": 0,
<ide> "Address": "192.168.4.5/16",
<ide> "AddressIPv6": "",
<ide> "MacAddress": "7a:12:34:56:78:90",
<ide> }
<ide> return map[string]interface{}{
<del> "Interfaces": []interface{}{iface},
<add> "Interface": interface{}(iface),
<ide> }
<ide> })
<ide> handle(t, mux, "DeleteEndpoint", func(msg map[string]interface{}) interface{} {
<ide><path>libnetwork/endpoint.go
<ide> type endpoint struct {
<ide> name string
<ide> id string
<ide> network *network
<del> iFaces []*endpointInterface
<add> iface *endpointInterface
<ide> joinInfo *endpointJoinInfo
<ide> sandboxID string
<ide> exposedPorts []types.TransportPort
<ide> func (ep *endpoint) MarshalJSON() ([]byte, error) {
<ide> epMap := make(map[string]interface{})
<ide> epMap["name"] = ep.name
<ide> epMap["id"] = ep.id
<del> epMap["ep_iface"] = ep.iFaces
<add> epMap["ep_iface"] = ep.iface
<ide> epMap["exposed_ports"] = ep.exposedPorts
<ide> epMap["generic"] = ep.generic
<ide> epMap["sandbox"] = ep.sandboxID
<ide> func (ep *endpoint) UnmarshalJSON(b []byte) (err error) {
<ide> ep.id = epMap["id"].(string)
<ide>
<ide> ib, _ := json.Marshal(epMap["ep_iface"])
<del> var ifaces []endpointInterface
<del> json.Unmarshal(ib, &ifaces)
<del> ep.iFaces = make([]*endpointInterface, 0)
<del> for _, iface := range ifaces {
<del> ep.iFaces = append(ep.iFaces, &iface)
<del> }
<add> ep.iface = &endpointInterface{}
<add> json.Unmarshal(ib, ep.iface)
<ide>
<ide> tb, _ := json.Marshal(epMap["exposed_ports"])
<ide> var tPorts []types.TransportPort
<ide> func (ep *endpoint) hasInterface(iName string) bool {
<ide> ep.Lock()
<ide> defer ep.Unlock()
<ide>
<del> for _, iface := range ep.iFaces {
<del> if iface.srcName == iName {
<del> return true
<del> }
<del> }
<del>
<del> return false
<add> return ep.iface != nil && ep.iface.srcName == iName
<ide> }
<ide>
<ide> func (ep *endpoint) Leave(sbox Sandbox, options ...EndpointOption) error {
<ide> func (ep *endpoint) getFirstInterfaceAddress() net.IP {
<ide> ep.Lock()
<ide> defer ep.Unlock()
<ide>
<del> if len(ep.iFaces) != 0 && ep.iFaces[0] != nil {
<del> return ep.iFaces[0].addr.IP
<add> if ep.iface != nil {
<add> return ep.iface.addr.IP
<ide> }
<ide>
<ide> return nil
<ide><path>libnetwork/endpoint_info.go
<ide> import (
<ide>
<ide> // EndpointInfo provides an interface to retrieve network resources bound to the endpoint.
<ide> type EndpointInfo interface {
<del> // InterfaceList returns an interface list which were assigned to the endpoint
<del> // by the driver. This can be used after the endpoint has been created.
<del> InterfaceList() []InterfaceInfo
<add> // Iface returns InterfaceInfo, a go interface that can be used
<add> // to get more information on the interface which was assigned to
<add> // the endpoint by the driver. This can be used after the
<add> // endpoint has been created.
<add> Iface() InterfaceInfo
<ide>
<ide> // Gateway returns the IPv4 gateway assigned by the driver.
<ide> // This will only return a valid value if a container has joined the endpoint.
<ide> type InterfaceInfo interface {
<ide> }
<ide>
<ide> type endpointInterface struct {
<del> id int
<ide> mac net.HardwareAddr
<ide> addr net.IPNet
<ide> addrv6 net.IPNet
<ide> type endpointInterface struct {
<ide>
<ide> func (epi *endpointInterface) MarshalJSON() ([]byte, error) {
<ide> epMap := make(map[string]interface{})
<del> epMap["id"] = epi.id
<ide> epMap["mac"] = epi.mac.String()
<ide> epMap["addr"] = epi.addr.String()
<ide> epMap["addrv6"] = epi.addrv6.String()
<ide> func (epi *endpointInterface) UnmarshalJSON(b []byte) (err error) {
<ide> if err := json.Unmarshal(b, &epMap); err != nil {
<ide> return err
<ide> }
<del> epi.id = int(epMap["id"].(float64))
<ide>
<ide> mac, _ := net.ParseMAC(epMap["mac"].(string))
<ide> epi.mac = mac
<ide> func (ep *endpoint) DriverInfo() (map[string]interface{}, error) {
<ide> return driver.EndpointOperInfo(nid, epid)
<ide> }
<ide>
<del>func (ep *endpoint) InterfaceList() []InterfaceInfo {
<add>func (ep *endpoint) Iface() InterfaceInfo {
<ide> ep.Lock()
<ide> defer ep.Unlock()
<ide>
<del> iList := make([]InterfaceInfo, len(ep.iFaces))
<del>
<del> for i, iface := range ep.iFaces {
<del> iList[i] = iface
<add> if ep.iface != nil {
<add> return ep.iface
<ide> }
<ide>
<del> return iList
<add> return nil
<ide> }
<ide>
<del>func (ep *endpoint) Interfaces() []driverapi.InterfaceInfo {
<add>func (ep *endpoint) Interface() driverapi.InterfaceInfo {
<ide> ep.Lock()
<ide> defer ep.Unlock()
<ide>
<del> iList := make([]driverapi.InterfaceInfo, len(ep.iFaces))
<del>
<del> for i, iface := range ep.iFaces {
<del> iList[i] = iface
<add> if ep.iface != nil {
<add> return ep.iface
<ide> }
<ide>
<del> return iList
<add> return nil
<ide> }
<ide>
<del>func (ep *endpoint) AddInterface(id int, mac net.HardwareAddr, ipv4 net.IPNet, ipv6 net.IPNet) error {
<add>func (ep *endpoint) AddInterface(mac net.HardwareAddr, ipv4 net.IPNet, ipv6 net.IPNet) error {
<ide> ep.Lock()
<ide> defer ep.Unlock()
<ide>
<ide> iface := &endpointInterface{
<del> id: id,
<ide> addr: *types.GetIPNetCopy(&ipv4),
<ide> addrv6: *types.GetIPNetCopy(&ipv6),
<ide> }
<ide> iface.mac = types.GetMacCopy(mac)
<ide>
<del> ep.iFaces = append(ep.iFaces, iface)
<add> ep.iface = iface
<ide> return nil
<ide> }
<ide>
<del>func (epi *endpointInterface) ID() int {
<del> return epi.id
<del>}
<del>
<ide> func (epi *endpointInterface) MacAddress() net.HardwareAddr {
<ide> return types.GetMacCopy(epi.mac)
<ide> }
<ide> func (epi *endpointInterface) SetNames(srcName string, dstPrefix string) error {
<ide> return nil
<ide> }
<ide>
<del>func (ep *endpoint) InterfaceNames() []driverapi.InterfaceNameInfo {
<add>func (ep *endpoint) InterfaceName() driverapi.InterfaceNameInfo {
<ide> ep.Lock()
<ide> defer ep.Unlock()
<ide>
<del> iList := make([]driverapi.InterfaceNameInfo, len(ep.iFaces))
<del>
<del> for i, iface := range ep.iFaces {
<del> iList[i] = iface
<add> if ep.iface != nil {
<add> return ep.iface
<ide> }
<ide>
<del> return iList
<add> return nil
<ide> }
<ide>
<del>func (ep *endpoint) AddStaticRoute(destination *net.IPNet, routeType int, nextHop net.IP, interfaceID int) error {
<add>func (ep *endpoint) AddStaticRoute(destination *net.IPNet, routeType int, nextHop net.IP) error {
<ide> ep.Lock()
<ide> defer ep.Unlock()
<ide>
<del> r := types.StaticRoute{Destination: destination, RouteType: routeType, NextHop: nextHop, InterfaceID: interfaceID}
<add> r := types.StaticRoute{Destination: destination, RouteType: routeType, NextHop: nextHop}
<ide>
<ide> if routeType == types.NEXTHOP {
<ide> // If the route specifies a next-hop, then it's loosely routed (i.e. not bound to a particular interface).
<ide> func (ep *endpoint) AddStaticRoute(destination *net.IPNet, routeType int, nextHo
<ide> }
<ide>
<ide> func (ep *endpoint) addInterfaceRoute(route *types.StaticRoute) error {
<del> for _, iface := range ep.iFaces {
<del> if iface.id == route.InterfaceID {
<del> iface.routes = append(iface.routes, route.Destination)
<del> return nil
<del> }
<del> }
<del> return types.BadRequestErrorf("Interface with ID %d doesn't exist.",
<del> route.InterfaceID)
<add> // The endpoint lock is expected to be held already by the caller
<add> // (AddStaticRoute above); the mutex is not reentrant, so it must
<add> // not be re-acquired here.
<add> iface := ep.iface
<add> iface.routes = append(iface.routes, route.Destination)
<add> return nil
<ide> }
<ide>
<ide> func (ep *endpoint) Sandbox() Sandbox {
<ide><path>libnetwork/libnetwork_test.go
<ide> func TestEndpointJoin(t *testing.T) {
<ide>
<ide> // Validate if ep.Info() only gives me IP address info and not names and gateway during CreateEndpoint()
<ide> info := ep1.Info()
<del> for _, iface := range info.InterfaceList() {
<add> if iface := info.Iface(); iface != nil {
<ide> if iface.Address().IP.To4() == nil {
<ide> t.Fatalf("Invalid IP address returned: %v", iface.Address())
<ide> }
<ide><path>libnetwork/network.go
<ide> func (n *network) CreateEndpoint(name string, options ...EndpointOption) (Endpoi
<ide> }
<ide>
<ide> ep := &endpoint{name: name,
<del> iFaces: []*endpointInterface{},
<ide> generic: make(map[string]interface{})}
<ide> ep.id = stringid.GenerateRandomID()
<ide> ep.network = n
<ide> func (n *network) isGlobalScoped() (bool, error) {
<ide> func (n *network) updateSvcRecord(ep *endpoint, isAdd bool) {
<ide> n.Lock()
<ide> var recs []etchosts.Record
<del> for _, iface := range ep.InterfaceList() {
<add> if iface := ep.Iface(); iface != nil {
<ide> if isAdd {
<ide> n.svcRecords[ep.Name()] = iface.Address().IP
<ide> n.svcRecords[ep.Name()+"."+n.name] = iface.Address().IP
<ide><path>libnetwork/sandbox.go
<ide> func (sb *sandbox) populateNetworkResources(ep *endpoint) error {
<ide>
<ide> ep.Lock()
<ide> joinInfo := ep.joinInfo
<del> ifaces := ep.iFaces
<add> i := ep.iface
<ide> ep.Unlock()
<ide>
<del> for _, i := range ifaces {
<add> if i != nil {
<ide> var ifaceOptions []osl.IfaceOption
<ide>
<ide> ifaceOptions = append(ifaceOptions, sb.osSbox.InterfaceOptions().Address(&i.addr), sb.osSbox.InterfaceOptions().Routes(i.routes))
<ide><path>libnetwork/sandbox_test.go
<ide> func createEmptyCtrlr() *controller {
<ide> return &controller{sandboxes: sandboxTable{}}
<ide> }
<ide>
<del>func createEmptyEndpoint() *endpoint {
<del> return &endpoint{
<del> joinInfo: &endpointJoinInfo{},
<del> iFaces: []*endpointInterface{},
<del> }
<del>}
<del>
<ide> func getTestEnv(t *testing.T) (NetworkController, Network, Network) {
<ide> c, err := New()
<ide> if err != nil {
<ide><path>libnetwork/types/types.go
<ide> type StaticRoute struct {
<ide>
<ide> // NextHop will be resolved by the kernel (i.e. as a loose hop).
<ide> NextHop net.IP
<del>
<del> // InterfaceID must refer to a defined interface on the
<del> // Endpoint to which the routes are specified. Routes specified this way
<del> // are interpreted as directly connected to the specified interface (no
<del> // next hop will be used).
<del> InterfaceID int
<ide> }
<ide>
<ide> // GetCopy returns a copy of this StaticRoute structure
<ide> func (r *StaticRoute) GetCopy() *StaticRoute {
<ide> d := GetIPNetCopy(r.Destination)
<ide> nh := GetIPCopy(r.NextHop)
<ide> return &StaticRoute{Destination: d,
<del> RouteType: r.RouteType,
<del> NextHop: nh,
<del> InterfaceID: r.InterfaceID}
<add> RouteType: r.RouteType,
<add> NextHop: nh,
<add> }
<ide> }
<ide>
<ide> /******************************
| 20
|
Javascript
|
Javascript
|
fix weird test issue
|
56b18081e079453454d537820e8033b92ab04bb2
|
<ide><path>src/test/moment/locale.js
<ide> test('return locale name', function (assert) {
<ide> var registered = moment.locale('return-this', {});
<ide>
<ide> assert.equal(registered, 'return-this', 'returns the locale configured');
<del> moment.locale('return-this', null);
<add> moment.defineLocale('return-this', null);
<ide> });
<ide>
<ide> test('changing the global locale doesn\'t affect existing instances', function (assert) {
<ide><path>src/test/moment/weekday.js
<ide> test('weekday first day of week Wednesday (dow 3)', function (assert) {
<ide> assert.equal(moment([1970, 0, 4]).weekday(), 4, 'Jan 4 1970 is Sunday -- 4th day');
<ide> assert.equal(moment([2001, 4, 14]).weekday(), 5, 'May 14 2001 is Monday -- 5th day');
<ide> assert.equal(moment([2000, 0, 4]).weekday(), 6, 'Jan 4 2000 is Tuesday -- 6th day');
<del> moment.locale('dow:3,doy:6', null);
<ide> });
<ide>
<ide> test('weekday first day of week Thursday (dow 4)', function (assert) {
| 2
|
Text
|
Text
|
add content into activity section
|
77d7a725be94c6ced155c2a3248680f22cda0cee
|
<ide><path>guide/english/android-development/core-components/index.md
<ide> An _activity_ is a component that has a user interface and represents a single s
<ide>
<ide> An activity facilitates the following key interactions between system and app:
<ide> - Keeping track of what the user currently cares about (what is on screen) to ensure that the system keeps running the process that is hosting the activity.
<add>- Knowing that previously used processes contain things the user may return to (stopped activities), and thus more highly prioritize keeping those processes around.
<add>- Helping the app handle having its process killed so the user can return to activities with their previous state restored.
<ide>
<ide>
<ide> #### [Activity Lifecycle](https://developer.android.com/guide/components/activities/activity-lifecycle)
| 1
|
Python
|
Python
|
enhance rewrite state_dict missing _metadata
|
bec02ff209d20d4bca974dab99a8c4e11f5fa737
|
<ide><path>src/transformers/modeling_utils.py
<ide> def save_pretrained(
<ide> # Handle the case where some state_dict keys shouldn't be saved
<ide> if self._keys_to_ignore_on_save is not None:
<ide> for ignore_key in self._keys_to_ignore_on_save:
<del> del state_dict[ignore_key]
<add> if ignore_key in state_dict.keys():
<add> del state_dict[ignore_key]
<ide>
<ide> # If we save using the predefined names, we can load using `from_pretrained`
<ide> output_model_file = os.path.join(save_directory, WEIGHTS_NAME)
| 1
|
Javascript
|
Javascript
|
improve format performance
|
9752fce34d50400f8f4b5cbafbce2161b6e07a45
|
<ide><path>lib/util.js
<ide> function format(...args) {
<ide> return formatWithOptions(emptyOptions, ...args);
<ide> }
<ide>
<del>function formatValue(val, inspectOptions) {
<del> const inspectTypes = ['object', 'symbol', 'function', 'number'];
<del>
<del> if (inspectTypes.includes(typeof val)) {
<del> return inspect(val, inspectOptions);
<del> } else {
<del> return String(val);
<del> }
<del>}
<del>
<ide> function formatWithOptions(inspectOptions, ...args) {
<ide> const first = args[0];
<del> const parts = [];
<add> let a = 0;
<add> let str = '';
<add> let join = '';
<ide>
<del> const firstIsString = typeof first === 'string';
<del>
<del> if (firstIsString && args.length === 1) {
<del> return first;
<del> }
<del>
<del> if (firstIsString && /%[sjdOoif%]/.test(first)) {
<del> let i, tempStr;
<del> let str = '';
<del> let a = 1;
<add> if (typeof first === 'string') {
<add> if (args.length === 1) {
<add> return first;
<add> }
<add> let tempStr;
<ide> let lastPos = 0;
<ide>
<del> for (i = 0; i < first.length - 1; i++) {
<add> for (var i = 0; i < first.length - 1; i++) {
<ide> if (first.charCodeAt(i) === 37) { // '%'
<ide> const nextChar = first.charCodeAt(++i);
<del> if (a !== args.length) {
<add> if (a + 1 !== args.length) {
<ide> switch (nextChar) {
<ide> case 115: // 's'
<del> tempStr = String(args[a++]);
<add> tempStr = String(args[++a]);
<ide> break;
<ide> case 106: // 'j'
<del> tempStr = tryStringify(args[a++]);
<add> tempStr = tryStringify(args[++a]);
<ide> break;
<ide> case 100: // 'd'
<del> const tempNum = args[a++];
<add> const tempNum = args[++a];
<ide> // eslint-disable-next-line valid-typeof
<ide> if (typeof tempNum === 'bigint') {
<ide> tempStr = `${tempNum}n`;
<ide> function formatWithOptions(inspectOptions, ...args) {
<ide> }
<ide> break;
<ide> case 79: // 'O'
<del> tempStr = inspect(args[a++], inspectOptions);
<add> tempStr = inspect(args[++a], inspectOptions);
<ide> break;
<ide> case 111: // 'o'
<ide> {
<del> const opts = Object.assign({}, inspectOptions, {
<add> tempStr = inspect(args[++a], {
<add> ...inspectOptions,
<ide> showHidden: true,
<ide> showProxy: true,
<ide> depth: 4
<ide> });
<del> tempStr = inspect(args[a++], opts);
<ide> break;
<ide> }
<ide> case 105: // 'i'
<del> const tempInteger = args[a++];
<add> const tempInteger = args[++a];
<ide> // eslint-disable-next-line valid-typeof
<ide> if (typeof tempInteger === 'bigint') {
<ide> tempStr = `${tempInteger}n`;
<ide> function formatWithOptions(inspectOptions, ...args) {
<ide> }
<ide> break;
<ide> case 102: // 'f'
<del> const tempFloat = args[a++];
<add> const tempFloat = args[++a];
<ide> if (typeof tempFloat === 'symbol') {
<ide> tempStr = 'NaN';
<ide> } else {
<ide> function formatWithOptions(inspectOptions, ...args) {
<ide> }
<ide> }
<ide> }
<del> if (lastPos === 0) {
<del> str = first;
<del> } else if (lastPos < first.length) {
<del> str += first.slice(lastPos);
<del> }
<del>
<del> parts.push(str);
<del> while (a < args.length) {
<del> parts.push(formatValue(args[a], inspectOptions));
<add> if (lastPos !== 0) {
<ide> a++;
<del> }
<del> } else {
<del> for (const arg of args) {
<del> parts.push(formatValue(arg, inspectOptions));
<add> join = ' ';
<add> if (lastPos < first.length) {
<add> str += first.slice(lastPos);
<add> }
<ide> }
<ide> }
<ide>
<del> return parts.join(' ');
<add> while (a < args.length) {
<add> const value = args[a];
<add> // TODO(BridgeAR): This should apply for all besides strings. Especially
<add> // BigInt should be properly inspected.
<add> str += join;
<add> if (typeof value !== 'string' &&
<add> typeof value !== 'boolean' &&
<add> // eslint-disable-next-line valid-typeof
<add> typeof value !== 'bigint') {
<add> str += inspect(value, inspectOptions);
<add> } else {
<add> str += value;
<add> }
<add> join = ' ';
<add> a++;
<add> }
<add> return str;
<ide> }
<ide>
<ide> const debugs = {};
| 1
|
Javascript
|
Javascript
|
fix worker resolution on using minified version
|
010d38a8c0f6fad7f0b5e6955e5371a8121be87c
|
<ide><path>src/display/api.js
<ide> var PDFWorker = (function PDFWorkerClosure() {
<ide> if (typeof PDFJSDev !== 'undefined' &&
<ide> PDFJSDev.test('PRODUCTION && !(MOZCENTRAL || FIREFOX)') &&
<ide> pdfjsFilePath) {
<del> return pdfjsFilePath.replace(/\.js$/i, '.worker.js');
<add> return pdfjsFilePath.replace(/(\.(?:min\.)?js)$/i, '.worker$1');
<ide> }
<ide> error('No PDFJS.workerSrc specified');
<ide> }
| 1
|
Go
|
Go
|
move container to its own package
|
6bb0d1816acd8d4f7a542a6aac047da2b874f476
|
<ide><path>builder/builder.go
<ide> import (
<ide> "os"
<ide>
<ide> // TODO: remove dependency on daemon
<add> "github.com/docker/docker/container"
<ide> "github.com/docker/docker/daemon"
<ide> "github.com/docker/docker/image"
<ide> "github.com/docker/docker/runconfig"
<ide> type Docker interface {
<ide> // Pull tells Docker to pull image referenced by `name`.
<ide> Pull(name string) (*image.Image, error)
<ide>
<del> // TODO: move daemon.Container to its own package
<del>
<ide> // Container looks up a Docker container referenced by `id`.
<del> Container(id string) (*daemon.Container, error)
<add> Container(id string) (*container.Container, error)
<ide> // Create creates a new Docker container and returns potential warnings
<ide> // TODO: put warnings in the error
<del> Create(*runconfig.Config, *runconfig.HostConfig) (*daemon.Container, []string, error)
<add> Create(*runconfig.Config, *runconfig.HostConfig) (*container.Container, []string, error)
<ide> // Remove removes a container specified by `id`.
<ide> Remove(id string, cfg *daemon.ContainerRmConfig) error
<ide> // Commit creates a new Docker image from an existing Docker container.
<ide> type Docker interface {
<ide> // TODO: make an Extract method instead of passing `decompress`
<ide> // TODO: do not pass a FileInfo, instead refactor the archive package to export a Walk function that can be used
<ide> // with Context.Walk
<del> Copy(c *daemon.Container, destPath string, src FileInfo, decompress bool) error
<add> Copy(c *container.Container, destPath string, src FileInfo, decompress bool) error
<ide>
<ide> // Retain retains an image avoiding it to be removed or overwritten until a corresponding Release() call.
<ide> // TODO: remove
<ide> type Docker interface {
<ide> // TODO: remove
<ide> Release(sessionID string, activeImages []string)
<ide> // Kill stops the container execution abruptly.
<del> Kill(c *daemon.Container) error
<add> Kill(c *container.Container) error
<ide> // Mount mounts the root filesystem for the container.
<del> Mount(c *daemon.Container) error
<add> Mount(c *container.Container) error
<ide> // Unmount unmounts the root filesystem for the container.
<del> Unmount(c *daemon.Container) error
<add> Unmount(c *container.Container) error
<ide> // Start starts a new container
<del> Start(c *daemon.Container) error
<add> Start(c *container.Container) error
<ide> }
<ide>
<ide> // ImageCache abstracts an image cache store.
<ide><path>builder/dockerfile/internals.go
<ide> import (
<ide> "github.com/docker/docker/api"
<ide> "github.com/docker/docker/builder"
<ide> "github.com/docker/docker/builder/dockerfile/parser"
<add> "github.com/docker/docker/container"
<ide> "github.com/docker/docker/daemon"
<ide> "github.com/docker/docker/image"
<ide> "github.com/docker/docker/pkg/archive"
<ide> func (b *Builder) processImageFrom(img *image.Image) error {
<ide> }
<ide>
<ide> // The default path will be blank on Windows (set by HCS)
<del> if len(b.runConfig.Env) == 0 && daemon.DefaultPathEnv != "" {
<del> b.runConfig.Env = append(b.runConfig.Env, "PATH="+daemon.DefaultPathEnv)
<add> if len(b.runConfig.Env) == 0 && container.DefaultPathEnv != "" {
<add> b.runConfig.Env = append(b.runConfig.Env, "PATH="+container.DefaultPathEnv)
<ide> }
<ide>
<ide> // Process ONBUILD triggers if they exist
<ide> func (b *Builder) probeCache() (bool, error) {
<ide> return true, nil
<ide> }
<ide>
<del>func (b *Builder) create() (*daemon.Container, error) {
<add>func (b *Builder) create() (*container.Container, error) {
<ide> if b.image == "" && !b.noBaseImage {
<ide> return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
<ide> }
<ide> func (b *Builder) create() (*daemon.Container, error) {
<ide> return c, nil
<ide> }
<ide>
<del>func (b *Builder) run(c *daemon.Container) error {
<add>func (b *Builder) run(c *container.Container) error {
<ide> var errCh chan error
<ide> if b.Verbose {
<ide> errCh = c.Attach(nil, b.Stdout, b.Stderr)
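
The PATH-defaulting rule used in processImageFrom above is small enough to isolate: inject a default PATH only when the image supplied no environment at all and the platform default is non-empty (it is blank on Windows). A standalone sketch of that rule (ensurePath is a hypothetical helper; the constant mirrors container.DefaultPathEnv on unix):

package main

import "fmt"

// DefaultPathEnv mirrors container.DefaultPathEnv on unix; on Windows it
// is empty because the platform sets the default path itself.
const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"

// ensurePath reproduces the defaulting rule from processImageFrom above:
// only inject PATH when the config has no environment at all and the
// platform default is non-empty.
func ensurePath(env []string) []string {
	if len(env) == 0 && DefaultPathEnv != "" {
		env = append(env, "PATH="+DefaultPathEnv)
	}
	return env
}

func main() {
	fmt.Println(ensurePath(nil))                 // [PATH=/usr/local/sbin:...]
	fmt.Println(ensurePath([]string{"FOO=bar"})) // [FOO=bar] — left untouched
}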
<ide><path>container/archive.go
<add>package container
<add>
<add>import (
<add> "os"
<add> "path/filepath"
<add>
<add> "github.com/docker/docker/api/types"
<add> "github.com/docker/docker/pkg/archive"
<add>)
<add>
<add>// ResolvePath resolves the given path in the container to a resource on the
<add>// host. Returns a resolved path (absolute path to the resource on the host),
<add>// the absolute path to the resource relative to the container's rootfs, and
<add>// an error if the path points outside the container's rootfs.
<add>func (container *Container) ResolvePath(path string) (resolvedPath, absPath string, err error) {
<add> // Consider the given path as an absolute path in the container.
<add> absPath = archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path)
<add>
<add> // Split the absPath into its Directory and Base components. We will
<add> // resolve the dir in the scope of the container then append the base.
<add> dirPath, basePath := filepath.Split(absPath)
<add>
<add> resolvedDirPath, err := container.GetResourcePath(dirPath)
<add> if err != nil {
<add> return "", "", err
<add> }
<add>
<add> // resolvedDirPath will have been cleaned (no trailing path separators) so
<add> // we can manually join it with the base path element.
<add> resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
<add>
<add> return resolvedPath, absPath, nil
<add>}
<add>
<add>// StatPath stats the resource referenced by the given path. Locks and mounts should
<add>// be acquired before calling this method and the given path should be fully
<add>// resolved to a path on the host corresponding to the given absolute path
<add>// inside the container.
<add>func (container *Container) StatPath(resolvedPath, absPath string) (stat *types.ContainerPathStat, err error) {
<add> lstat, err := os.Lstat(resolvedPath)
<add> if err != nil {
<add> return nil, err
<add> }
<add>
<add> var linkTarget string
<add> if lstat.Mode()&os.ModeSymlink != 0 {
<add> // Fully evaluate the symlink in the scope of the container rootfs.
<add> hostPath, err := container.GetResourcePath(absPath)
<add> if err != nil {
<add> return nil, err
<add> }
<add>
<add> linkTarget, err = filepath.Rel(container.BaseFS, hostPath)
<add> if err != nil {
<add> return nil, err
<add> }
<add>
<add> // Make it an absolute path.
<add> linkTarget = filepath.Join(string(filepath.Separator), linkTarget)
<add> }
<add>
<add> return &types.ContainerPathStat{
<add> Name: filepath.Base(absPath),
<add> Size: lstat.Size(),
<add> Mode: lstat.Mode(),
<add> Mtime: lstat.ModTime(),
<add> LinkTarget: linkTarget,
<add> }, nil
<add>}
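
ResolvePath deliberately resolves only the directory component inside the container scope and re-attaches the base name afterwards, so that a symlink at the final path element is reported by the subsequent lstat rather than silently followed. A self-contained sketch of that split-resolve-rejoin shape (resolveInScope is a simplified stand-in for GetResourcePath and does not perform real symlink scoping):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// resolveInScope is a stand-in for container.GetResourcePath: it scopes
// an in-container path under the given rootfs. The real code resolves
// symlinks with symlink.FollowSymlinkInScope; this sketch only joins.
func resolveInScope(rootfs, path string) string {
	clean := filepath.Join(string(os.PathSeparator), path)
	return filepath.Join(rootfs, clean)
}

// resolvePath mirrors Container.ResolvePath: resolve the directory in
// scope, then re-attach the base name unresolved so a symlink at the
// final element is stat'ed as a link, not followed.
func resolvePath(rootfs, path string) (resolved, abs string) {
	abs = filepath.Join(string(filepath.Separator), path)
	dir, base := filepath.Split(abs)
	return resolveInScope(rootfs, dir) + string(filepath.Separator) + base, abs
}

func main() {
	resolved, abs := resolvePath("/var/lib/docker/rootfs", "etc/../etc/hosts")
	fmt.Println(resolved, abs) // /var/lib/docker/rootfs/etc/hosts /etc/hosts
}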
<add><path>container/container.go
<del><path>daemon/container.go
<del>package daemon
<add>package container
<ide>
<ide> import (
<ide> "encoding/json"
<del> "errors"
<ide> "fmt"
<ide> "io"
<ide> "os"
<ide> import (
<ide> "syscall"
<ide> "time"
<ide>
<del> "github.com/opencontainers/runc/libcontainer/label"
<del>
<ide> "github.com/Sirupsen/logrus"
<ide> "github.com/docker/docker/daemon/exec"
<ide> "github.com/docker/docker/daemon/execdriver"
<ide> import (
<ide> "github.com/docker/docker/pkg/promise"
<ide> "github.com/docker/docker/pkg/signal"
<ide> "github.com/docker/docker/pkg/symlink"
<del>
<ide> "github.com/docker/docker/runconfig"
<ide> "github.com/docker/docker/volume"
<add> "github.com/opencontainers/runc/libcontainer/label"
<ide> )
<ide>
<ide> const configFileName = "config.v2.json"
<ide>
<del>var (
<del> // ErrRootFSReadOnly is returned when a container
<del> // rootfs is marked readonly.
<del> ErrRootFSReadOnly = errors.New("container rootfs is marked read-only")
<del>)
<del>
<ide> // CommonContainer holds the fields for a container which are
<ide> // applicable across all platforms supported by the daemon.
<ide> type CommonContainer struct {
<ide> *runconfig.StreamConfig
<ide> // embed for Container to support states directly.
<ide> *State `json:"State"` // Needed for remote api version <= 1.11
<del> root string // Path to the "home" of the container, including metadata.
<del> basefs string // Path to the graphdriver mountpoint
<del> rwlayer layer.RWLayer
<add> Root string `json:"-"` // Path to the "home" of the container, including metadata.
<add> BaseFS string `json:"-"` // Path to the graphdriver mountpoint
<add> RWLayer layer.RWLayer `json:"-"`
<ide> ID string
<ide> Created time.Time
<ide> Path string
<ide> type CommonContainer struct {
<ide> HasBeenStartedBefore bool
<ide> HasBeenManuallyStopped bool // used for unless-stopped restart policy
<ide> MountPoints map[string]*volume.MountPoint
<del> hostConfig *runconfig.HostConfig
<del> command *execdriver.Command
<add> HostConfig *runconfig.HostConfig `json:"-"` // do not serialize the host config in the json, otherwise we'll make the container unportable
<add> Command *execdriver.Command `json:"-"`
<ide> monitor *containerMonitor
<del> execCommands *exec.Store
<add> ExecCommands *exec.Store `json:"-"`
<ide> // logDriver for closing
<del> logDriver logger.Logger
<del> logCopier *logger.Copier
<add> LogDriver logger.Logger `json:"-"`
<add> LogCopier *logger.Copier `json:"-"`
<ide> }
<ide>
<del>// newBaseContainer creates a new container with its
<add>// NewBaseContainer creates a new container with its
<ide> // basic configuration.
<del>func newBaseContainer(id, root string) *Container {
<add>func NewBaseContainer(id, root string) *Container {
<ide> return &Container{
<ide> CommonContainer: CommonContainer{
<ide> ID: id,
<ide> State: NewState(),
<del> execCommands: exec.NewStore(),
<del> root: root,
<add> ExecCommands: exec.NewStore(),
<add> Root: root,
<ide> MountPoints: make(map[string]*volume.MountPoint),
<ide> StreamConfig: runconfig.NewStreamConfig(),
<ide> },
<ide> }
<ide> }
<ide>
<del>func (container *Container) fromDisk() error {
<del> pth, err := container.jsonPath()
<add>// FromDisk loads the container configuration stored in the host.
<add>func (container *Container) FromDisk() error {
<add> pth, err := container.ConfigPath()
<ide> if err != nil {
<ide> return err
<ide> }
<ide> func (container *Container) fromDisk() error {
<ide> return container.readHostConfig()
<ide> }
<ide>
<del>func (container *Container) toDisk() error {
<del> pth, err := container.jsonPath()
<add>// ToDisk saves the container configuration on disk.
<add>func (container *Container) ToDisk() error {
<add> pth, err := container.ConfigPath()
<ide> if err != nil {
<ide> return err
<ide> }
<ide> func (container *Container) toDisk() error {
<ide> return err
<ide> }
<ide>
<del> return container.writeHostConfig()
<add> return container.WriteHostConfig()
<ide> }
<ide>
<del>func (container *Container) toDiskLocking() error {
<add>// ToDiskLocking saves the container configuration on disk in a thread safe way.
<add>func (container *Container) ToDiskLocking() error {
<ide> container.Lock()
<del> err := container.toDisk()
<add> err := container.ToDisk()
<ide> container.Unlock()
<ide> return err
<ide> }
<ide>
<add>// readHostConfig reads the host configuration from disk for the container.
<ide> func (container *Container) readHostConfig() error {
<del> container.hostConfig = &runconfig.HostConfig{}
<add> container.HostConfig = &runconfig.HostConfig{}
<ide> // If the hostconfig file does not exist, do not read it.
<del> // (We still have to initialize container.hostConfig,
<add> // (We still have to initialize container.HostConfig,
<ide> // but that's OK, since we just did that above.)
<del> pth, err := container.hostConfigPath()
<add> pth, err := container.HostConfigPath()
<ide> if err != nil {
<ide> return err
<ide> }
<ide> func (container *Container) readHostConfig() error {
<ide> }
<ide> defer f.Close()
<ide>
<del> if err := json.NewDecoder(f).Decode(&container.hostConfig); err != nil {
<add> if err := json.NewDecoder(f).Decode(&container.HostConfig); err != nil {
<ide> return err
<ide> }
<ide>
<del> initDNSHostConfig(container)
<add> container.InitDNSHostConfig()
<ide>
<ide> return nil
<ide> }
<ide>
<del>func (container *Container) writeHostConfig() error {
<del> pth, err := container.hostConfigPath()
<add>// WriteHostConfig saves the host configuration on disk for the container.
<add>func (container *Container) WriteHostConfig() error {
<add> pth, err := container.HostConfigPath()
<ide> if err != nil {
<ide> return err
<ide> }
<ide> func (container *Container) writeHostConfig() error {
<ide> }
<ide> defer f.Close()
<ide>
<del> return json.NewEncoder(f).Encode(&container.hostConfig)
<add> return json.NewEncoder(f).Encode(&container.HostConfig)
<ide> }
<ide>
<del>// GetResourcePath evaluates `path` in the scope of the container's basefs, with proper path
<del>// sanitisation. Symlinks are all scoped to the basefs of the container, as
<del>// though the container's basefs was `/`.
<add>// GetResourcePath evaluates `path` in the scope of the container's BaseFS, with proper path
<add>// sanitisation. Symlinks are all scoped to the BaseFS of the container, as
<add>// though the container's BaseFS was `/`.
<ide> //
<del>// The basefs of a container is the host-facing path which is bind-mounted as
<add>// The BaseFS of a container is the host-facing path which is bind-mounted as
<ide> // `/` inside the container. This method is essentially used to access a
<ide> // particular path inside the container as though you were a process in that
<ide> // container.
<ide> //
<del>// NOTE: The returned path is *only* safely scoped inside the container's basefs
<add>// NOTE: The returned path is *only* safely scoped inside the container's BaseFS
<ide> // if no component of the returned path changes (such as a component
<ide> // symlinking to a different path) between using this method and using the
<ide> // path. See symlink.FollowSymlinkInScope for more details.
<ide> func (container *Container) GetResourcePath(path string) (string, error) {
<ide> // IMPORTANT - These are paths on the OS where the daemon is running, hence
<ide> // any filepath operations must be done in an OS agnostic way.
<ide> cleanPath := filepath.Join(string(os.PathSeparator), path)
<del> r, e := symlink.FollowSymlinkInScope(filepath.Join(container.basefs, cleanPath), container.basefs)
<add> r, e := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, cleanPath), container.BaseFS)
<ide> return r, e
<ide> }
<ide>
<del>// Evaluates `path` in the scope of the container's root, with proper path
<add>// GetRootResourcePath evaluates `path` in the scope of the container's root, with proper path
<ide> // sanitisation. Symlinks are all scoped to the root of the container, as
<ide> // though the container's root was `/`.
<ide> //
<ide> func (container *Container) GetResourcePath(path string) (string, error) {
<ide> // if no component of the returned path changes (such as a component
<ide> // symlinking to a different path) between using this method and using the
<ide> // path. See symlink.FollowSymlinkInScope for more details.
<del>func (container *Container) getRootResourcePath(path string) (string, error) {
<add>func (container *Container) GetRootResourcePath(path string) (string, error) {
<ide> // IMPORTANT - These are paths on the OS where the daemon is running, hence
<ide> // any filepath operations must be done in an OS agnostic way.
<ide> cleanPath := filepath.Join(string(os.PathSeparator), path)
<del> return symlink.FollowSymlinkInScope(filepath.Join(container.root, cleanPath), container.root)
<add> return symlink.FollowSymlinkInScope(filepath.Join(container.Root, cleanPath), container.Root)
<ide> }
<ide>
<ide> // ExitOnNext signals to the monitor that it should not restart the container
<ide> func (container *Container) ExitOnNext() {
<ide> // Resize changes the TTY of the process running inside the container
<ide> // to the given height and width. The container must be running.
<ide> func (container *Container) Resize(h, w int) error {
<del> if err := container.command.ProcessConfig.Terminal.Resize(h, w); err != nil {
<add> if err := container.Command.ProcessConfig.Terminal.Resize(h, w); err != nil {
<ide> return err
<ide> }
<ide> return nil
<ide> }
<ide>
<del>func (container *Container) hostConfigPath() (string, error) {
<del> return container.getRootResourcePath("hostconfig.json")
<del>}
<del>
<del>func (container *Container) jsonPath() (string, error) {
<del> return container.getRootResourcePath(configFileName)
<add>// HostConfigPath returns the path to the container's JSON hostconfig
<add>func (container *Container) HostConfigPath() (string, error) {
<add> return container.GetRootResourcePath("hostconfig.json")
<ide> }
<ide>
<del>// This directory is only usable when the container is running
<del>func (container *Container) rootfsPath() string {
<del> return container.basefs
<add>// ConfigPath returns the path to the container's JSON config
<add>func (container *Container) ConfigPath() (string, error) {
<add> return container.GetRootResourcePath(configFileName)
<ide> }
<ide>
<ide> func validateID(id string) error {
<ide> func (container *Container) exposes(p nat.Port) bool {
<ide> return exists
<ide> }
<ide>
<del>func (container *Container) getLogConfig(defaultConfig runconfig.LogConfig) runconfig.LogConfig {
<del> cfg := container.hostConfig.LogConfig
<add>// GetLogConfig returns the log configuration for the container.
<add>func (container *Container) GetLogConfig(defaultConfig runconfig.LogConfig) runconfig.LogConfig {
<add> cfg := container.HostConfig.LogConfig
<ide> if cfg.Type != "" || len(cfg.Config) > 0 { // container has log driver configured
<ide> if cfg.Type == "" {
<ide> cfg.Type = jsonfilelog.Name
<ide> func (container *Container) StartLogger(cfg runconfig.LogConfig) (logger.Logger,
<ide>
<ide> // Set logging file for "json-logger"
<ide> if cfg.Type == jsonfilelog.Name {
<del> ctx.LogPath, err = container.getRootResourcePath(fmt.Sprintf("%s-json.log", container.ID))
<add> ctx.LogPath, err = container.GetRootResourcePath(fmt.Sprintf("%s-json.log", container.ID))
<ide> if err != nil {
<ide> return nil, err
<ide> }
<ide> }
<ide> return c(ctx)
<ide> }
<ide>
<del>func (container *Container) getProcessLabel() string {
<add>// GetProcessLabel returns the process label for the container.
<add>func (container *Container) GetProcessLabel() string {
<ide> // even if we have a process label return "" if we are running
<ide> // in privileged mode
<del> if container.hostConfig.Privileged {
<add> if container.HostConfig.Privileged {
<ide> return ""
<ide> }
<ide> return container.ProcessLabel
<ide> }
<ide>
<del>func (container *Container) getMountLabel() string {
<del> if container.hostConfig.Privileged {
<add>// GetMountLabel returns the mounting label for the container.
<add>// This label is empty if the container is privileged.
<add>func (container *Container) GetMountLabel() string {
<add> if container.HostConfig.Privileged {
<ide> return ""
<ide> }
<ide> return container.MountLabel
<ide> }
<ide>
<del>func (container *Container) getExecIDs() []string {
<del> return container.execCommands.List()
<add>// GetExecIDs returns the list of exec commands running on the container.
<add>func (container *Container) GetExecIDs() []string {
<add> return container.ExecCommands.List()
<ide> }
<ide>
<ide> // Attach connects to the container's TTY, delegating to standard
<ide> // streams or websockets depending on the configuration.
<ide> func (container *Container) Attach(stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error {
<del> return attach(container.StreamConfig, container.Config.OpenStdin, container.Config.StdinOnce, container.Config.Tty, stdin, stdout, stderr)
<add> return AttachStreams(container.StreamConfig, container.Config.OpenStdin, container.Config.StdinOnce, container.Config.Tty, stdin, stdout, stderr)
<ide> }
<ide>
<del>func attach(streamConfig *runconfig.StreamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error {
<add>// AttachStreams connects streams to a TTY.
<add>// Used by exec too. Should this move somewhere else?
<add>func AttachStreams(streamConfig *runconfig.StreamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error {
<ide> var (
<ide> cStdout, cStderr io.ReadCloser
<ide> cStdin io.WriteCloser
<ide> func copyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error)
<ide> return written, err
<ide> }
<ide>
<del>func (container *Container) shouldRestart() bool {
<del> return container.hostConfig.RestartPolicy.Name == "always" ||
<del> (container.hostConfig.RestartPolicy.Name == "unless-stopped" && !container.HasBeenManuallyStopped) ||
<del> (container.hostConfig.RestartPolicy.Name == "on-failure" && container.ExitCode != 0)
<add>// ShouldRestart decides whether the daemon should restart the container or not.
<add>// This is based on the container's restart policy.
<add>func (container *Container) ShouldRestart() bool {
<add> return container.HostConfig.RestartPolicy.Name == "always" ||
<add> (container.HostConfig.RestartPolicy.Name == "unless-stopped" && !container.HasBeenManuallyStopped) ||
<add> (container.HostConfig.RestartPolicy.Name == "on-failure" && container.ExitCode != 0)
<ide> }
<ide>
<del>func (container *Container) addBindMountPoint(name, source, destination string, rw bool) {
<add>// AddBindMountPoint adds a new bind mount point configuration to the container.
<add>func (container *Container) AddBindMountPoint(name, source, destination string, rw bool) {
<ide> container.MountPoints[destination] = &volume.MountPoint{
<ide> Name: name,
<ide> Source: source,
<ide> func (container *Container) addBindMountPoint(name, source, destination string,
<ide> }
<ide> }
<ide>
<del>func (container *Container) addLocalMountPoint(name, destination string, rw bool) {
<add>// AddLocalMountPoint adds a new local mount point configuration to the container.
<add>func (container *Container) AddLocalMountPoint(name, destination string, rw bool) {
<ide> container.MountPoints[destination] = &volume.MountPoint{
<ide> Name: name,
<ide> Driver: volume.DefaultDriverName,
<ide> func (container *Container) addLocalMountPoint(name, destination string, rw bool
<ide> }
<ide> }
<ide>
<del>func (container *Container) addMountPointWithVolume(destination string, vol volume.Volume, rw bool) {
<add>// AddMountPointWithVolume adds a new mount point configured with a volume to the container.
<add>func (container *Container) AddMountPointWithVolume(destination string, vol volume.Volume, rw bool) {
<ide> container.MountPoints[destination] = &volume.MountPoint{
<ide> Name: vol.Name(),
<ide> Driver: vol.DriverName(),
<ide> func (container *Container) addMountPointWithVolume(destination string, vol volu
<ide> }
<ide> }
<ide>
<del>func (container *Container) isDestinationMounted(destination string) bool {
<add>// IsDestinationMounted checks whether a path is mounted on the container or not.
<add>func (container *Container) IsDestinationMounted(destination string) bool {
<ide> return container.MountPoints[destination] != nil
<ide> }
<ide>
<del>func (container *Container) stopSignal() int {
<add>// StopSignal returns the signal used to stop the container.
<add>func (container *Container) StopSignal() int {
<ide> var stopSignal syscall.Signal
<ide> if container.Config.StopSignal != "" {
<ide> stopSignal, _ = signal.ParseSignal(container.Config.StopSignal)
<ide> func (container *Container) stopSignal() int {
<ide> return int(stopSignal)
<ide> }
<ide>
<del>// initDNSHostConfig ensures that the dns fields are never nil.
<add>// InitDNSHostConfig ensures that the dns fields are never nil.
<ide> // New containers don't ever have those fields nil,
<ide> // but pre-created containers can still have those nil values.
<ide> // The non-recommended host configuration in the start api can
<ide> // make these fields nil again, this corrects that issue until
<ide> // we remove that behavior for good.
<ide> // See https://github.com/docker/docker/pull/17779
<ide> // for a more detailed explanation on why we don't want that.
<del>func initDNSHostConfig(container *Container) {
<del> if container.hostConfig.DNS == nil {
<del> container.hostConfig.DNS = make([]string, 0)
<add>func (container *Container) InitDNSHostConfig() {
<add> if container.HostConfig.DNS == nil {
<add> container.HostConfig.DNS = make([]string, 0)
<ide> }
<ide>
<del> if container.hostConfig.DNSSearch == nil {
<del> container.hostConfig.DNSSearch = make([]string, 0)
<add> if container.HostConfig.DNSSearch == nil {
<add> container.HostConfig.DNSSearch = make([]string, 0)
<ide> }
<ide>
<del> if container.hostConfig.DNSOptions == nil {
<del> container.hostConfig.DNSOptions = make([]string, 0)
<add> if container.HostConfig.DNSOptions == nil {
<add> container.HostConfig.DNSOptions = make([]string, 0)
<ide> }
<ide> }
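
FromDisk and ToDisk round-trip the container through config.v2.json under the container's root directory, with host-only fields such as Root, BaseFS and HostConfig excluded from serialization via `json:"-"`. A trimmed, runnable sketch of the same persistence pattern (miniContainer is a hypothetical stand-in with only a couple of fields):

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
)

// miniContainer keeps only a couple of persisted fields; note how the
// host-only Root field is excluded from the JSON with `json:"-"`,
// exactly as Root, BaseFS and HostConfig are in the diff above.
type miniContainer struct {
	ID   string
	Path string
	Root string `json:"-"`
}

func (c *miniContainer) configPath() string {
	return filepath.Join(c.Root, "config.v2.json")
}

func (c *miniContainer) toDisk() error {
	f, err := os.Create(c.configPath())
	if err != nil {
		return err
	}
	defer f.Close()
	return json.NewEncoder(f).Encode(c)
}

func (c *miniContainer) fromDisk() error {
	f, err := os.Open(c.configPath())
	if err != nil {
		return err
	}
	defer f.Close()
	return json.NewDecoder(f).Decode(c)
}

func main() {
	dir, _ := os.MkdirTemp("", "ctr")
	defer os.RemoveAll(dir)
	c := &miniContainer{ID: "abc", Path: "/bin/sh", Root: dir}
	if err := c.toDisk(); err != nil {
		panic(err)
	}
	loaded := &miniContainer{Root: dir}
	if err := loaded.fromDisk(); err != nil {
		panic(err)
	}
	fmt.Println(loaded.ID, loaded.Path) // abc /bin/sh
}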
<ide><path>container/container_unit_test.go
<add>package container
<add>
<add>import (
<add> "testing"
<add>
<add> "github.com/docker/docker/pkg/signal"
<add> "github.com/docker/docker/runconfig"
<add>)
<add>
<add>func TestContainerStopSignal(t *testing.T) {
<add> c := &Container{
<add> CommonContainer: CommonContainer{
<add> Config: &runconfig.Config{},
<add> },
<add> }
<add>
<add> def, err := signal.ParseSignal(signal.DefaultStopSignal)
<add> if err != nil {
<add> t.Fatal(err)
<add> }
<add>
<add> s := c.StopSignal()
<add> if s != int(def) {
<add> t.Fatalf("Expected %v, got %v", def, s)
<add> }
<add>
<add> c = &Container{
<add> CommonContainer: CommonContainer{
<add> Config: &runconfig.Config{StopSignal: "SIGKILL"},
<add> },
<add> }
<add> s = c.StopSignal()
<add> if s != 9 {
<add> t.Fatalf("Expected 9, got %v", s)
<add> }
<add>}
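
With the state helpers exported, behavior like InitDNSHostConfig becomes directly testable from this package. A possible companion test in the same style as TestContainerStopSignal — a sketch, not part of this commit:

// TestContainerInitDNSHostConfig (hypothetical) checks that nil DNS
// slices on a pre-created container are normalized to empty ones.
func TestContainerInitDNSHostConfig(t *testing.T) {
	c := &Container{
		CommonContainer: CommonContainer{
			HostConfig: &runconfig.HostConfig{},
		},
	}
	c.InitDNSHostConfig()
	if c.HostConfig.DNS == nil || c.HostConfig.DNSSearch == nil || c.HostConfig.DNSOptions == nil {
		t.Fatal("expected DNS, DNSSearch and DNSOptions to be initialized to empty slices")
	}
}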
<ide><path>container/container_unix.go
<add>// +build linux freebsd
<add>
<add>package container
<add>
<add>import (
<add> "fmt"
<add> "io/ioutil"
<add> "net"
<add> "os"
<add> "path/filepath"
<add> "strconv"
<add> "strings"
<add> "syscall"
<add>
<add> "github.com/Sirupsen/logrus"
<add> "github.com/docker/docker/daemon/execdriver"
<add> "github.com/docker/docker/daemon/network"
<add> derr "github.com/docker/docker/errors"
<add> "github.com/docker/docker/pkg/chrootarchive"
<add> "github.com/docker/docker/pkg/nat"
<add> "github.com/docker/docker/pkg/symlink"
<add> "github.com/docker/docker/pkg/system"
<add> "github.com/docker/docker/runconfig"
<add> "github.com/docker/docker/utils"
<add> "github.com/docker/docker/volume"
<add> "github.com/docker/libnetwork"
<add> "github.com/docker/libnetwork/netlabel"
<add> "github.com/docker/libnetwork/options"
<add> "github.com/docker/libnetwork/types"
<add> "github.com/opencontainers/runc/libcontainer/label"
<add>)
<add>
<add>const (
<add> // DefaultPathEnv is the Unix-style list of directories to search for
<add> // executables. Each directory is separated from the next by a colon
<add> // ':' character.
<add> DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
<add>
<add> // DefaultSHMSize is the default size (64MB) of the SHM which will be mounted in the container
<add> DefaultSHMSize int64 = 67108864
<add>)
<add>
<add>// Container holds the fields specific to Unix implementations. See
<add>// CommonContainer for standard fields common to all containers.
<add>type Container struct {
<add> CommonContainer
<add>
<add> // Fields below here are platform specific.
<add> AppArmorProfile string
<add> HostnamePath string
<add> HostsPath string
<add> ShmPath string
<add> MqueuePath string
<add> ResolvConfPath string
<add>}
<add>
<add>// CreateDaemonEnvironment returns the list of all environment variables given the list of
<add>// environment variables related to links.
<add>// Sets PATH, HOSTNAME and if container.Config.Tty is set: TERM.
<add>// The defaults set here do not override the values in container.Config.Env
<add>func (container *Container) CreateDaemonEnvironment(linkedEnv []string) []string {
<add> // if a domain name was specified, append it to the hostname (see #7851)
<add> fullHostname := container.Config.Hostname
<add> if container.Config.Domainname != "" {
<add> fullHostname = fmt.Sprintf("%s.%s", fullHostname, container.Config.Domainname)
<add> }
<add> // Setup environment
<add> env := []string{
<add> "PATH=" + DefaultPathEnv,
<add> "HOSTNAME=" + fullHostname,
<add> // Note: we don't set HOME here because it'll get autoset intelligently
<add> // based on the value of USER inside dockerinit, but only if it isn't
<add> // set already (ie, that can be overridden by setting HOME via -e or ENV
<add> // in a Dockerfile).
<add> }
<add> if container.Config.Tty {
<add> env = append(env, "TERM=xterm")
<add> }
<add> env = append(env, linkedEnv...)
<add> // because the env on the container can override certain default values
<add> // we need to replace the 'env' keys where they match and append anything
<add> // else.
<add> env = utils.ReplaceOrAppendEnvValues(env, container.Config.Env)
<add>
<add> return env
<add>}
<add>
<add>// TrySetNetworkMount attempts to set the network mounts given a provided destination and
<add>// the path to use for it; returns true if the given destination was a network mount file
<add>func (container *Container) TrySetNetworkMount(destination string, path string) bool {
<add> if destination == "/etc/resolv.conf" {
<add> container.ResolvConfPath = path
<add> return true
<add> }
<add> if destination == "/etc/hostname" {
<add> container.HostnamePath = path
<add> return true
<add> }
<add> if destination == "/etc/hosts" {
<add> container.HostsPath = path
<add> return true
<add> }
<add>
<add> return false
<add>}
<add>
<add>// BuildHostnameFile writes the container's hostname file.
<add>func (container *Container) BuildHostnameFile() error {
<add> hostnamePath, err := container.GetRootResourcePath("hostname")
<add> if err != nil {
<add> return err
<add> }
<add> container.HostnamePath = hostnamePath
<add>
<add> if container.Config.Domainname != "" {
<add> return ioutil.WriteFile(container.HostnamePath, []byte(fmt.Sprintf("%s.%s\n", container.Config.Hostname, container.Config.Domainname)), 0644)
<add> }
<add> return ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644)
<add>}
<add>
<add>// GetEndpointInNetwork returns the container's endpoint to the provided network.
<add>func (container *Container) GetEndpointInNetwork(n libnetwork.Network) (libnetwork.Endpoint, error) {
<add> endpointName := strings.TrimPrefix(container.Name, "/")
<add> return n.EndpointByName(endpointName)
<add>}
<add>
<add>func (container *Container) buildPortMapInfo(ep libnetwork.Endpoint) error {
<add> if ep == nil {
<add> return derr.ErrorCodeEmptyEndpoint
<add> }
<add>
<add> networkSettings := container.NetworkSettings
<add> if networkSettings == nil {
<add> return derr.ErrorCodeEmptyNetwork
<add> }
<add>
<add> driverInfo, err := ep.DriverInfo()
<add> if err != nil {
<add> return err
<add> }
<add>
<add> if driverInfo == nil {
<add> // It is not an error for driverInfo to be nil
<add> return nil
<add> }
<add>
<add> if networkSettings.Ports == nil {
<add> networkSettings.Ports = nat.PortMap{}
<add> }
<add>
<add> if expData, ok := driverInfo[netlabel.ExposedPorts]; ok {
<add> if exposedPorts, ok := expData.([]types.TransportPort); ok {
<add> for _, tp := range exposedPorts {
<add> natPort, err := nat.NewPort(tp.Proto.String(), strconv.Itoa(int(tp.Port)))
<add> if err != nil {
<add> return derr.ErrorCodeParsingPort.WithArgs(tp.Port, err)
<add> }
<add> networkSettings.Ports[natPort] = nil
<add> }
<add> }
<add> }
<add>
<add> mapData, ok := driverInfo[netlabel.PortMap]
<add> if !ok {
<add> return nil
<add> }
<add>
<add> if portMapping, ok := mapData.([]types.PortBinding); ok {
<add> for _, pp := range portMapping {
<add> natPort, err := nat.NewPort(pp.Proto.String(), strconv.Itoa(int(pp.Port)))
<add> if err != nil {
<add> return err
<add> }
<add> natBndg := nat.PortBinding{HostIP: pp.HostIP.String(), HostPort: strconv.Itoa(int(pp.HostPort))}
<add> networkSettings.Ports[natPort] = append(networkSettings.Ports[natPort], natBndg)
<add> }
<add> }
<add>
<add> return nil
<add>}
<add>
<add>// BuildEndpointInfo sets endpoint-related fields on container.NetworkSettings based on the provided network and endpoint.
<add>func (container *Container) BuildEndpointInfo(n libnetwork.Network, ep libnetwork.Endpoint) error {
<add> if ep == nil {
<add> return derr.ErrorCodeEmptyEndpoint
<add> }
<add>
<add> networkSettings := container.NetworkSettings
<add> if networkSettings == nil {
<add> return derr.ErrorCodeEmptyNetwork
<add> }
<add>
<add> epInfo := ep.Info()
<add> if epInfo == nil {
<add> // It is not an error to get an empty endpoint info
<add> return nil
<add> }
<add>
<add> if _, ok := networkSettings.Networks[n.Name()]; !ok {
<add> networkSettings.Networks[n.Name()] = new(network.EndpointSettings)
<add> }
<add> networkSettings.Networks[n.Name()].EndpointID = ep.ID()
<add>
<add> iface := epInfo.Iface()
<add> if iface == nil {
<add> return nil
<add> }
<add>
<add> if iface.MacAddress() != nil {
<add> networkSettings.Networks[n.Name()].MacAddress = iface.MacAddress().String()
<add> }
<add>
<add> if iface.Address() != nil {
<add> ones, _ := iface.Address().Mask.Size()
<add> networkSettings.Networks[n.Name()].IPAddress = iface.Address().IP.String()
<add> networkSettings.Networks[n.Name()].IPPrefixLen = ones
<add> }
<add>
<add> if iface.AddressIPv6() != nil && iface.AddressIPv6().IP.To16() != nil {
<add> onesv6, _ := iface.AddressIPv6().Mask.Size()
<add> networkSettings.Networks[n.Name()].GlobalIPv6Address = iface.AddressIPv6().IP.String()
<add> networkSettings.Networks[n.Name()].GlobalIPv6PrefixLen = onesv6
<add> }
<add>
<add> return nil
<add>}
<add>
<add>// UpdateJoinInfo updates network settings when container joins network n with endpoint ep.
<add>func (container *Container) UpdateJoinInfo(n libnetwork.Network, ep libnetwork.Endpoint) error {
<add> if err := container.buildPortMapInfo(ep); err != nil {
<add> return err
<add> }
<add>
<add> epInfo := ep.Info()
<add> if epInfo == nil {
<add> // It is not an error to get an empty endpoint info
<add> return nil
<add> }
<add> if epInfo.Gateway() != nil {
<add> container.NetworkSettings.Networks[n.Name()].Gateway = epInfo.Gateway().String()
<add> }
<add> if epInfo.GatewayIPv6().To16() != nil {
<add> container.NetworkSettings.Networks[n.Name()].IPv6Gateway = epInfo.GatewayIPv6().String()
<add> }
<add>
<add> return nil
<add>}
<add>
<add>// UpdateSandboxNetworkSettings updates the sandbox ID and Key.
<add>func (container *Container) UpdateSandboxNetworkSettings(sb libnetwork.Sandbox) error {
<add> container.NetworkSettings.SandboxID = sb.ID()
<add> container.NetworkSettings.SandboxKey = sb.Key()
<add> return nil
<add>}
<add>
<add>// BuildCreateEndpointOptions builds endpoint options from a given network.
<add>func (container *Container) BuildCreateEndpointOptions(n libnetwork.Network) ([]libnetwork.EndpointOption, error) {
<add> var (
<add> portSpecs = make(nat.PortSet)
<add> bindings = make(nat.PortMap)
<add> pbList []types.PortBinding
<add> exposeList []types.TransportPort
<add> createOptions []libnetwork.EndpointOption
<add> )
<add>
<add> if n.Name() == "bridge" || container.NetworkSettings.IsAnonymousEndpoint {
<add> createOptions = append(createOptions, libnetwork.CreateOptionAnonymous())
<add> }
<add>
<add> // Other configs are applicable only for the endpoint in the network
<add> // to which the container was connected on docker run.
<add> if n.Name() != container.HostConfig.NetworkMode.NetworkName() &&
<add> !(n.Name() == "bridge" && container.HostConfig.NetworkMode.IsDefault()) {
<add> return createOptions, nil
<add> }
<add>
<add> if container.Config.ExposedPorts != nil {
<add> portSpecs = container.Config.ExposedPorts
<add> }
<add>
<add> if container.HostConfig.PortBindings != nil {
<add> for p, b := range container.HostConfig.PortBindings {
<add> bindings[p] = []nat.PortBinding{}
<add> for _, bb := range b {
<add> bindings[p] = append(bindings[p], nat.PortBinding{
<add> HostIP: bb.HostIP,
<add> HostPort: bb.HostPort,
<add> })
<add> }
<add> }
<add> }
<add>
<add> ports := make([]nat.Port, len(portSpecs))
<add> var i int
<add> for p := range portSpecs {
<add> ports[i] = p
<add> i++
<add> }
<add> nat.SortPortMap(ports, bindings)
<add> for _, port := range ports {
<add> expose := types.TransportPort{}
<add> expose.Proto = types.ParseProtocol(port.Proto())
<add> expose.Port = uint16(port.Int())
<add> exposeList = append(exposeList, expose)
<add>
<add> pb := types.PortBinding{Port: expose.Port, Proto: expose.Proto}
<add> binding := bindings[port]
<add> for i := 0; i < len(binding); i++ {
<add> pbCopy := pb.GetCopy()
<add> newP, err := nat.NewPort(nat.SplitProtoPort(binding[i].HostPort))
<add> var portStart, portEnd int
<add> if err == nil {
<add> portStart, portEnd, err = newP.Range()
<add> }
<add> if err != nil {
<add> return nil, derr.ErrorCodeHostPort.WithArgs(binding[i].HostPort, err)
<add> }
<add> pbCopy.HostPort = uint16(portStart)
<add> pbCopy.HostPortEnd = uint16(portEnd)
<add> pbCopy.HostIP = net.ParseIP(binding[i].HostIP)
<add> pbList = append(pbList, pbCopy)
<add> }
<add>
<add> if container.HostConfig.PublishAllPorts && len(binding) == 0 {
<add> pbList = append(pbList, pb)
<add> }
<add> }
<add>
<add> createOptions = append(createOptions,
<add> libnetwork.CreateOptionPortMapping(pbList),
<add> libnetwork.CreateOptionExposedPorts(exposeList))
<add>
<add> if container.Config.MacAddress != "" {
<add> mac, err := net.ParseMAC(container.Config.MacAddress)
<add> if err != nil {
<add> return nil, err
<add> }
<add>
<add> genericOption := options.Generic{
<add> netlabel.MacAddress: mac,
<add> }
<add>
<add> createOptions = append(createOptions, libnetwork.EndpointOptionGeneric(genericOption))
<add> }
<add>
<add> return createOptions, nil
<add>}
<add>
<add>// SetupWorkingDirectory sets up the container's working directory as set in container.Config.WorkingDir
<add>func (container *Container) SetupWorkingDirectory() error {
<add> if container.Config.WorkingDir == "" {
<add> return nil
<add> }
<add> container.Config.WorkingDir = filepath.Clean(container.Config.WorkingDir)
<add>
<add> pth, err := container.GetResourcePath(container.Config.WorkingDir)
<add> if err != nil {
<add> return err
<add> }
<add>
<add> pthInfo, err := os.Stat(pth)
<add> if err != nil {
<add> if !os.IsNotExist(err) {
<add> return err
<add> }
<add>
<add> if err := system.MkdirAll(pth, 0755); err != nil {
<add> return err
<add> }
<add> }
<add> if pthInfo != nil && !pthInfo.IsDir() {
<add> return derr.ErrorCodeNotADir.WithArgs(container.Config.WorkingDir)
<add> }
<add> return nil
<add>}
<add>
<add>// DisconnectFromNetwork disconnects a container from a network
<add>func (container *Container) DisconnectFromNetwork(n libnetwork.Network) error {
<add> if !container.Running {
<add> return derr.ErrorCodeNotRunning.WithArgs(container.ID)
<add> }
<add>
<add> if container.HostConfig.NetworkMode.IsHost() && runconfig.NetworkMode(n.Type()).IsHost() {
<add> return runconfig.ErrConflictHostNetwork
<add> }
<add>
<add> if err := container.disconnectFromNetwork(n); err != nil {
<add> return err
<add> }
<add>
<add> if err := container.ToDiskLocking(); err != nil {
<add> return fmt.Errorf("Error saving container to disk: %v", err)
<add> }
<add> return nil
<add>}
<add>
<add>func (container *Container) disconnectFromNetwork(n libnetwork.Network) error {
<add> var (
<add> ep libnetwork.Endpoint
<add> sbox libnetwork.Sandbox
<add> )
<add>
<add> s := func(current libnetwork.Endpoint) bool {
<add> epInfo := current.Info()
<add> if epInfo == nil {
<add> return false
<add> }
<add> if sb := epInfo.Sandbox(); sb != nil {
<add> if sb.ContainerID() == container.ID {
<add> ep = current
<add> sbox = sb
<add> return true
<add> }
<add> }
<add> return false
<add> }
<add> n.WalkEndpoints(s)
<add>
<add> if ep == nil {
<add> return fmt.Errorf("container %s is not connected to the network", container.ID)
<add> }
<add>
<add> if err := ep.Leave(sbox); err != nil {
<add> return fmt.Errorf("container %s failed to leave network %s: %v", container.ID, n.Name(), err)
<add> }
<add>
<add> if err := ep.Delete(); err != nil {
<add> return fmt.Errorf("endpoint delete failed for container %s on network %s: %v", container.ID, n.Name(), err)
<add> }
<add>
<add> delete(container.NetworkSettings.Networks, n.Name())
<add> return nil
<add>}
<add>
<add>// appendNetworkMounts appends any network mounts to the array of mount points passed in
<add>func appendNetworkMounts(container *Container, volumeMounts []volume.MountPoint) ([]volume.MountPoint, error) {
<add> for _, mnt := range container.NetworkMounts() {
<add> dest, err := container.GetResourcePath(mnt.Destination)
<add> if err != nil {
<add> return nil, err
<add> }
<add> volumeMounts = append(volumeMounts, volume.MountPoint{Destination: dest})
<add> }
<add> return volumeMounts, nil
<add>}
<add>
<add>// NetworkMounts returns the list of network mounts.
<add>func (container *Container) NetworkMounts() []execdriver.Mount {
<add> var mounts []execdriver.Mount
<add> shared := container.HostConfig.NetworkMode.IsContainer()
<add> if container.ResolvConfPath != "" {
<add> if _, err := os.Stat(container.ResolvConfPath); err != nil {
<add> logrus.Warnf("ResolvConfPath set to %q, but can't stat this filename (err = %v); skipping", container.ResolvConfPath, err)
<add> } else {
<add> label.Relabel(container.ResolvConfPath, container.MountLabel, shared)
<add> writable := !container.HostConfig.ReadonlyRootfs
<add> if m, exists := container.MountPoints["/etc/resolv.conf"]; exists {
<add> writable = m.RW
<add> }
<add> mounts = append(mounts, execdriver.Mount{
<add> Source: container.ResolvConfPath,
<add> Destination: "/etc/resolv.conf",
<add> Writable: writable,
<add> Private: true,
<add> })
<add> }
<add> }
<add> if container.HostnamePath != "" {
<add> if _, err := os.Stat(container.HostnamePath); err != nil {
<add> logrus.Warnf("HostnamePath set to %q, but can't stat this filename (err = %v); skipping", container.HostnamePath, err)
<add> } else {
<add> label.Relabel(container.HostnamePath, container.MountLabel, shared)
<add> writable := !container.HostConfig.ReadonlyRootfs
<add> if m, exists := container.MountPoints["/etc/hostname"]; exists {
<add> writable = m.RW
<add> }
<add> mounts = append(mounts, execdriver.Mount{
<add> Source: container.HostnamePath,
<add> Destination: "/etc/hostname",
<add> Writable: writable,
<add> Private: true,
<add> })
<add> }
<add> }
<add> if container.HostsPath != "" {
<add> if _, err := os.Stat(container.HostsPath); err != nil {
<add> logrus.Warnf("HostsPath set to %q, but can't stat this filename (err = %v); skipping", container.HostsPath, err)
<add> } else {
<add> label.Relabel(container.HostsPath, container.MountLabel, shared)
<add> writable := !container.HostConfig.ReadonlyRootfs
<add> if m, exists := container.MountPoints["/etc/hosts"]; exists {
<add> writable = m.RW
<add> }
<add> mounts = append(mounts, execdriver.Mount{
<add> Source: container.HostsPath,
<add> Destination: "/etc/hosts",
<add> Writable: writable,
<add> Private: true,
<add> })
<add> }
<add> }
<add> return mounts
<add>}
<add>
<add>// CopyImagePathContent copies files in destination to the volume.
<add>func (container *Container) CopyImagePathContent(v volume.Volume, destination string) error {
<add> rootfs, err := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, destination), container.BaseFS)
<add> if err != nil {
<add> return err
<add> }
<add>
<add> if _, err = ioutil.ReadDir(rootfs); err != nil {
<add> if os.IsNotExist(err) {
<add> return nil
<add> }
<add> return err
<add> }
<add>
<add> path, err := v.Mount()
<add> if err != nil {
<add> return err
<add> }
<add>
<add> if err := copyExistingContents(rootfs, path); err != nil {
<add> return err
<add> }
<add>
<add> return v.Unmount()
<add>}
<add>
<add>// ShmResourcePath returns path to shm
<add>func (container *Container) ShmResourcePath() (string, error) {
<add> return container.GetRootResourcePath("shm")
<add>}
<add>
<add>// MqueueResourcePath returns path to mqueue
<add>func (container *Container) MqueueResourcePath() (string, error) {
<add> return container.GetRootResourcePath("mqueue")
<add>}
<add>
<add>// HasMountFor checks if path is a mountpoint
<add>func (container *Container) HasMountFor(path string) bool {
<add> _, exists := container.MountPoints[path]
<add> return exists
<add>}
<add>
<add>// UnmountIpcMounts uses the provided unmount function to unmount shm and mqueue if they were mounted
<add>func (container *Container) UnmountIpcMounts(unmount func(pth string) error) {
<add> if container.HostConfig.IpcMode.IsContainer() || container.HostConfig.IpcMode.IsHost() {
<add> return
<add> }
<add>
<add> var warnings []string
<add>
<add> if !container.HasMountFor("/dev/shm") {
<add> shmPath, err := container.ShmResourcePath()
<add> if err != nil {
<add> logrus.Error(err)
<add> warnings = append(warnings, err.Error())
<add> } else if shmPath != "" {
<add> if err := unmount(shmPath); err != nil {
<add> warnings = append(warnings, fmt.Sprintf("failed to umount %s: %v", shmPath, err))
<add> }
<add>
<add> }
<add> }
<add>
<add> if !container.HasMountFor("/dev/mqueue") {
<add> mqueuePath, err := container.MqueueResourcePath()
<add> if err != nil {
<add> logrus.Error(err)
<add> warnings = append(warnings, err.Error())
<add> } else if mqueuePath != "" {
<add> if err := unmount(mqueuePath); err != nil {
<add> warnings = append(warnings, fmt.Sprintf("failed to umount %s: %v", mqueuePath, err))
<add> }
<add> }
<add> }
<add>
<add> if len(warnings) > 0 {
<add> logrus.Warnf("failed to cleanup ipc mounts:\n%v", strings.Join(warnings, "\n"))
<add> }
<add>}
<add>
<add>// IpcMounts returns the list of IPC mounts
<add>func (container *Container) IpcMounts() []execdriver.Mount {
<add> var mounts []execdriver.Mount
<add>
<add> if !container.HasMountFor("/dev/shm") {
<add> label.SetFileLabel(container.ShmPath, container.MountLabel)
<add> mounts = append(mounts, execdriver.Mount{
<add> Source: container.ShmPath,
<add> Destination: "/dev/shm",
<add> Writable: true,
<add> Private: true,
<add> })
<add> }
<add>
<add> if !container.HasMountFor("/dev/mqueue") {
<add> label.SetFileLabel(container.MqueuePath, container.MountLabel)
<add> mounts = append(mounts, execdriver.Mount{
<add> Source: container.MqueuePath,
<add> Destination: "/dev/mqueue",
<add> Writable: true,
<add> Private: true,
<add> })
<add> }
<add> return mounts
<add>}
<add>
<add>func detachMounted(path string) error {
<add> return syscall.Unmount(path, syscall.MNT_DETACH)
<add>}
<add>
<add>// UnmountVolumes unmounts all volumes
<add>func (container *Container) UnmountVolumes(forceSyscall bool) error {
<add> var (
<add> volumeMounts []volume.MountPoint
<add> err error
<add> )
<add>
<add> for _, mntPoint := range container.MountPoints {
<add> dest, err := container.GetResourcePath(mntPoint.Destination)
<add> if err != nil {
<add> return err
<add> }
<add>
<add> volumeMounts = append(volumeMounts, volume.MountPoint{Destination: dest, Volume: mntPoint.Volume})
<add> }
<add>
<add> // Append any network mounts to the list (this is a no-op on Windows)
<add> if volumeMounts, err = appendNetworkMounts(container, volumeMounts); err != nil {
<add> return err
<add> }
<add>
<add> for _, volumeMount := range volumeMounts {
<add> if forceSyscall {
<add> if err := detachMounted(volumeMount.Destination); err != nil {
<add> logrus.Warnf("%s unmountVolumes: Failed to do lazy umount %v", container.ID, err)
<add> }
<add> }
<add>
<add> if volumeMount.Volume != nil {
<add> if err := volumeMount.Volume.Unmount(); err != nil {
<add> return err
<add> }
<add> }
<add> }
<add>
<add> return nil
<add>}
<add>
<add>// copyExistingContents copies from the source to the destination and
<add>// ensures the ownership is appropriately set.
<add>func copyExistingContents(source, destination string) error {
<add> volList, err := ioutil.ReadDir(source)
<add> if err != nil {
<add> return err
<add> }
<add> if len(volList) > 0 {
<add> srcList, err := ioutil.ReadDir(destination)
<add> if err != nil {
<add> return err
<add> }
<add> if len(srcList) == 0 {
<add> // If the source volume is empty copy files from the root into the volume
<add> if err := chrootarchive.CopyWithTar(source, destination); err != nil {
<add> return err
<add> }
<add> }
<add> }
<add> return copyOwnership(source, destination)
<add>}
<add>
<add>// copyOwnership copies the permissions and uid:gid of the source file
<add>// to the destination file
<add>func copyOwnership(source, destination string) error {
<add> stat, err := system.Stat(source)
<add> if err != nil {
<add> return err
<add> }
<add>
<add> if err := os.Chown(destination, int(stat.UID()), int(stat.GID())); err != nil {
<add> return err
<add> }
<add>
<add> return os.Chmod(destination, os.FileMode(stat.Mode()))
<add>}
<add>
<add>// TmpfsMounts returns the list of tmpfs mounts
<add>func (container *Container) TmpfsMounts() []execdriver.Mount {
<add> var mounts []execdriver.Mount
<add> for dest, data := range container.HostConfig.Tmpfs {
<add> mounts = append(mounts, execdriver.Mount{
<add> Source: "tmpfs",
<add> Destination: dest,
<add> Data: data,
<add> })
<add> }
<add> return mounts
<add>}
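
copyOwnership is the piece that keeps a freshly populated volume usable by the container's user: after existing image content is copied in, the volume root is chowned and chmodded to match the source directory. A standalone, unix-only sketch of the same idea using a raw syscall.Stat_t instead of docker's pkg/system wrapper (copyOwnershipSketch is a hypothetical name):

package main

import (
	"fmt"
	"os"
	"syscall"
)

// copyOwnershipSketch mirrors copyOwnership above: make destination
// match source's uid, gid and permission bits.
func copyOwnershipSketch(source, destination string) error {
	fi, err := os.Stat(source)
	if err != nil {
		return err
	}
	st, ok := fi.Sys().(*syscall.Stat_t)
	if !ok {
		return fmt.Errorf("unsupported stat type for %s", source)
	}
	if err := os.Chown(destination, int(st.Uid), int(st.Gid)); err != nil {
		return err
	}
	return os.Chmod(destination, fi.Mode().Perm())
}

func main() {
	src, _ := os.MkdirTemp("", "src")
	dst, _ := os.MkdirTemp("", "dst")
	defer os.RemoveAll(src)
	defer os.RemoveAll(dst)
	if err := copyOwnershipSketch(src, dst); err != nil {
		fmt.Println("copy ownership:", err)
	}
}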
<ide><path>container/container_windows.go
<add>// +build windows
<add>
<add>package container
<add>
<add>import (
<add> "github.com/docker/docker/daemon/execdriver"
<add> "github.com/docker/docker/volume"
<add> "github.com/docker/libnetwork"
<add>)
<add>
<add>// DefaultPathEnv is deliberately empty on Windows as the default path will be set by
<add>// the container. Docker has no context of what the default path should be.
<add>const DefaultPathEnv = ""
<add>
<add>// Container holds fields specific to the Windows implementation. See
<add>// CommonContainer for standard fields common to all containers.
<add>type Container struct {
<add> CommonContainer
<add>
<add> // Fields below here are platform specific.
<add>}
<add>
<add>// CreateDaemonEnvironment creates a new environment variable slice for this container.
<add>func (container *Container) CreateDaemonEnvironment(linkedEnv []string) []string {
<add> // On Windows, nothing to link. Just return the container environment.
<add> return container.Config.Env
<add>}
<add>
<add>// DisconnectFromNetwork disconnects a container from the network.
<add>func (container *Container) DisconnectFromNetwork(n libnetwork.Network) error {
<add> return nil
<add>}
<add>
<add>// SetupWorkingDirectory initializes the container working directory.
<add>// This is a no-op on Windows.
<add>func (container *Container) SetupWorkingDirectory() error {
<add> return nil
<add>}
<add>
<add>// UnmountIpcMounts unmounts IPC-related mounts.
<add>// This is a no-op on Windows.
<add>func (container *Container) UnmountIpcMounts(unmount func(pth string) error) {
<add>}
<add>
<add>// IpcMounts returns the list of Ipc related mounts.
<add>func (container *Container) IpcMounts() []execdriver.Mount {
<add> return nil
<add>}
<add>
<add>// UnmountVolumes explicitly unmounts volumes from the container.
<add>func (container *Container) UnmountVolumes(forceSyscall bool) error {
<add> return nil
<add>}
<add>
<add>// TmpfsMounts returns the list of tmpfs mounts
<add>func (container *Container) TmpfsMounts() []execdriver.Mount {
<add> return nil
<add>}
<add>
<add>// appendNetworkMounts appends any network mounts to the array of mount points passed in.
<add>// Windows does not support network mounts (not to be confused with SMB network mounts), so
<add>// this is a no-op.
<add>func appendNetworkMounts(container *Container, volumeMounts []volume.MountPoint) ([]volume.MountPoint, error) {
<add> return volumeMounts, nil
<add>}
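
The unix/windows split above relies entirely on Go build constraints: container_unix.go carries `// +build linux freebsd` and container_windows.go carries `// +build windows`, so exactly one Container definition and one DefaultPathEnv is compiled per target. A toy two-file sketch of the same pattern (file, package and constant names are invented for illustration):

// thing_unix.go
// +build linux freebsd

package thing

// Unix targets get a real default search path.
const DefaultPath = "/usr/local/bin:/usr/bin:/bin"

// thing_windows.go
// +build windows

package thing

// Deliberately empty on Windows; the platform supplies the default.
const DefaultPath = ""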
<add><path>container/monitor.go
<del><path>daemon/monitor.go
<del>package daemon
<add>package container
<ide>
<ide> import (
<ide> "io"
<ide> import (
<ide> "github.com/Sirupsen/logrus"
<ide> "github.com/docker/docker/daemon/execdriver"
<ide> derr "github.com/docker/docker/errors"
<add> "github.com/docker/docker/pkg/promise"
<ide> "github.com/docker/docker/pkg/stringid"
<ide> "github.com/docker/docker/runconfig"
<ide> "github.com/docker/docker/utils"
<ide> const (
<ide> loggerCloseTimeout = 10 * time.Second
<ide> )
<ide>
<del>// containerSupervisor defines the interface that a supervisor must implement
<del>type containerSupervisor interface {
<add>// supervisor defines the interface that a supervisor must implement
<add>type supervisor interface {
<ide> // LogContainerEvent generates events related to a given container
<ide> LogContainerEvent(*Container, string)
<ide> // Cleanup ensures that the container is properly unmounted
<ide> type containerMonitor struct {
<ide> mux sync.Mutex
<ide>
<ide> // supervisor keeps track of the container and the events it generates
<del> supervisor containerSupervisor
<add> supervisor supervisor
<ide>
<ide> // container is the container being monitored
<ide> container *Container
<ide> type containerMonitor struct {
<ide> lastStartTime time.Time
<ide> }
<ide>
<del>// newContainerMonitor returns an initialized containerMonitor for the provided container
<del>// honoring the provided restart policy
<del>func (daemon *Daemon) newContainerMonitor(container *Container, policy runconfig.RestartPolicy) *containerMonitor {
<del> return &containerMonitor{
<del> supervisor: daemon,
<add>// StartMonitor initializes a containerMonitor for this container with the provided supervisor and restart policy
<add>// and starts the container's process.
<add>func (container *Container) StartMonitor(s supervisor, policy runconfig.RestartPolicy) error {
<add> container.monitor = &containerMonitor{
<add> supervisor: s,
<ide> container: container,
<ide> restartPolicy: policy,
<ide> timeIncrement: defaultTimeIncrement,
<ide> stopChan: make(chan struct{}),
<ide> startSignal: make(chan struct{}),
<ide> }
<add>
<add> return container.monitor.wait()
<add>}
<add>
<add>// wait starts the container and waits until
<add>// we either receive an error from the initial start of the container's
<add>// process or the process is running in the container
<add>func (m *containerMonitor) wait() error {
<add> select {
<add> case <-m.startSignal:
<add> case err := <-promise.Go(m.start):
<add> return err
<add> }
<add>
<add> return nil
<ide> }
<ide>
<ide> // Stop signals to the container monitor that it should stop monitoring the container
<ide> func (m *containerMonitor) Close() error {
<ide> // FIXME: here is race condition between two RUN instructions in Dockerfile
<ide> // because they share same runconfig and change image. Must be fixed
<ide> // in builder/builder.go
<del> if err := m.container.toDisk(); err != nil {
<add> if err := m.container.ToDisk(); err != nil {
<ide> logrus.Errorf("Error dumping container %s state to disk: %s", m.container.ID, err)
<ide>
<ide> return err
<ide> func (m *containerMonitor) Close() error {
<ide> }
<ide>
<ide> // Start starts the containers process and monitors it according to the restart policy
<del>func (m *containerMonitor) Start() error {
<add>func (m *containerMonitor) start() error {
<ide> var (
<ide> err error
<ide> exitStatus execdriver.ExitStatus
<ide> func (m *containerMonitor) Start() error {
<ide> if afterRun {
<ide> m.container.Lock()
<ide> defer m.container.Unlock()
<del> m.container.setStopped(&exitStatus)
<add> m.container.SetStopped(&exitStatus)
<ide> }
<ide> m.Close()
<ide> }()
<ide> func (m *containerMonitor) Start() error {
<ide> m.resetMonitor(err == nil && exitStatus.ExitCode == 0)
<ide>
<ide> if m.shouldRestart(exitStatus.ExitCode) {
<del> m.container.setRestarting(&exitStatus)
<add> m.container.SetRestarting(&exitStatus)
<ide> m.logEvent("die")
<ide> m.resetContainer(true)
<ide>
<ide> func (m *containerMonitor) callback(processConfig *execdriver.ProcessConfig, pid
<ide> }()
<ide>
<ide> if processConfig.Tty {
<del> // The callback is called after the process Start()
<add> // The callback is called after the process start()
<ide> // so we are in the parent process. In TTY mode, stdin/out/err is the PtySlave
<ide> // which we close here.
<ide> if c, ok := processConfig.Stdout.(io.Closer); ok {
<ide> c.Close()
<ide> }
<ide> }
<ide>
<del> m.container.setRunning(pid)
<add> m.container.SetRunning(pid)
<ide>
<ide> // signal that the process has started
<ide> // close channel only if not closed
<ide> func (m *containerMonitor) callback(processConfig *execdriver.ProcessConfig, pid
<ide> close(m.startSignal)
<ide> }
<ide>
<del> if err := m.container.toDiskLocking(); err != nil {
<add> if err := m.container.ToDiskLocking(); err != nil {
<ide> logrus.Errorf("Error saving container to disk: %v", err)
<ide> }
<ide> return nil
<ide> func (m *containerMonitor) resetContainer(lock bool) {
<ide> logrus.Errorf("%s: %s", container.ID, err)
<ide> }
<ide>
<del> if container.command != nil && container.command.ProcessConfig.Terminal != nil {
<del> if err := container.command.ProcessConfig.Terminal.Close(); err != nil {
<add> if container.Command != nil && container.Command.ProcessConfig.Terminal != nil {
<add> if err := container.Command.ProcessConfig.Terminal.Close(); err != nil {
<ide> logrus.Errorf("%s: Error closing terminal: %s", container.ID, err)
<ide> }
<ide> }
<ide> func (m *containerMonitor) resetContainer(lock bool) {
<ide> container.NewInputPipes()
<ide> }
<ide>
<del> if container.logDriver != nil {
<del> if container.logCopier != nil {
<add> if container.LogDriver != nil {
<add> if container.LogCopier != nil {
<ide> exit := make(chan struct{})
<ide> go func() {
<del> container.logCopier.Wait()
<add> container.LogCopier.Wait()
<ide> close(exit)
<ide> }()
<ide> select {
<ide> func (m *containerMonitor) resetContainer(lock bool) {
<ide> case <-exit:
<ide> }
<ide> }
<del> container.logDriver.Close()
<del> container.logCopier = nil
<del> container.logDriver = nil
<add> container.LogDriver.Close()
<add> container.LogCopier = nil
<add> container.LogDriver = nil
<ide> }
<ide>
<del> c := container.command.ProcessConfig.Cmd
<add> c := container.Command.ProcessConfig.Cmd
<ide>
<del> container.command.ProcessConfig.Cmd = exec.Cmd{
<add> container.Command.ProcessConfig.Cmd = exec.Cmd{
<ide> Stdin: c.Stdin,
<ide> Stdout: c.Stdout,
<ide> Stderr: c.Stderr,
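
StartMonitor's wait() is a small race: the caller blocks until either the monitor goroutine reports an early failure or the callback closes startSignal to announce that the process is running. A self-contained sketch of that select-on-two-channels shape (promiseGo and waitStarted are hypothetical; promiseGo stands in for docker's pkg/promise.Go):

package main

import (
	"errors"
	"fmt"
	"time"
)

// promiseGo is a stand-in for docker's promise.Go: run f in a goroutine
// and deliver its error on a buffered channel.
func promiseGo(f func() error) <-chan error {
	ch := make(chan error, 1)
	go func() { ch <- f() }()
	return ch
}

// waitStarted blocks until either startSignal closes (the process is
// running) or the start function fails first — the same shape as
// containerMonitor.wait in the diff above.
func waitStarted(startSignal <-chan struct{}, start func() error) error {
	select {
	case <-startSignal:
		return nil
	case err := <-promiseGo(start):
		return err
	}
}

func main() {
	started := make(chan struct{}) // never closed here, so the error wins
	err := waitStarted(started, func() error {
		time.Sleep(10 * time.Millisecond)
		return errors.New("exec driver failed")
	})
	fmt.Println(err) // exec driver failed
}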
<add><path>container/state.go
<del><path>daemon/state.go
<del>package daemon
<add>package container
<ide>
<ide> import (
<ide> "fmt"
<ide> type State struct {
<ide> Paused bool
<ide> Restarting bool
<ide> OOMKilled bool
<del> removalInProgress bool // Not need for this to be persistent on disk.
<add> RemovalInProgress bool // No need for this to be persistent on disk.
<ide> Dead bool
<ide> Pid int
<ide> ExitCode int
<ide> func (s *State) String() string {
<ide> return fmt.Sprintf("Up %s", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)))
<ide> }
<ide>
<del> if s.removalInProgress {
<add> if s.RemovalInProgress {
<ide> return "Removal In Progress"
<ide> }
<ide>
<ide> func (s *State) StateString() string {
<ide> return "exited"
<ide> }
<ide>
<del>func isValidStateString(s string) bool {
<add>// IsValidStateString checks if the provided string is a valid container state or not.
<add>func IsValidStateString(s string) bool {
<ide> if s != "paused" &&
<ide> s != "restarting" &&
<ide> s != "running" &&
<ide> func wait(waitChan <-chan struct{}, timeout time.Duration) error {
<ide> // waitRunning waits until state is running. If state is already
<ide> // running it returns immediately. If you want wait forever you must
<ide> // supply negative timeout. Returns pid, that was passed to
<del>// setRunning.
<add>// SetRunning.
<ide> func (s *State) waitRunning(timeout time.Duration) (int, error) {
<ide> s.Lock()
<ide> if s.Running {
<ide> func (s *State) waitRunning(timeout time.Duration) (int, error) {
<ide>
<ide> // WaitStop waits until state is stopped. If state already stopped it returns
<ide> // immediately. If you want to wait forever you must supply a negative timeout.
<del>// Returns exit code, that was passed to setStoppedLocking
<add>// Returns the exit code that was passed to SetStoppedLocking
<ide> func (s *State) WaitStop(timeout time.Duration) (int, error) {
<ide> s.Lock()
<ide> if !s.Running {
<ide> func (s *State) getExitCode() int {
<ide> return res
<ide> }
<ide>
<del>func (s *State) setRunning(pid int) {
<add>// SetRunning sets the state of the container to "running".
<add>func (s *State) SetRunning(pid int) {
<ide> s.Error = ""
<ide> s.Running = true
<ide> s.Paused = false
<ide> func (s *State) setRunning(pid int) {
<ide> s.waitChan = make(chan struct{})
<ide> }
<ide>
<del>func (s *State) setStoppedLocking(exitStatus *execdriver.ExitStatus) {
<add>// SetStoppedLocking locks the container state and sets it to "stopped".
<add>func (s *State) SetStoppedLocking(exitStatus *execdriver.ExitStatus) {
<ide> s.Lock()
<del> s.setStopped(exitStatus)
<add> s.SetStopped(exitStatus)
<ide> s.Unlock()
<ide> }
<ide>
<del>func (s *State) setStopped(exitStatus *execdriver.ExitStatus) {
<add>// SetStopped sets the container state to "stopped" without locking.
<add>func (s *State) SetStopped(exitStatus *execdriver.ExitStatus) {
<ide> s.Running = false
<ide> s.Restarting = false
<ide> s.Pid = 0
<ide> func (s *State) setStopped(exitStatus *execdriver.ExitStatus) {
<ide> s.waitChan = make(chan struct{})
<ide> }
<ide>
<del>// setRestarting is when docker handles the auto restart of containers when they are
<add>// SetRestartingLocking is called when docker handles the auto restart of containers that are
<ide> // in the middle of a stop and being restarted again
<del>func (s *State) setRestartingLocking(exitStatus *execdriver.ExitStatus) {
<add>func (s *State) SetRestartingLocking(exitStatus *execdriver.ExitStatus) {
<ide> s.Lock()
<del> s.setRestarting(exitStatus)
<add> s.SetRestarting(exitStatus)
<ide> s.Unlock()
<ide> }
<ide>
<del>func (s *State) setRestarting(exitStatus *execdriver.ExitStatus) {
<add>// SetRestarting sets the container state to "restarting".
<add>// It also sets the container PID to 0.
<add>func (s *State) SetRestarting(exitStatus *execdriver.ExitStatus) {
<ide> // we should consider the container running when it is restarting because of
<ide> // all the checks in docker around rm/stop/etc
<ide> s.Running = true
<ide> func (s *State) setRestarting(exitStatus *execdriver.ExitStatus) {
<ide> s.waitChan = make(chan struct{})
<ide> }
<ide>
<del>// setError sets the container's error state. This is useful when we want to
<add>// SetError sets the container's error state. This is useful when we want to
<ide> // know the error that occurred when container transits to another state
<ide> // when inspecting it
<del>func (s *State) setError(err error) {
<add>func (s *State) SetError(err error) {
<ide> s.Error = err.Error()
<ide> }
<ide>
<del>func (s *State) isPaused() bool {
<add>// IsPaused returns whether the container is paused or not.
<add>func (s *State) IsPaused() bool {
<ide> s.Lock()
<ide> res := s.Paused
<ide> s.Unlock()
<ide> return res
<ide> }
<ide>
<del>func (s *State) setRemovalInProgress() error {
<add>// SetRemovalInProgress sets the container state as being removed.
<add>func (s *State) SetRemovalInProgress() error {
<ide> s.Lock()
<ide> defer s.Unlock()
<del> if s.removalInProgress {
<add> if s.RemovalInProgress {
<ide> return derr.ErrorCodeAlreadyRemoving
<ide> }
<del> s.removalInProgress = true
<add> s.RemovalInProgress = true
<ide> return nil
<ide> }
<ide>
<del>func (s *State) resetRemovalInProgress() {
<add>// ResetRemovalInProgress sets the RemovalInProgress state to false.
<add>func (s *State) ResetRemovalInProgress() {
<ide> s.Lock()
<del> s.removalInProgress = false
<add> s.RemovalInProgress = false
<ide> s.Unlock()
<ide> }
<ide>
<del>func (s *State) setDead() {
<add>// SetDead sets the container state to "dead".
<add>func (s *State) SetDead() {
<ide> s.Lock()
<ide> s.Dead = true
<ide> s.Unlock()
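For orientation, a rough lifecycle sketch of the newly exported State API, mirroring the tests that follow (the pid and exit code are placeholders, and NewState is assumed to be the package's constructor):

	s := container.NewState()
	s.Lock()
	s.SetRunning(1234) // state becomes "running" with the process pid
	s.Unlock()

	// ... later, when the process exits ...
	s.SetStoppedLocking(&execdriver.ExitStatus{ExitCode: 0})

	// A negative timeout waits forever; returns the exit code set above.
	code, err := s.WaitStop(-1 * time.Second)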
<add><path>container/state_test.go
<del><path>daemon/state_test.go
<del>package daemon
<add>package container
<ide>
<ide> import (
<ide> "sync/atomic"
<ide> func TestStateRunStop(t *testing.T) {
<ide> close(started)
<ide> }()
<ide> s.Lock()
<del> s.setRunning(i + 100)
<add> s.SetRunning(i + 100)
<ide> s.Unlock()
<ide>
<ide> if !s.IsRunning() {
<ide> func TestStateRunStop(t *testing.T) {
<ide> atomic.StoreInt64(&exit, int64(exitCode))
<ide> close(stopped)
<ide> }()
<del> s.setStoppedLocking(&execdriver.ExitStatus{ExitCode: i})
<add> s.SetStoppedLocking(&execdriver.ExitStatus{ExitCode: i})
<ide> if s.IsRunning() {
<ide> t.Fatal("State is running")
<ide> }
<ide> func TestStateTimeoutWait(t *testing.T) {
<ide> }
<ide>
<ide> s.Lock()
<del> s.setRunning(49)
<add> s.SetRunning(49)
<ide> s.Unlock()
<ide>
<ide> stopped := make(chan struct{})
<add><path>container/state_unix.go
<del><path>daemon/state_unix.go
<ide> // +build linux freebsd
<ide>
<del>package daemon
<add>package container
<ide>
<ide> import "github.com/docker/docker/daemon/execdriver"
<ide>
<add><path>container/state_windows.go
<del><path>daemon/state_windows.go
<del>package daemon
<add>package container
<ide>
<ide> import "github.com/docker/docker/daemon/execdriver"
<ide>
<ide><path>daemon/archive.go
<ide> import (
<ide> "strings"
<ide>
<ide> "github.com/docker/docker/api/types"
<add> "github.com/docker/docker/container"
<ide> "github.com/docker/docker/pkg/archive"
<ide> "github.com/docker/docker/pkg/chrootarchive"
<ide> "github.com/docker/docker/pkg/ioutils"
<ide> func (daemon *Daemon) ContainerExtractToDir(name, path string, noOverwriteDirNon
<ide> return daemon.containerExtractToDir(container, path, noOverwriteDirNonDir, content)
<ide> }
<ide>
<del>// resolvePath resolves the given path in the container to a resource on the
<del>// host. Returns a resolved path (absolute path to the resource on the host),
<del>// the absolute path to the resource relative to the container's rootfs, and
<del>// a error if the path points to outside the container's rootfs.
<del>func (container *Container) resolvePath(path string) (resolvedPath, absPath string, err error) {
<del> // Consider the given path as an absolute path in the container.
<del> absPath = archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path)
<del>
<del> // Split the absPath into its Directory and Base components. We will
<del> // resolve the dir in the scope of the container then append the base.
<del> dirPath, basePath := filepath.Split(absPath)
<del>
<del> resolvedDirPath, err := container.GetResourcePath(dirPath)
<del> if err != nil {
<del> return "", "", err
<del> }
<del>
<del> // resolvedDirPath will have been cleaned (no trailing path separators) so
<del> // we can manually join it with the base path element.
<del> resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
<del>
<del> return resolvedPath, absPath, nil
<del>}
<del>
<del>// statPath is the unexported version of StatPath. Locks and mounts should
<del>// be acquired before calling this method and the given path should be fully
<del>// resolved to a path on the host corresponding to the given absolute path
<del>// inside the container.
<del>func (container *Container) statPath(resolvedPath, absPath string) (stat *types.ContainerPathStat, err error) {
<del> lstat, err := os.Lstat(resolvedPath)
<del> if err != nil {
<del> return nil, err
<del> }
<del>
<del> var linkTarget string
<del> if lstat.Mode()&os.ModeSymlink != 0 {
<del> // Fully evaluate the symlink in the scope of the container rootfs.
<del> hostPath, err := container.GetResourcePath(absPath)
<del> if err != nil {
<del> return nil, err
<del> }
<del>
<del> linkTarget, err = filepath.Rel(container.basefs, hostPath)
<del> if err != nil {
<del> return nil, err
<del> }
<del>
<del> // Make it an absolute path.
<del> linkTarget = filepath.Join(string(filepath.Separator), linkTarget)
<del> }
<del>
<del> return &types.ContainerPathStat{
<del> Name: filepath.Base(absPath),
<del> Size: lstat.Size(),
<del> Mode: lstat.Mode(),
<del> Mtime: lstat.ModTime(),
<del> LinkTarget: linkTarget,
<del> }, nil
<del>}
<del>
<ide> // containerStatPath stats the filesystem resource at the specified path in this
<ide> // container. Returns stat info about the resource.
<del>func (daemon *Daemon) containerStatPath(container *Container, path string) (stat *types.ContainerPathStat, err error) {
<add>func (daemon *Daemon) containerStatPath(container *container.Container, path string) (stat *types.ContainerPathStat, err error) {
<ide> container.Lock()
<ide> defer container.Unlock()
<ide>
<ide> func (daemon *Daemon) containerStatPath(container *Container, path string) (stat
<ide> defer daemon.Unmount(container)
<ide>
<ide> err = daemon.mountVolumes(container)
<del> defer container.unmountVolumes(true)
<add> defer container.UnmountVolumes(true)
<ide> if err != nil {
<ide> return nil, err
<ide> }
<ide>
<del> resolvedPath, absPath, err := container.resolvePath(path)
<add> resolvedPath, absPath, err := container.ResolvePath(path)
<ide> if err != nil {
<ide> return nil, err
<ide> }
<ide>
<del> return container.statPath(resolvedPath, absPath)
<add> return container.StatPath(resolvedPath, absPath)
<ide> }
<ide>
<ide> // containerArchivePath creates an archive of the filesystem resource at the specified
<ide> // path in this container. Returns a tar archive of the resource and stat info
<ide> // about the resource.
<del>func (daemon *Daemon) containerArchivePath(container *Container, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) {
<add>func (daemon *Daemon) containerArchivePath(container *container.Container, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) {
<ide> container.Lock()
<ide>
<ide> defer func() {
<ide> func (daemon *Daemon) containerArchivePath(container *Container, path string) (c
<ide> defer func() {
<ide> if err != nil {
<ide> // unmount any volumes
<del> container.unmountVolumes(true)
<add> container.UnmountVolumes(true)
<ide> // unmount the container's rootfs
<ide> daemon.Unmount(container)
<ide> }
<ide> func (daemon *Daemon) containerArchivePath(container *Container, path string) (c
<ide> return nil, nil, err
<ide> }
<ide>
<del> resolvedPath, absPath, err := container.resolvePath(path)
<add> resolvedPath, absPath, err := container.ResolvePath(path)
<ide> if err != nil {
<ide> return nil, nil, err
<ide> }
<ide>
<del> stat, err = container.statPath(resolvedPath, absPath)
<add> stat, err = container.StatPath(resolvedPath, absPath)
<ide> if err != nil {
<ide> return nil, nil, err
<ide> }
<ide> func (daemon *Daemon) containerArchivePath(container *Container, path string) (c
<ide>
<ide> content = ioutils.NewReadCloserWrapper(data, func() error {
<ide> err := data.Close()
<del> container.unmountVolumes(true)
<add> container.UnmountVolumes(true)
<ide> daemon.Unmount(container)
<ide> container.Unlock()
<ide> return err
<ide> func (daemon *Daemon) containerArchivePath(container *Container, path string) (c
<ide> // noOverwriteDirNonDir is true then it will be an error if unpacking the
<ide> // given content would cause an existing directory to be replaced with a non-
<ide> // directory and vice versa.
<del>func (daemon *Daemon) containerExtractToDir(container *Container, path string, noOverwriteDirNonDir bool, content io.Reader) (err error) {
<add>func (daemon *Daemon) containerExtractToDir(container *container.Container, path string, noOverwriteDirNonDir bool, content io.Reader) (err error) {
<ide> container.Lock()
<ide> defer container.Unlock()
<ide>
<ide> func (daemon *Daemon) containerExtractToDir(container *Container, path string, n
<ide> defer daemon.Unmount(container)
<ide>
<ide> err = daemon.mountVolumes(container)
<del> defer container.unmountVolumes(true)
<add> defer container.UnmountVolumes(true)
<ide> if err != nil {
<ide> return err
<ide> }
<ide>
<ide> // The destination path needs to be resolved to a host path, with all
<ide> // symbolic links followed in the scope of the container's rootfs. Note
<del> // that we do not use `container.resolvePath(path)` here because we need
<add> // that we do not use `container.ResolvePath(path)` here because we need
<ide> // to also evaluate the last path element if it is a symlink. This is so
<ide> // that you can extract an archive to a symlink that points to a directory.
<ide>
<ide> func (daemon *Daemon) containerExtractToDir(container *Container, path string, n
<ide> // a volume file path.
<ide> var baseRel string
<ide> if strings.HasPrefix(resolvedPath, `\\?\Volume{`) {
<del> if strings.HasPrefix(resolvedPath, container.basefs) {
<del> baseRel = resolvedPath[len(container.basefs):]
<add> if strings.HasPrefix(resolvedPath, container.BaseFS) {
<add> baseRel = resolvedPath[len(container.BaseFS):]
<ide> if baseRel[:1] == `\` {
<ide> baseRel = baseRel[1:]
<ide> }
<ide> }
<ide> } else {
<del> baseRel, err = filepath.Rel(container.basefs, resolvedPath)
<add> baseRel, err = filepath.Rel(container.BaseFS, resolvedPath)
<ide> }
<ide> if err != nil {
<ide> return err
<ide> func (daemon *Daemon) containerExtractToDir(container *Container, path string, n
<ide> return err
<ide> }
<ide>
<del> if !toVolume && container.hostConfig.ReadonlyRootfs {
<add> if !toVolume && container.HostConfig.ReadonlyRootfs {
<ide> return ErrRootFSReadOnly
<ide> }
<ide>
<ide> func (daemon *Daemon) containerExtractToDir(container *Container, path string, n
<ide> return nil
<ide> }
<ide>
<del>func (daemon *Daemon) containerCopy(container *Container, resource string) (rc io.ReadCloser, err error) {
<add>func (daemon *Daemon) containerCopy(container *container.Container, resource string) (rc io.ReadCloser, err error) {
<ide> container.Lock()
<ide>
<ide> defer func() {
<ide> func (daemon *Daemon) containerCopy(container *Container, resource string) (rc i
<ide> defer func() {
<ide> if err != nil {
<ide> // unmount any volumes
<del> container.unmountVolumes(true)
<add> container.UnmountVolumes(true)
<ide> // unmount the container's rootfs
<ide> daemon.Unmount(container)
<ide> }
<ide> func (daemon *Daemon) containerCopy(container *Container, resource string) (rc i
<ide>
<ide> reader := ioutils.NewReadCloserWrapper(archive, func() error {
<ide> err := archive.Close()
<del> container.unmountVolumes(true)
<add> container.UnmountVolumes(true)
<ide> daemon.Unmount(container)
<ide> container.Unlock()
<ide> return err
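The resolve-then-stat flow above now goes through exported container methods. A minimal sketch (the path literal is illustrative; locking and volume mounting, handled in containerStatPath, are omitted):

	resolvedPath, absPath, err := ctr.ResolvePath("/etc/hostname")
	if err != nil {
		return nil, err
	}
	// StatPath returns a *types.ContainerPathStat describing the resource,
	// including the symlink target when the path is a symlink.
	stat, err := ctr.StatPath(resolvedPath, absPath)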
<ide><path>daemon/archive_unix.go
<ide>
<ide> package daemon
<ide>
<add>import "github.com/docker/docker/container"
<add>
<ide> // checkIfPathIsInAVolume checks if the path is in a volume. If it is, it
<ide> // cannot be in a read-only volume. If it is not in a volume, the container
<ide> // cannot be configured with a read-only rootfs.
<del>func checkIfPathIsInAVolume(container *Container, absPath string) (bool, error) {
<add>func checkIfPathIsInAVolume(container *container.Container, absPath string) (bool, error) {
<ide> var toVolume bool
<ide> for _, mnt := range container.MountPoints {
<ide> if toVolume = mnt.HasResource(absPath); toVolume {
<ide><path>daemon/archive_windows.go
<ide> package daemon
<ide>
<add>import "github.com/docker/docker/container"
<add>
<ide> // checkIfPathIsInAVolume checks if the path is in a volume. If it is, it
<ide> // cannot be in a read-only volume. If it is not in a volume, the container
<ide> // cannot be configured with a read-only rootfs.
<ide> //
<ide> // This is a no-op on Windows which does not support read-only volumes, or
<ide> // extracting to a mount point inside a volume. TODO Windows: FIXME Post-TP4
<del>func checkIfPathIsInAVolume(container *Container, absPath string) (bool, error) {
<add>func checkIfPathIsInAVolume(container *container.Container, absPath string) (bool, error) {
<ide> return false, nil
<ide> }
<ide><path>daemon/attach.go
<ide> import (
<ide> "time"
<ide>
<ide> "github.com/Sirupsen/logrus"
<add> "github.com/docker/docker/container"
<ide> "github.com/docker/docker/daemon/logger"
<ide> "github.com/docker/docker/pkg/stdcopy"
<ide> )
<ide> func (daemon *Daemon) ContainerWsAttachWithLogs(prefixOrName string, c *Containe
<ide> return daemon.attachWithLogs(container, c.InStream, c.OutStream, c.ErrStream, c.Logs, c.Stream)
<ide> }
<ide>
<del>func (daemon *Daemon) attachWithLogs(container *Container, stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool) error {
<add>func (daemon *Daemon) attachWithLogs(container *container.Container, stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool) error {
<ide> if logs {
<ide> logDriver, err := daemon.getLogger(container)
<ide> if err != nil {
<ide><path>daemon/commit.go
<ide> import (
<ide> "time"
<ide>
<ide> "github.com/docker/distribution/reference"
<add> "github.com/docker/docker/container"
<ide> "github.com/docker/docker/dockerversion"
<ide> "github.com/docker/docker/image"
<ide> "github.com/docker/docker/layer"
<ide> func (daemon *Daemon) Commit(name string, c *ContainerCommitConfig) (string, err
<ide> return "", fmt.Errorf("Windows does not support commit of a running container")
<ide> }
<ide>
<del> if c.Pause && !container.isPaused() {
<add> if c.Pause && !container.IsPaused() {
<ide> daemon.containerPause(container)
<ide> defer daemon.containerUnpause(container)
<ide> }
<ide> func (daemon *Daemon) Commit(name string, c *ContainerCommitConfig) (string, err
<ide> return id.String(), nil
<ide> }
<ide>
<del>func (daemon *Daemon) exportContainerRw(container *Container) (archive.Archive, error) {
<add>func (daemon *Daemon) exportContainerRw(container *container.Container) (archive.Archive, error) {
<ide> if err := daemon.Mount(container); err != nil {
<ide> return nil, err
<ide> }
<ide>
<del> archive, err := container.rwlayer.TarStream()
<add> archive, err := container.RWLayer.TarStream()
<ide> if err != nil {
<ide> return nil, err
<ide> }
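exportContainerRw mounts the container and returns a tar stream of its read-write layer. A sketch of consuming it (dst is a placeholder io.Writer; in the real flow the stream is wrapped so the container is unmounted on Close):

	arch, err := daemon.exportContainerRw(ctr)
	if err != nil {
		return err
	}
	defer arch.Close()
	if _, err := io.Copy(dst, arch); err != nil {
		return err
	}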
<ide><path>daemon/container_operations.go
<add>package daemon
<add>
<add>import "errors"
<add>
<add>var (
<add> // ErrRootFSReadOnly is returned when a container
<add> // rootfs is marked readonly.
<add> ErrRootFSReadOnly = errors.New("container rootfs is marked read-only")
<add>)
<ide><path>daemon/container_operations_unix.go
<add>// +build linux freebsd
<add>
<add>package daemon
<add>
<add>import (
<add> "fmt"
<add> "os"
<add> "path"
<add> "path/filepath"
<add> "strconv"
<add> "strings"
<add> "syscall"
<add> "time"
<add>
<add> "github.com/Sirupsen/logrus"
<add> "github.com/docker/docker/container"
<add> "github.com/docker/docker/daemon/execdriver"
<add> "github.com/docker/docker/daemon/links"
<add> "github.com/docker/docker/daemon/network"
<add> derr "github.com/docker/docker/errors"
<add> "github.com/docker/docker/pkg/fileutils"
<add> "github.com/docker/docker/pkg/idtools"
<add> "github.com/docker/docker/pkg/mount"
<add> "github.com/docker/docker/pkg/stringid"
<add> "github.com/docker/docker/pkg/ulimit"
<add> "github.com/docker/docker/runconfig"
<add> "github.com/docker/libnetwork"
<add> "github.com/docker/libnetwork/netlabel"
<add> "github.com/docker/libnetwork/options"
<add> "github.com/opencontainers/runc/libcontainer/configs"
<add> "github.com/opencontainers/runc/libcontainer/devices"
<add> "github.com/opencontainers/runc/libcontainer/label"
<add>)
<add>
<add>func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) {
<add> var env []string
<add> children, err := daemon.children(container.Name)
<add> if err != nil {
<add> return nil, err
<add> }
<add>
<add> bridgeSettings := container.NetworkSettings.Networks["bridge"]
<add> if bridgeSettings == nil {
<add> return nil, nil
<add> }
<add>
<add> if len(children) > 0 {
<add> for linkAlias, child := range children {
<add> if !child.IsRunning() {
<add> return nil, derr.ErrorCodeLinkNotRunning.WithArgs(child.Name, linkAlias)
<add> }
<add>
<add> childBridgeSettings := child.NetworkSettings.Networks["bridge"]
<add> if childBridgeSettings == nil {
<add> return nil, fmt.Errorf("container %s not attached to default bridge network", child.ID)
<add> }
<add>
<add> link := links.NewLink(
<add> bridgeSettings.IPAddress,
<add> childBridgeSettings.IPAddress,
<add> linkAlias,
<add> child.Config.Env,
<add> child.Config.ExposedPorts,
<add> )
<add>
<add> for _, envVar := range link.ToEnv() {
<add> env = append(env, envVar)
<add> }
<add> }
<add> }
<add> return env, nil
<add>}
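setupLinkedContainers materializes each link as environment variables via links.NewLink(...).ToEnv(). For a parent /web linked to a child aliased db that exposes port 5432, the resulting entries look along these lines (addresses and values hypothetical):

	DB_NAME=/web/db
	DB_PORT=tcp://172.17.0.2:5432
	DB_PORT_5432_TCP_ADDR=172.17.0.2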
<add>
<add>func (daemon *Daemon) populateCommand(c *container.Container, env []string) error {
<add> var en *execdriver.Network
<add> if !c.Config.NetworkDisabled {
<add> en = &execdriver.Network{}
<add> if !daemon.execDriver.SupportsHooks() || c.HostConfig.NetworkMode.IsHost() {
<add> en.NamespacePath = c.NetworkSettings.SandboxKey
<add> }
<add>
<add> if c.HostConfig.NetworkMode.IsContainer() {
<add> nc, err := daemon.getNetworkedContainer(c.ID, c.HostConfig.NetworkMode.ConnectedContainer())
<add> if err != nil {
<add> return err
<add> }
<add> en.ContainerID = nc.ID
<add> }
<add> }
<add>
<add> ipc := &execdriver.Ipc{}
<add> var err error
<add> c.ShmPath, err = c.ShmResourcePath()
<add> if err != nil {
<add> return err
<add> }
<add>
<add> c.MqueuePath, err = c.MqueueResourcePath()
<add> if err != nil {
<add> return err
<add> }
<add>
<add> if c.HostConfig.IpcMode.IsContainer() {
<add> ic, err := daemon.getIpcContainer(c)
<add> if err != nil {
<add> return err
<add> }
<add> ipc.ContainerID = ic.ID
<add> c.ShmPath = ic.ShmPath
<add> c.MqueuePath = ic.MqueuePath
<add> } else {
<add> ipc.HostIpc = c.HostConfig.IpcMode.IsHost()
<add> if ipc.HostIpc {
<add> if _, err := os.Stat("/dev/shm"); err != nil {
<add> return fmt.Errorf("/dev/shm is not mounted, but must be for --ipc=host")
<add> }
<add> if _, err := os.Stat("/dev/mqueue"); err != nil {
<add> return fmt.Errorf("/dev/mqueue is not mounted, but must be for --ipc=host")
<add> }
<add> c.ShmPath = "/dev/shm"
<add> c.MqueuePath = "/dev/mqueue"
<add> }
<add> }
<add>
<add> pid := &execdriver.Pid{}
<add> pid.HostPid = c.HostConfig.PidMode.IsHost()
<add>
<add> uts := &execdriver.UTS{
<add> HostUTS: c.HostConfig.UTSMode.IsHost(),
<add> }
<add>
<add> // Build lists of devices allowed and created within the container.
<add> var userSpecifiedDevices []*configs.Device
<add> for _, deviceMapping := range c.HostConfig.Devices {
<add> devs, err := getDevicesFromPath(deviceMapping)
<add> if err != nil {
<add> return err
<add> }
<add>
<add> userSpecifiedDevices = append(userSpecifiedDevices, devs...)
<add> }
<add>
<add> allowedDevices := mergeDevices(configs.DefaultAllowedDevices, userSpecifiedDevices)
<add>
<add> autoCreatedDevices := mergeDevices(configs.DefaultAutoCreatedDevices, userSpecifiedDevices)
<add>
<add> var rlimits []*ulimit.Rlimit
<add> ulimits := c.HostConfig.Ulimits
<add>
<add> // Merge ulimits with daemon defaults
<add> ulIdx := make(map[string]*ulimit.Ulimit)
<add> for _, ul := range ulimits {
<add> ulIdx[ul.Name] = ul
<add> }
<add> for name, ul := range daemon.configStore.Ulimits {
<add> if _, exists := ulIdx[name]; !exists {
<add> ulimits = append(ulimits, ul)
<add> }
<add> }
<add>
<add> weightDevices, err := getBlkioWeightDevices(c.HostConfig)
<add> if err != nil {
<add> return err
<add> }
<add>
<add> for _, limit := range ulimits {
<add> rl, err := limit.GetRlimit()
<add> if err != nil {
<add> return err
<add> }
<add> rlimits = append(rlimits, rl)
<add> }
<add>
<add> resources := &execdriver.Resources{
<add> CommonResources: execdriver.CommonResources{
<add> Memory: c.HostConfig.Memory,
<add> MemoryReservation: c.HostConfig.MemoryReservation,
<add> CPUShares: c.HostConfig.CPUShares,
<add> BlkioWeight: c.HostConfig.BlkioWeight,
<add> },
<add> MemorySwap: c.HostConfig.MemorySwap,
<add> KernelMemory: c.HostConfig.KernelMemory,
<add> CpusetCpus: c.HostConfig.CpusetCpus,
<add> CpusetMems: c.HostConfig.CpusetMems,
<add> CPUPeriod: c.HostConfig.CPUPeriod,
<add> CPUQuota: c.HostConfig.CPUQuota,
<add> Rlimits: rlimits,
<add> BlkioWeightDevice: weightDevices,
<add> OomKillDisable: c.HostConfig.OomKillDisable,
<add> MemorySwappiness: *c.HostConfig.MemorySwappiness,
<add> }
<add>
<add> processConfig := execdriver.ProcessConfig{
<add> CommonProcessConfig: execdriver.CommonProcessConfig{
<add> Entrypoint: c.Path,
<add> Arguments: c.Args,
<add> Tty: c.Config.Tty,
<add> },
<add> Privileged: c.HostConfig.Privileged,
<add> User: c.Config.User,
<add> }
<add>
<add> processConfig.SysProcAttr = &syscall.SysProcAttr{Setsid: true}
<add> processConfig.Env = env
<add>
<add> remappedRoot := &execdriver.User{}
<add> rootUID, rootGID := daemon.GetRemappedUIDGID()
<add> if rootUID != 0 {
<add> remappedRoot.UID = rootUID
<add> remappedRoot.GID = rootGID
<add> }
<add> uidMap, gidMap := daemon.GetUIDGIDMaps()
<add>
<add> c.Command = &execdriver.Command{
<add> CommonCommand: execdriver.CommonCommand{
<add> ID: c.ID,
<add> InitPath: "/.dockerinit",
<add> MountLabel: c.GetMountLabel(),
<add> Network: en,
<add> ProcessConfig: processConfig,
<add> ProcessLabel: c.GetProcessLabel(),
<add> Rootfs: c.BaseFS,
<add> Resources: resources,
<add> WorkingDir: c.Config.WorkingDir,
<add> },
<add> AllowedDevices: allowedDevices,
<add> AppArmorProfile: c.AppArmorProfile,
<add> AutoCreatedDevices: autoCreatedDevices,
<add> CapAdd: c.HostConfig.CapAdd.Slice(),
<add> CapDrop: c.HostConfig.CapDrop.Slice(),
<add> CgroupParent: c.HostConfig.CgroupParent,
<add> GIDMapping: gidMap,
<add> GroupAdd: c.HostConfig.GroupAdd,
<add> Ipc: ipc,
<add> OomScoreAdj: c.HostConfig.OomScoreAdj,
<add> Pid: pid,
<add> ReadonlyRootfs: c.HostConfig.ReadonlyRootfs,
<add> RemappedRoot: remappedRoot,
<add> UIDMapping: uidMap,
<add> UTS: uts,
<add> }
<add>
<add> return nil
<add>}
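Among other things, populateCommand merges container ulimits with the daemon defaults, with container values taking precedence. A self-contained sketch of the same index-then-append pattern (the Ulimit struct here is a simplified stand-in for ulimit.Ulimit):

package main

import "fmt"

type Ulimit struct {
	Name string
	Soft int64
	Hard int64
}

// mergeUlimits keeps every container-level limit and appends only the
// daemon defaults whose names are not already set.
func mergeUlimits(containerLimits, daemonDefaults []*Ulimit) []*Ulimit {
	seen := make(map[string]*Ulimit)
	for _, ul := range containerLimits {
		seen[ul.Name] = ul
	}
	merged := containerLimits
	for _, ul := range daemonDefaults {
		if _, exists := seen[ul.Name]; !exists {
			merged = append(merged, ul)
		}
	}
	return merged
}

func main() {
	c := []*Ulimit{{Name: "nofile", Soft: 1024, Hard: 2048}}
	d := []*Ulimit{{Name: "nofile", Soft: 64, Hard: 64}, {Name: "nproc", Soft: 512, Hard: 512}}
	fmt.Println(len(mergeUlimits(c, d))) // 2: container nofile wins, nproc is added
}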
<add>
<add>// getSize returns the real size & virtual size of the container.
<add>func (daemon *Daemon) getSize(container *container.Container) (int64, int64) {
<add> var (
<add> sizeRw, sizeRootfs int64
<add> err error
<add> )
<add>
<add> if err := daemon.Mount(container); err != nil {
<add> logrus.Errorf("Failed to compute size of container rootfs %s: %s", container.ID, err)
<add> return sizeRw, sizeRootfs
<add> }
<add> defer daemon.Unmount(container)
<add>
<add> sizeRw, err = container.RWLayer.Size()
<add> if err != nil {
<add> logrus.Errorf("Driver %s couldn't return diff size of container %s: %s", daemon.driver, container.ID, err)
<add> // FIXME: GetSize should return an error. Not changing it now in case
<add> // there is a side-effect.
<add> sizeRw = -1
<add> }
<add>
<add> if parent := container.RWLayer.Parent(); parent != nil {
<add> sizeRootfs, err = parent.Size()
<add> if err != nil {
<add> sizeRootfs = -1
<add> } else if sizeRw != -1 {
<add> sizeRootfs += sizeRw
<add> }
<add> }
<add> return sizeRw, sizeRootfs
<add>}
<add>
<add>func (daemon *Daemon) buildSandboxOptions(container *container.Container, n libnetwork.Network) ([]libnetwork.SandboxOption, error) {
<add> var (
<add> sboxOptions []libnetwork.SandboxOption
<add> err error
<add> dns []string
<add> dnsSearch []string
<add> dnsOptions []string
<add> )
<add>
<add> sboxOptions = append(sboxOptions, libnetwork.OptionHostname(container.Config.Hostname),
<add> libnetwork.OptionDomainname(container.Config.Domainname))
<add>
<add> if container.HostConfig.NetworkMode.IsHost() {
<add> sboxOptions = append(sboxOptions, libnetwork.OptionUseDefaultSandbox())
<add> sboxOptions = append(sboxOptions, libnetwork.OptionOriginHostsPath("/etc/hosts"))
<add> sboxOptions = append(sboxOptions, libnetwork.OptionOriginResolvConfPath("/etc/resolv.conf"))
<add> } else if daemon.execDriver.SupportsHooks() {
<add>		// OptionUseExternalKey is mandatory for userns support,
<add>		// but optional for non-userns support.
<add> sboxOptions = append(sboxOptions, libnetwork.OptionUseExternalKey())
<add> }
<add>
<add> container.HostsPath, err = container.GetRootResourcePath("hosts")
<add> if err != nil {
<add> return nil, err
<add> }
<add> sboxOptions = append(sboxOptions, libnetwork.OptionHostsPath(container.HostsPath))
<add>
<add> container.ResolvConfPath, err = container.GetRootResourcePath("resolv.conf")
<add> if err != nil {
<add> return nil, err
<add> }
<add> sboxOptions = append(sboxOptions, libnetwork.OptionResolvConfPath(container.ResolvConfPath))
<add>
<add> if len(container.HostConfig.DNS) > 0 {
<add> dns = container.HostConfig.DNS
<add> } else if len(daemon.configStore.DNS) > 0 {
<add> dns = daemon.configStore.DNS
<add> }
<add>
<add> for _, d := range dns {
<add> sboxOptions = append(sboxOptions, libnetwork.OptionDNS(d))
<add> }
<add>
<add> if len(container.HostConfig.DNSSearch) > 0 {
<add> dnsSearch = container.HostConfig.DNSSearch
<add> } else if len(daemon.configStore.DNSSearch) > 0 {
<add> dnsSearch = daemon.configStore.DNSSearch
<add> }
<add>
<add> for _, ds := range dnsSearch {
<add> sboxOptions = append(sboxOptions, libnetwork.OptionDNSSearch(ds))
<add> }
<add>
<add> if len(container.HostConfig.DNSOptions) > 0 {
<add> dnsOptions = container.HostConfig.DNSOptions
<add> } else if len(daemon.configStore.DNSOptions) > 0 {
<add> dnsOptions = daemon.configStore.DNSOptions
<add> }
<add>
<add> for _, ds := range dnsOptions {
<add> sboxOptions = append(sboxOptions, libnetwork.OptionDNSOptions(ds))
<add> }
<add>
<add> if container.NetworkSettings.SecondaryIPAddresses != nil {
<add> name := container.Config.Hostname
<add> if container.Config.Domainname != "" {
<add> name = name + "." + container.Config.Domainname
<add> }
<add>
<add> for _, a := range container.NetworkSettings.SecondaryIPAddresses {
<add> sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(name, a.Addr))
<add> }
<add> }
<add>
<add> for _, extraHost := range container.HostConfig.ExtraHosts {
<add> // allow IPv6 addresses in extra hosts; only split on first ":"
<add> parts := strings.SplitN(extraHost, ":", 2)
<add> sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(parts[0], parts[1]))
<add> }
<add>
<add> // Link feature is supported only for the default bridge network.
<add> // return if this call to build join options is not for default bridge network
<add> if n.Name() != "bridge" {
<add> return sboxOptions, nil
<add> }
<add>
<add> ep, _ := container.GetEndpointInNetwork(n)
<add> if ep == nil {
<add> return sboxOptions, nil
<add> }
<add>
<add> var childEndpoints, parentEndpoints []string
<add>
<add> children, err := daemon.children(container.Name)
<add> if err != nil {
<add> return nil, err
<add> }
<add>
<add> for linkAlias, child := range children {
<add> if !isLinkable(child) {
<add> return nil, fmt.Errorf("Cannot link to %s, as it does not belong to the default network", child.Name)
<add> }
<add> _, alias := path.Split(linkAlias)
<add> // allow access to the linked container via the alias, real name, and container hostname
<add> aliasList := alias + " " + child.Config.Hostname
<add> // only add the name if alias isn't equal to the name
<add> if alias != child.Name[1:] {
<add> aliasList = aliasList + " " + child.Name[1:]
<add> }
<add> sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(aliasList, child.NetworkSettings.Networks["bridge"].IPAddress))
<add> cEndpoint, _ := child.GetEndpointInNetwork(n)
<add> if cEndpoint != nil && cEndpoint.ID() != "" {
<add> childEndpoints = append(childEndpoints, cEndpoint.ID())
<add> }
<add> }
<add>
<add> bridgeSettings := container.NetworkSettings.Networks["bridge"]
<add> refs := daemon.containerGraph().RefPaths(container.ID)
<add> for _, ref := range refs {
<add> if ref.ParentID == "0" {
<add> continue
<add> }
<add>
<add> c, err := daemon.Get(ref.ParentID)
<add> if err != nil {
<add> logrus.Error(err)
<add> }
<add>
<add> if c != nil && !daemon.configStore.DisableBridge && container.HostConfig.NetworkMode.IsPrivate() {
<add> logrus.Debugf("Update /etc/hosts of %s for alias %s with ip %s", c.ID, ref.Name, bridgeSettings.IPAddress)
<add> sboxOptions = append(sboxOptions, libnetwork.OptionParentUpdate(c.ID, ref.Name, bridgeSettings.IPAddress))
<add> if ep.ID() != "" {
<add> parentEndpoints = append(parentEndpoints, ep.ID())
<add> }
<add> }
<add> }
<add>
<add> linkOptions := options.Generic{
<add> netlabel.GenericData: options.Generic{
<add> "ParentEndpoints": parentEndpoints,
<add> "ChildEndpoints": childEndpoints,
<add> },
<add> }
<add>
<add> sboxOptions = append(sboxOptions, libnetwork.OptionGeneric(linkOptions))
<add>
<add> return sboxOptions, nil
<add>}
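buildSandboxOptions splits each extra host on the first ":" only, so IPv6 addresses stay intact. A tiny runnable check:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Splitting on the first ":" only keeps the IPv6 address whole.
	parts := strings.SplitN("myhost:2001:db8::1", ":", 2)
	fmt.Println(parts[0]) // myhost
	fmt.Println(parts[1]) // 2001:db8::1
}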
<add>
<add>func (daemon *Daemon) updateNetworkSettings(container *container.Container, n libnetwork.Network) error {
<add> if container.NetworkSettings == nil {
<add> container.NetworkSettings = &network.Settings{Networks: make(map[string]*network.EndpointSettings)}
<add> }
<add>
<add> if !container.HostConfig.NetworkMode.IsHost() && runconfig.NetworkMode(n.Type()).IsHost() {
<add> return runconfig.ErrConflictHostNetwork
<add> }
<add>
<add> for s := range container.NetworkSettings.Networks {
<add> sn, err := daemon.FindNetwork(s)
<add> if err != nil {
<add> continue
<add> }
<add>
<add> if sn.Name() == n.Name() {
<add> // Avoid duplicate config
<add> return nil
<add> }
<add> if !runconfig.NetworkMode(sn.Type()).IsPrivate() ||
<add> !runconfig.NetworkMode(n.Type()).IsPrivate() {
<add> return runconfig.ErrConflictSharedNetwork
<add> }
<add> if runconfig.NetworkMode(sn.Name()).IsNone() ||
<add> runconfig.NetworkMode(n.Name()).IsNone() {
<add> return runconfig.ErrConflictNoNetwork
<add> }
<add> }
<add> container.NetworkSettings.Networks[n.Name()] = new(network.EndpointSettings)
<add>
<add> return nil
<add>}
<add>
<add>func (daemon *Daemon) updateEndpointNetworkSettings(container *container.Container, n libnetwork.Network, ep libnetwork.Endpoint) error {
<add> if err := container.BuildEndpointInfo(n, ep); err != nil {
<add> return err
<add> }
<add>
<add> if container.HostConfig.NetworkMode == runconfig.NetworkMode("bridge") {
<add> container.NetworkSettings.Bridge = daemon.configStore.Bridge.Iface
<add> }
<add>
<add> return nil
<add>}
<add>
<add>// updateNetwork updates the container's network (e.g. when linked containers
<add>// get removed/unlinked).
<add>func (daemon *Daemon) updateNetwork(container *container.Container) error {
<add> ctrl := daemon.netController
<add> sid := container.NetworkSettings.SandboxID
<add>
<add> sb, err := ctrl.SandboxByID(sid)
<add> if err != nil {
<add> return derr.ErrorCodeNoSandbox.WithArgs(sid, err)
<add> }
<add>
<add> // Find if container is connected to the default bridge network
<add> var n libnetwork.Network
<add> for name := range container.NetworkSettings.Networks {
<add> sn, err := daemon.FindNetwork(name)
<add> if err != nil {
<add> continue
<add> }
<add> if sn.Name() == "bridge" {
<add> n = sn
<add> break
<add> }
<add> }
<add>
<add> if n == nil {
<add>		// Not connected to the default bridge network; nothing to do
<add> return nil
<add> }
<add>
<add> options, err := daemon.buildSandboxOptions(container, n)
<add> if err != nil {
<add> return derr.ErrorCodeNetworkUpdate.WithArgs(err)
<add> }
<add>
<add> if err := sb.Refresh(options...); err != nil {
<add> return derr.ErrorCodeNetworkRefresh.WithArgs(sid, err)
<add> }
<add>
<add> return nil
<add>}
<add>
<add>func (daemon *Daemon) allocateNetwork(container *container.Container) error {
<add> controller := daemon.netController
<add>
<add> // Cleanup any stale sandbox left over due to ungraceful daemon shutdown
<add> if err := controller.SandboxDestroy(container.ID); err != nil {
<add>		logrus.Errorf("failed to clean up stale network sandbox for container %s", container.ID)
<add> }
<add>
<add> updateSettings := false
<add> if len(container.NetworkSettings.Networks) == 0 {
<add> mode := container.HostConfig.NetworkMode
<add> if container.Config.NetworkDisabled || mode.IsContainer() {
<add> return nil
<add> }
<add>
<add> networkName := mode.NetworkName()
<add> if mode.IsDefault() {
<add> networkName = controller.Config().Daemon.DefaultNetwork
<add> }
<add> if mode.IsUserDefined() {
<add> n, err := daemon.FindNetwork(networkName)
<add> if err != nil {
<add> return err
<add> }
<add> networkName = n.Name()
<add> }
<add> container.NetworkSettings.Networks = make(map[string]*network.EndpointSettings)
<add> container.NetworkSettings.Networks[networkName] = new(network.EndpointSettings)
<add> updateSettings = true
<add> }
<add>
<add> for n := range container.NetworkSettings.Networks {
<add> if err := daemon.connectToNetwork(container, n, updateSettings); err != nil {
<add> return err
<add> }
<add> }
<add>
<add> return container.WriteHostConfig()
<add>}
<add>
<add>func (daemon *Daemon) getNetworkSandbox(container *container.Container) libnetwork.Sandbox {
<add> var sb libnetwork.Sandbox
<add> daemon.netController.WalkSandboxes(func(s libnetwork.Sandbox) bool {
<add> if s.ContainerID() == container.ID {
<add> sb = s
<add> return true
<add> }
<add> return false
<add> })
<add> return sb
<add>}
<add>
<add>// ConnectToNetwork connects a container to a network
<add>func (daemon *Daemon) ConnectToNetwork(container *container.Container, idOrName string) error {
<add> if !container.Running {
<add> return derr.ErrorCodeNotRunning.WithArgs(container.ID)
<add> }
<add> if err := daemon.connectToNetwork(container, idOrName, true); err != nil {
<add> return err
<add> }
<add> if err := container.ToDiskLocking(); err != nil {
<add> return fmt.Errorf("Error saving container to disk: %v", err)
<add> }
<add> return nil
<add>}
<add>
<add>func (daemon *Daemon) connectToNetwork(container *container.Container, idOrName string, updateSettings bool) (err error) {
<add> if container.HostConfig.NetworkMode.IsContainer() {
<add> return runconfig.ErrConflictSharedNetwork
<add> }
<add>
<add> if runconfig.NetworkMode(idOrName).IsBridge() &&
<add> daemon.configStore.DisableBridge {
<add> container.Config.NetworkDisabled = true
<add> return nil
<add> }
<add>
<add> controller := daemon.netController
<add>
<add> n, err := daemon.FindNetwork(idOrName)
<add> if err != nil {
<add> return err
<add> }
<add>
<add> if updateSettings {
<add> if err := daemon.updateNetworkSettings(container, n); err != nil {
<add> return err
<add> }
<add> }
<add>
<add> ep, err := container.GetEndpointInNetwork(n)
<add> if err == nil {
<add> return fmt.Errorf("container already connected to network %s", idOrName)
<add> }
<add>
<add> if _, ok := err.(libnetwork.ErrNoSuchEndpoint); !ok {
<add> return err
<add> }
<add>
<add> createOptions, err := container.BuildCreateEndpointOptions(n)
<add> if err != nil {
<add> return err
<add> }
<add>
<add> endpointName := strings.TrimPrefix(container.Name, "/")
<add> ep, err = n.CreateEndpoint(endpointName, createOptions...)
<add> if err != nil {
<add> return err
<add> }
<add> defer func() {
<add> if err != nil {
<add> if e := ep.Delete(); e != nil {
<add> logrus.Warnf("Could not rollback container connection to network %s", idOrName)
<add> }
<add> }
<add> }()
<add>
<add> if err := daemon.updateEndpointNetworkSettings(container, n, ep); err != nil {
<add> return err
<add> }
<add>
<add> sb := daemon.getNetworkSandbox(container)
<add> if sb == nil {
<add> options, err := daemon.buildSandboxOptions(container, n)
<add> if err != nil {
<add> return err
<add> }
<add> sb, err = controller.NewSandbox(container.ID, options...)
<add> if err != nil {
<add> return err
<add> }
<add>
<add> container.UpdateSandboxNetworkSettings(sb)
<add> }
<add>
<add> if err := ep.Join(sb); err != nil {
<add> return err
<add> }
<add>
<add> if err := container.UpdateJoinInfo(n, ep); err != nil {
<add> return derr.ErrorCodeJoinInfo.WithArgs(err)
<add> }
<add>
<add> return nil
<add>}
<add>
<add>func (daemon *Daemon) initializeNetworking(container *container.Container) error {
<add> var err error
<add>
<add> if container.HostConfig.NetworkMode.IsContainer() {
<add> // we need to get the hosts files from the container to join
<add> nc, err := daemon.getNetworkedContainer(container.ID, container.HostConfig.NetworkMode.ConnectedContainer())
<add> if err != nil {
<add> return err
<add> }
<add> container.HostnamePath = nc.HostnamePath
<add> container.HostsPath = nc.HostsPath
<add> container.ResolvConfPath = nc.ResolvConfPath
<add> container.Config.Hostname = nc.Config.Hostname
<add> container.Config.Domainname = nc.Config.Domainname
<add> return nil
<add> }
<add>
<add> if container.HostConfig.NetworkMode.IsHost() {
<add> container.Config.Hostname, err = os.Hostname()
<add> if err != nil {
<add> return err
<add> }
<add>
<add> parts := strings.SplitN(container.Config.Hostname, ".", 2)
<add> if len(parts) > 1 {
<add> container.Config.Hostname = parts[0]
<add> container.Config.Domainname = parts[1]
<add> }
<add>
<add> }
<add>
<add> if err := daemon.allocateNetwork(container); err != nil {
<add> return err
<add> }
<add>
<add> return container.BuildHostnameFile()
<add>}
<add>
<add>// setNetworkNamespaceKey is called from the libcontainer pre-start hook to link
<add>// the container's network namespace to the libnetwork "sandbox" entity.
<add>func (daemon *Daemon) setNetworkNamespaceKey(containerID string, pid int) error {
<add> path := fmt.Sprintf("/proc/%d/ns/net", pid)
<add> var sandbox libnetwork.Sandbox
<add> search := libnetwork.SandboxContainerWalker(&sandbox, containerID)
<add> daemon.netController.WalkSandboxes(search)
<add> if sandbox == nil {
<add> return derr.ErrorCodeNoSandbox.WithArgs(containerID, "no sandbox found")
<add> }
<add>
<add> return sandbox.SetKey(path)
<add>}
<add>
<add>func (daemon *Daemon) getIpcContainer(container *container.Container) (*container.Container, error) {
<add> containerID := container.HostConfig.IpcMode.Container()
<add> c, err := daemon.Get(containerID)
<add> if err != nil {
<add> return nil, err
<add> }
<add> if !c.IsRunning() {
<add> return nil, derr.ErrorCodeIPCRunning
<add> }
<add> return c, nil
<add>}
<add>
<add>func (daemon *Daemon) getNetworkedContainer(containerID, connectedContainerID string) (*container.Container, error) {
<add> nc, err := daemon.Get(connectedContainerID)
<add> if err != nil {
<add> return nil, err
<add> }
<add> if containerID == nc.ID {
<add> return nil, derr.ErrorCodeJoinSelf
<add> }
<add> if !nc.IsRunning() {
<add> return nil, derr.ErrorCodeJoinRunning.WithArgs(connectedContainerID)
<add> }
<add> return nc, nil
<add>}
<add>
<add>func (daemon *Daemon) releaseNetwork(container *container.Container) {
<add> if container.HostConfig.NetworkMode.IsContainer() || container.Config.NetworkDisabled {
<add> return
<add> }
<add>
<add> sid := container.NetworkSettings.SandboxID
<add> networks := container.NetworkSettings.Networks
<add> for n := range networks {
<add> networks[n] = &network.EndpointSettings{}
<add> }
<add>
<add> container.NetworkSettings = &network.Settings{Networks: networks}
<add>
<add> if sid == "" || len(networks) == 0 {
<add> return
<add> }
<add>
<add> sb, err := daemon.netController.SandboxByID(sid)
<add> if err != nil {
<add> logrus.Errorf("error locating sandbox id %s: %v", sid, err)
<add> return
<add> }
<add>
<add> if err := sb.Delete(); err != nil {
<add> logrus.Errorf("Error deleting sandbox id %s for container %s: %v", sid, container.ID, err)
<add> }
<add>}
<add>
<add>func (daemon *Daemon) setupIpcDirs(c *container.Container) error {
<add> rootUID, rootGID := daemon.GetRemappedUIDGID()
<add> if !c.HasMountFor("/dev/shm") {
<add> shmPath, err := c.ShmResourcePath()
<add> if err != nil {
<add> return err
<add> }
<add>
<add> if err := idtools.MkdirAllAs(shmPath, 0700, rootUID, rootGID); err != nil {
<add> return err
<add> }
<add>
<add> shmSize := container.DefaultSHMSize
<add> if c.HostConfig.ShmSize != nil {
<add> shmSize = *c.HostConfig.ShmSize
<add> }
<add> shmproperty := "mode=1777,size=" + strconv.FormatInt(shmSize, 10)
<add> if err := syscall.Mount("shm", shmPath, "tmpfs", uintptr(syscall.MS_NOEXEC|syscall.MS_NOSUID|syscall.MS_NODEV), label.FormatMountLabel(shmproperty, c.GetMountLabel())); err != nil {
<add> return fmt.Errorf("mounting shm tmpfs: %s", err)
<add> }
<add> if err := os.Chown(shmPath, rootUID, rootGID); err != nil {
<add> return err
<add> }
<add> }
<add>
<add> if !c.HasMountFor("/dev/mqueue") {
<add> mqueuePath, err := c.MqueueResourcePath()
<add> if err != nil {
<add> return err
<add> }
<add>
<add> if err := idtools.MkdirAllAs(mqueuePath, 0700, rootUID, rootGID); err != nil {
<add> return err
<add> }
<add>
<add> if err := syscall.Mount("mqueue", mqueuePath, "mqueue", uintptr(syscall.MS_NOEXEC|syscall.MS_NOSUID|syscall.MS_NODEV), ""); err != nil {
<add>			return fmt.Errorf("mounting mqueue mqueue: %s", err)
<add> }
<add> if err := os.Chown(mqueuePath, rootUID, rootGID); err != nil {
<add> return err
<add> }
<add> }
<add>
<add> return nil
<add>}
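setupIpcDirs builds the tmpfs options string from the configured shm size. A quick check of the exact string it mounts with (64MB mirrors container.DefaultSHMSize by assumption; any int64 works):

package main

import (
	"fmt"
	"strconv"
)

func main() {
	shmSize := int64(64 * 1024 * 1024)
	shmproperty := "mode=1777,size=" + strconv.FormatInt(shmSize, 10)
	fmt.Println(shmproperty) // mode=1777,size=67108864
}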
<add>
<add>func (daemon *Daemon) mountVolumes(container *container.Container) error {
<add> mounts, err := daemon.setupMounts(container)
<add> if err != nil {
<add> return err
<add> }
<add>
<add> for _, m := range mounts {
<add> dest, err := container.GetResourcePath(m.Destination)
<add> if err != nil {
<add> return err
<add> }
<add>
<add> var stat os.FileInfo
<add> stat, err = os.Stat(m.Source)
<add> if err != nil {
<add> return err
<add> }
<add> if err = fileutils.CreateIfNotExists(dest, stat.IsDir()); err != nil {
<add> return err
<add> }
<add>
<add> opts := "rbind,ro"
<add> if m.Writable {
<add> opts = "rbind,rw"
<add> }
<add>
<add> if err := mount.Mount(m.Source, dest, "bind", opts); err != nil {
<add> return err
<add> }
<add> }
<add>
<add> return nil
<add>}
<add>
<add>func killProcessDirectly(container *container.Container) error {
<add> if _, err := container.WaitStop(10 * time.Second); err != nil {
<add> // Ensure that we don't kill ourselves
<add> if pid := container.GetPID(); pid != 0 {
<add> logrus.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", stringid.TruncateID(container.ID))
<add> if err := syscall.Kill(pid, 9); err != nil {
<add> if err != syscall.ESRCH {
<add> return err
<add> }
<add> logrus.Debugf("Cannot kill process (pid=%d) with signal 9: no such process.", pid)
<add> }
<add> }
<add> }
<add> return nil
<add>}
<add>
<add>func getDevicesFromPath(deviceMapping runconfig.DeviceMapping) (devs []*configs.Device, err error) {
<add> device, err := devices.DeviceFromPath(deviceMapping.PathOnHost, deviceMapping.CgroupPermissions)
<add> // if there was no error, return the device
<add> if err == nil {
<add> device.Path = deviceMapping.PathInContainer
<add> return append(devs, device), nil
<add> }
<add>
<add> // if the device is not a device node
<add> // try to see if it's a directory holding many devices
<add> if err == devices.ErrNotADevice {
<add>
<add> // check if it is a directory
<add> if src, e := os.Stat(deviceMapping.PathOnHost); e == nil && src.IsDir() {
<add>
<add> // mount the internal devices recursively
<add> filepath.Walk(deviceMapping.PathOnHost, func(dpath string, f os.FileInfo, e error) error {
<add> childDevice, e := devices.DeviceFromPath(dpath, deviceMapping.CgroupPermissions)
<add> if e != nil {
<add> // ignore the device
<add> return nil
<add> }
<add>
<add> // add the device to userSpecified devices
<add> childDevice.Path = strings.Replace(dpath, deviceMapping.PathOnHost, deviceMapping.PathInContainer, 1)
<add> devs = append(devs, childDevice)
<add>
<add> return nil
<add> })
<add> }
<add> }
<add>
<add> if len(devs) > 0 {
<add> return devs, nil
<add> }
<add>
<add> return devs, derr.ErrorCodeDeviceInfo.WithArgs(deviceMapping.PathOnHost, err)
<add>}
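When the host path is a directory, getDevicesFromPath walks it and rewrites each discovered device from the host prefix to the container prefix with a single strings.Replace. Illustration (paths hypothetical):

package main

import (
	"fmt"
	"strings"
)

func main() {
	pathOnHost := "/dev/dri"      // deviceMapping.PathOnHost
	pathInContainer := "/dev/gpu" // deviceMapping.PathInContainer
	found := "/dev/dri/card0"     // a node discovered during the walk
	fmt.Println(strings.Replace(found, pathOnHost, pathInContainer, 1))
	// /dev/gpu/card0
}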
<add>
<add>func mergeDevices(defaultDevices, userDevices []*configs.Device) []*configs.Device {
<add> if len(userDevices) == 0 {
<add> return defaultDevices
<add> }
<add>
<add> paths := map[string]*configs.Device{}
<add> for _, d := range userDevices {
<add> paths[d.Path] = d
<add> }
<add>
<add> var devs []*configs.Device
<add> for _, d := range defaultDevices {
<add> if _, defined := paths[d.Path]; !defined {
<add> devs = append(devs, d)
<add> }
<add> }
<add> return append(devs, userDevices...)
<add>}
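mergeDevices gives user-specified devices precedence over defaults at the same path. Illustrative call (field values simplified; configs.Device normally also carries type, major/minor and permissions):

	defaults := []*configs.Device{{Path: "/dev/null"}, {Path: "/dev/zero"}}
	user := []*configs.Device{{Path: "/dev/null"}}
	merged := mergeDevices(defaults, user)
	// len(merged) == 2; the user-provided /dev/null shadows the default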
<add>
<add>func detachMounted(path string) error {
<add> return syscall.Unmount(path, syscall.MNT_DETACH)
<add>}
<add>
<add>func isLinkable(child *container.Container) bool {
<add> // A container is linkable only if it belongs to the default network
<add> _, ok := child.NetworkSettings.Networks["bridge"]
<add> return ok
<add>}
<add><path>daemon/container_operations_windows.go
<del><path>daemon/container_windows.go
<ide> package daemon
<ide> import (
<ide> "strings"
<ide>
<add> "github.com/docker/docker/container"
<ide> "github.com/docker/docker/daemon/execdriver"
<ide> derr "github.com/docker/docker/errors"
<ide> "github.com/docker/docker/layer"
<del> "github.com/docker/docker/volume"
<del> "github.com/docker/libnetwork"
<ide> )
<ide>
<del>// DefaultPathEnv is deliberately empty on Windows as the default path will be set by
<del>// the container. Docker has no context of what the default path should be.
<del>const DefaultPathEnv = ""
<del>
<del>// Container holds fields specific to the Windows implementation. See
<del>// CommonContainer for standard fields common to all containers.
<del>type Container struct {
<del> CommonContainer
<del>
<del> // Fields below here are platform specific.
<del>}
<del>
<del>func killProcessDirectly(container *Container) error {
<del> return nil
<del>}
<del>
<del>func (daemon *Daemon) setupLinkedContainers(container *Container) ([]string, error) {
<add>func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) {
<ide> return nil, nil
<ide> }
<ide>
<del>func (container *Container) createDaemonEnvironment(linkedEnv []string) []string {
<del> // On Windows, nothing to link. Just return the container environment.
<del> return container.Config.Env
<del>}
<del>
<del>func (daemon *Daemon) initializeNetworking(container *Container) error {
<add>func (daemon *Daemon) initializeNetworking(container *container.Container) error {
<ide> return nil
<ide> }
<ide>
<ide> // ConnectToNetwork connects a container to the network
<del>func (daemon *Daemon) ConnectToNetwork(container *Container, idOrName string) error {
<add>func (daemon *Daemon) ConnectToNetwork(container *container.Container, idOrName string) error {
<ide> return nil
<ide> }
<ide>
<del>// DisconnectFromNetwork disconnects a container from, the network
<del>func (container *Container) DisconnectFromNetwork(n libnetwork.Network) error {
<del> return nil
<del>}
<del>
<del>func (container *Container) setupWorkingDirectory() error {
<del> return nil
<del>}
<del>
<del>func (daemon *Daemon) populateCommand(c *Container, env []string) error {
<add>func (daemon *Daemon) populateCommand(c *container.Container, env []string) error {
<ide> en := &execdriver.Network{
<ide> Interface: nil,
<ide> }
<ide>
<del> parts := strings.SplitN(string(c.hostConfig.NetworkMode), ":", 2)
<add> parts := strings.SplitN(string(c.HostConfig.NetworkMode), ":", 2)
<ide> switch parts[0] {
<ide> case "none":
<ide> case "default", "": // empty string to support existing containers
<ide> if !c.Config.NetworkDisabled {
<ide> en.Interface = &execdriver.NetworkInterface{
<ide> MacAddress: c.Config.MacAddress,
<ide> Bridge: daemon.configStore.Bridge.VirtualSwitchName,
<del> PortBindings: c.hostConfig.PortBindings,
<add> PortBindings: c.HostConfig.PortBindings,
<ide>
<ide> // TODO Windows. Include IPAddress. There already is a
<ide> // property IPAddress on execDrive.CommonNetworkInterface,
<ide> func (daemon *Daemon) populateCommand(c *Container, env []string) error {
<ide> }
<ide> }
<ide> default:
<del> return derr.ErrorCodeInvalidNetworkMode.WithArgs(c.hostConfig.NetworkMode)
<add> return derr.ErrorCodeInvalidNetworkMode.WithArgs(c.HostConfig.NetworkMode)
<ide> }
<ide>
<ide> // TODO Windows. More resource controls to be implemented later.
<ide> resources := &execdriver.Resources{
<ide> CommonResources: execdriver.CommonResources{
<del> CPUShares: c.hostConfig.CPUShares,
<add> CPUShares: c.HostConfig.CPUShares,
<ide> },
<ide> }
<ide>
<ide> func (daemon *Daemon) populateCommand(c *Container, env []string) error {
<ide> Arguments: c.Args,
<ide> Tty: c.Config.Tty,
<ide> },
<del> ConsoleSize: c.hostConfig.ConsoleSize,
<add> ConsoleSize: c.HostConfig.ConsoleSize,
<ide> }
<ide>
<ide> processConfig.Env = env
<ide> func (daemon *Daemon) populateCommand(c *Container, env []string) error {
<ide> }
<ide> layerFolder := m["dir"]
<ide>
<del> c.command = &execdriver.Command{
<add> c.Command = &execdriver.Command{
<ide> CommonCommand: execdriver.CommonCommand{
<ide> ID: c.ID,
<del> Rootfs: c.rootfsPath(),
<add> Rootfs: c.BaseFS,
<ide> InitPath: "/.dockerinit",
<ide> WorkingDir: c.Config.WorkingDir,
<ide> Network: en,
<del> MountLabel: c.getMountLabel(),
<add> MountLabel: c.GetMountLabel(),
<ide> Resources: resources,
<ide> ProcessConfig: processConfig,
<del> ProcessLabel: c.getProcessLabel(),
<add> ProcessLabel: c.GetProcessLabel(),
<ide> },
<ide> FirstStart: !c.HasBeenStartedBefore,
<ide> LayerFolder: layerFolder,
<ide> LayerPaths: layerPaths,
<ide> Hostname: c.Config.Hostname,
<del> Isolation: c.hostConfig.Isolation,
<add> Isolation: c.HostConfig.Isolation,
<ide> ArgsEscaped: c.Config.ArgsEscaped,
<ide> }
<ide>
<ide> return nil
<ide> }
<ide>
<ide> // getSize returns real size & virtual size
<del>func (daemon *Daemon) getSize(container *Container) (int64, int64) {
<add>func (daemon *Daemon) getSize(container *container.Container) (int64, int64) {
<ide> // TODO Windows
<ide> return 0, 0
<ide> }
<ide> func (daemon *Daemon) setNetworkNamespaceKey(containerID string, pid int) error
<ide> }
<ide>
<ide> // allocateNetwork is a no-op on Windows.
<del>func (daemon *Daemon) allocateNetwork(container *Container) error {
<add>func (daemon *Daemon) allocateNetwork(container *container.Container) error {
<ide> return nil
<ide> }
<ide>
<del>func (daemon *Daemon) updateNetwork(container *Container) error {
<del> return nil
<del>}
<del>
<del>func (daemon *Daemon) releaseNetwork(container *Container) {
<del>}
<del>
<del>// appendNetworkMounts appends any network mounts to the array of mount points passed in.
<del>// Windows does not support network mounts (not to be confused with SMB network mounts), so
<del>// this is a no-op.
<del>func appendNetworkMounts(container *Container, volumeMounts []volume.MountPoint) ([]volume.MountPoint, error) {
<del> return volumeMounts, nil
<del>}
<del>
<del>func (daemon *Daemon) setupIpcDirs(container *Container) error {
<add>func (daemon *Daemon) updateNetwork(container *container.Container) error {
<ide> return nil
<ide> }
<ide>
<del>func (container *Container) unmountIpcMounts(unmount func(pth string) error) {
<add>func (daemon *Daemon) releaseNetwork(container *container.Container) {
<ide> }
<ide>
<del>func detachMounted(path string) error {
<add>func (daemon *Daemon) setupIpcDirs(container *container.Container) error {
<ide> return nil
<ide> }
<ide>
<del>func (container *Container) ipcMounts() []execdriver.Mount {
<add>// TODO Windows: Fix Post-TP4. This is a hack to allow docker cp to work
<add>// against containers which have volumes. You will still be able to cp
<add>// to somewhere on the container drive, but not to any mounted volumes
<add>// inside the container. Without this fix, docker cp is broken to any
<add>// container which has a volume, regardless of where the file is inside the
<add>// container.
<add>func (daemon *Daemon) mountVolumes(container *container.Container) error {
<ide> return nil
<ide> }
<ide>
<del>func (container *Container) tmpfsMounts() []execdriver.Mount {
<add>func detachMounted(path string) error {
<ide> return nil
<ide> }
<ide>
<ide> func getDefaultRouteMtu() (int, error) {
<ide> return -1, errSystemNotSupported
<ide> }
<ide>
<del>// TODO Windows: Fix Post-TP4. This is a hack to allow docker cp to work
<del>// against containers which have volumes. You will still be able to cp
<del>// to somewhere on the container drive, but not to any mounted volumes
<del>// inside the container. Without this fix, docker cp is broken to any
<del>// container which has a volume, regardless of where the file is inside the
<del>// container.
<del>func (daemon *Daemon) mountVolumes(container *Container) error {
<del> return nil
<del>}
<del>func (container *Container) unmountVolumes(forceSyscall bool) error {
<add>func killProcessDirectly(container *container.Container) error {
<ide> return nil
<ide> }
<ide><path>daemon/container_unit_test.go
<del>package daemon
<del>
<del>import (
<del> "io/ioutil"
<del> "os"
<del> "path/filepath"
<del> "testing"
<del>
<del> "github.com/docker/docker/pkg/signal"
<del> "github.com/docker/docker/runconfig"
<del> "github.com/docker/docker/volume"
<del> "github.com/docker/docker/volume/drivers"
<del>)
<del>
<del>func TestGetFullName(t *testing.T) {
<del> name, err := GetFullContainerName("testing")
<del> if err != nil {
<del> t.Fatal(err)
<del> }
<del> if name != "/testing" {
<del> t.Fatalf("Expected /testing got %s", name)
<del> }
<del> if _, err := GetFullContainerName(""); err == nil {
<del> t.Fatal("Error should not be nil")
<del> }
<del>}
<del>
<del>func TestValidContainerNames(t *testing.T) {
<del> invalidNames := []string{"-rm", "&sdfsfd", "safd%sd"}
<del> validNames := []string{"word-word", "word_word", "1weoid"}
<del>
<del> for _, name := range invalidNames {
<del> if validContainerNamePattern.MatchString(name) {
<del> t.Fatalf("%q is not a valid container name and was returned as valid.", name)
<del> }
<del> }
<del>
<del> for _, name := range validNames {
<del> if !validContainerNamePattern.MatchString(name) {
<del> t.Fatalf("%q is a valid container name and was returned as invalid.", name)
<del> }
<del> }
<del>}
<del>
<del>func TestContainerStopSignal(t *testing.T) {
<del> c := &Container{
<del> CommonContainer: CommonContainer{
<del> Config: &runconfig.Config{},
<del> },
<del> }
<del>
<del> def, err := signal.ParseSignal(signal.DefaultStopSignal)
<del> if err != nil {
<del> t.Fatal(err)
<del> }
<del>
<del> s := c.stopSignal()
<del> if s != int(def) {
<del> t.Fatalf("Expected %v, got %v", def, s)
<del> }
<del>
<del> c = &Container{
<del> CommonContainer: CommonContainer{
<del> Config: &runconfig.Config{StopSignal: "SIGKILL"},
<del> },
<del> }
<del> s = c.stopSignal()
<del> if s != 9 {
<del> t.Fatalf("Expected 9, got %v", s)
<del> }
<del>}
<del>
<del>func TestContainerInitDNS(t *testing.T) {
<del> tmp, err := ioutil.TempDir("", "docker-container-test-")
<del> if err != nil {
<del> t.Fatal(err)
<del> }
<del> defer os.RemoveAll(tmp)
<del>
<del> containerID := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e"
<del> containerPath := filepath.Join(tmp, containerID)
<del> if err := os.MkdirAll(containerPath, 0755); err != nil {
<del> t.Fatal(err)
<del> }
<del>
<del> config := `{"State":{"Running":true,"Paused":false,"Restarting":false,"OOMKilled":false,"Dead":false,"Pid":2464,"ExitCode":0,
<del>"Error":"","StartedAt":"2015-05-26T16:48:53.869308965Z","FinishedAt":"0001-01-01T00:00:00Z"},
<del>"ID":"d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e","Created":"2015-05-26T16:48:53.7987917Z","Path":"top",
<del>"Args":[],"Config":{"Hostname":"d59df5276e7b","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"",
<del>"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":true,"OpenStdin":true,
<del>"StdinOnce":false,"Env":null,"Cmd":["top"],"Image":"ubuntu:latest","Volumes":null,"WorkingDir":"","Entrypoint":null,
<del>"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":{}},"Image":"07f8e8c5e66084bef8f848877857537ffe1c47edd01a93af27e7161672ad0e95",
<del>"NetworkSettings":{"IPAddress":"172.17.0.1","IPPrefixLen":16,"MacAddress":"02:42:ac:11:00:01","LinkLocalIPv6Address":"fe80::42:acff:fe11:1",
<del>"LinkLocalIPv6PrefixLen":64,"GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"Gateway":"172.17.42.1","IPv6Gateway":"","Bridge":"docker0","Ports":{}},
<del>"ResolvConfPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/resolv.conf",
<del>"HostnamePath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hostname",
<del>"HostsPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hosts",
<del>"LogPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e-json.log",
<del>"Name":"/ubuntu","Driver":"aufs","MountLabel":"","ProcessLabel":"","AppArmorProfile":"","RestartCount":0,
<del>"UpdateDns":false,"Volumes":{},"VolumesRW":{},"AppliedVolumesFrom":null}`
<del>
<del> if err = ioutil.WriteFile(filepath.Join(containerPath, configFileName), []byte(config), 0644); err != nil {
<del> t.Fatal(err)
<del> }
<del>
<del> hostConfig := `{"Binds":[],"ContainerIDFile":"","Memory":0,"MemorySwap":0,"CpuShares":0,"CpusetCpus":"",
<del>"Privileged":false,"PortBindings":{},"Links":null,"PublishAllPorts":false,"Dns":null,"DnsOptions":null,"DnsSearch":null,"ExtraHosts":null,"VolumesFrom":null,
<del>"Devices":[],"NetworkMode":"bridge","IpcMode":"","PidMode":"","CapAdd":null,"CapDrop":null,"RestartPolicy":{"Name":"no","MaximumRetryCount":0},
<del>"SecurityOpt":null,"ReadonlyRootfs":false,"Ulimits":null,"LogConfig":{"Type":"","Config":null},"CgroupParent":""}`
<del> if err = ioutil.WriteFile(filepath.Join(containerPath, "hostconfig.json"), []byte(hostConfig), 0644); err != nil {
<del> t.Fatal(err)
<del> }
<del>
<del> daemon, err := initDaemonWithVolumeStore(tmp)
<del> if err != nil {
<del> t.Fatal(err)
<del> }
<del> defer volumedrivers.Unregister(volume.DefaultDriverName)
<del>
<del> c, err := daemon.load(containerID)
<del> if err != nil {
<del> t.Fatal(err)
<del> }
<del>
<del> if c.hostConfig.DNS == nil {
<del> t.Fatal("Expected container DNS to not be nil")
<del> }
<del>
<del> if c.hostConfig.DNSSearch == nil {
<del> t.Fatal("Expected container DNSSearch to not be nil")
<del> }
<del>
<del> if c.hostConfig.DNSOptions == nil {
<del> t.Fatal("Expected container DNSOptions to not be nil")
<del> }
<del>}
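The deleted tests above exercise container-name validation against a package-level regexp. As a standalone sketch of that check — the pattern below is an assumption consistent with the test fixtures, not necessarily Docker's exact rule:

package main

import (
	"fmt"
	"regexp"
)

// validContainerNamePattern mirrors the rule the deleted test exercises:
// an optional leading slash, then an alphanumeric, then [a-zA-Z0-9_.-]*.
var validContainerNamePattern = regexp.MustCompile(`^/?[a-zA-Z0-9][a-zA-Z0-9_.-]*$`)

func main() {
	for _, name := range []string{"word-word", "1weoid", "-rm", "safd%sd"} {
		fmt.Printf("%q valid: %v\n", name, validContainerNamePattern.MatchString(name))
	}
}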
<ide><path>daemon/container_unix.go
<del>// +build linux freebsd
<del>
<del>package daemon
<del>
<del>import (
<del> "fmt"
<del> "io/ioutil"
<del> "net"
<del> "os"
<del> "path"
<del> "path/filepath"
<del> "strconv"
<del> "strings"
<del> "syscall"
<del> "time"
<del>
<del> "github.com/Sirupsen/logrus"
<del> "github.com/docker/docker/daemon/execdriver"
<del> "github.com/docker/docker/daemon/links"
<del> "github.com/docker/docker/daemon/network"
<del> derr "github.com/docker/docker/errors"
<del> "github.com/docker/docker/pkg/fileutils"
<del> "github.com/docker/docker/pkg/idtools"
<del> "github.com/docker/docker/pkg/mount"
<del> "github.com/docker/docker/pkg/nat"
<del> "github.com/docker/docker/pkg/stringid"
<del> "github.com/docker/docker/pkg/symlink"
<del> "github.com/docker/docker/pkg/system"
<del> "github.com/docker/docker/pkg/ulimit"
<del> "github.com/docker/docker/runconfig"
<del> "github.com/docker/docker/utils"
<del> "github.com/docker/docker/volume"
<del> "github.com/docker/libnetwork"
<del> "github.com/docker/libnetwork/netlabel"
<del> "github.com/docker/libnetwork/options"
<del> "github.com/docker/libnetwork/types"
<del> "github.com/opencontainers/runc/libcontainer/configs"
<del> "github.com/opencontainers/runc/libcontainer/devices"
<del> "github.com/opencontainers/runc/libcontainer/label"
<del>)
<del>
<del>const (
<del>	// DefaultPathEnv is the unix-style list of directories to search for
<del>	// executables. Each directory is separated from the next by a colon
<del>	// (':') character.
<del> DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
<del>
<del> // DefaultSHMSize is the default size (64MB) of the SHM which will be mounted in the container
<del> DefaultSHMSize int64 = 67108864
<del>)
<del>
<del>// Container holds the fields specific to unixen implementations. See
<del>// CommonContainer for standard fields common to all containers.
<del>type Container struct {
<del> CommonContainer
<del>
<del> // Fields below here are platform specific.
<del> activeLinks map[string]*links.Link
<del> AppArmorProfile string
<del> HostnamePath string
<del> HostsPath string
<del> ShmPath string
<del> MqueuePath string
<del> ResolvConfPath string
<del>}
<del>
<del>func killProcessDirectly(container *Container) error {
<del> if _, err := container.WaitStop(10 * time.Second); err != nil {
<del> // Ensure that we don't kill ourselves
<del> if pid := container.GetPID(); pid != 0 {
<del> logrus.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", stringid.TruncateID(container.ID))
<del> if err := syscall.Kill(pid, 9); err != nil {
<del> if err != syscall.ESRCH {
<del> return err
<del> }
<del> logrus.Debugf("Cannot kill process (pid=%d) with signal 9: no such process.", pid)
<del> }
<del> }
<del> }
<del> return nil
<del>}
<del>
<del>func (daemon *Daemon) setupLinkedContainers(container *Container) ([]string, error) {
<del> var env []string
<del> children, err := daemon.children(container.Name)
<del> if err != nil {
<del> return nil, err
<del> }
<del>
<del> bridgeSettings := container.NetworkSettings.Networks["bridge"]
<del> if bridgeSettings == nil {
<del> return nil, nil
<del> }
<del>
<del> if len(children) > 0 {
<del> for linkAlias, child := range children {
<del> if !child.IsRunning() {
<del> return nil, derr.ErrorCodeLinkNotRunning.WithArgs(child.Name, linkAlias)
<del> }
<del>
<del> childBridgeSettings := child.NetworkSettings.Networks["bridge"]
<del> if childBridgeSettings == nil {
<del> return nil, fmt.Errorf("container %s not attached to default bridge network", child.ID)
<del> }
<del>
<del> link := links.NewLink(
<del> bridgeSettings.IPAddress,
<del> childBridgeSettings.IPAddress,
<del> linkAlias,
<del> child.Config.Env,
<del> child.Config.ExposedPorts,
<del> )
<del>
<del> for _, envVar := range link.ToEnv() {
<del> env = append(env, envVar)
<del> }
<del> }
<del> }
<del> return env, nil
<del>}
<del>
<del>func (container *Container) createDaemonEnvironment(linkedEnv []string) []string {
<del> // if a domain name was specified, append it to the hostname (see #7851)
<del> fullHostname := container.Config.Hostname
<del> if container.Config.Domainname != "" {
<del> fullHostname = fmt.Sprintf("%s.%s", fullHostname, container.Config.Domainname)
<del> }
<del> // Setup environment
<del> env := []string{
<del> "PATH=" + DefaultPathEnv,
<del> "HOSTNAME=" + fullHostname,
<del> // Note: we don't set HOME here because it'll get autoset intelligently
<del> // based on the value of USER inside dockerinit, but only if it isn't
<del> // set already (ie, that can be overridden by setting HOME via -e or ENV
<del> // in a Dockerfile).
<del> }
<del> if container.Config.Tty {
<del> env = append(env, "TERM=xterm")
<del> }
<del> env = append(env, linkedEnv...)
<del>	// Because the env on the container can override certain default values,
<del>	// we need to replace the 'env' keys where they match and append anything
<del>	// else.
<del> env = utils.ReplaceOrAppendEnvValues(env, container.Config.Env)
<del>
<del> return env
<del>}
<del>
<del>func getDevicesFromPath(deviceMapping runconfig.DeviceMapping) (devs []*configs.Device, err error) {
<del> device, err := devices.DeviceFromPath(deviceMapping.PathOnHost, deviceMapping.CgroupPermissions)
<del> // if there was no error, return the device
<del> if err == nil {
<del> device.Path = deviceMapping.PathInContainer
<del> return append(devs, device), nil
<del> }
<del>
<del>	// if the path is not a device node,
<del>	// check whether it is a directory holding many devices
<del> if err == devices.ErrNotADevice {
<del>
<del> // check if it is a directory
<del> if src, e := os.Stat(deviceMapping.PathOnHost); e == nil && src.IsDir() {
<del>
<del> // mount the internal devices recursively
<del> filepath.Walk(deviceMapping.PathOnHost, func(dpath string, f os.FileInfo, e error) error {
<del> childDevice, e := devices.DeviceFromPath(dpath, deviceMapping.CgroupPermissions)
<del> if e != nil {
<del> // ignore the device
<del> return nil
<del> }
<del>
<del> // add the device to userSpecified devices
<del> childDevice.Path = strings.Replace(dpath, deviceMapping.PathOnHost, deviceMapping.PathInContainer, 1)
<del> devs = append(devs, childDevice)
<del>
<del> return nil
<del> })
<del> }
<del> }
<del>
<del> if len(devs) > 0 {
<del> return devs, nil
<del> }
<del>
<del> return devs, derr.ErrorCodeDeviceInfo.WithArgs(deviceMapping.PathOnHost, err)
<del>}
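The directory branch of getDevicesFromPath above relies on filepath.Walk to expand a single host directory into many in-container device paths. A self-contained sketch of just that rewrite step (the paths are hypothetical):

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

func main() {
	hostDir, containerDir := "/dev/snd", "/dev/snd"
	// Visit everything under hostDir and print where each entry would
	// land inside the container, mirroring the strings.Replace above.
	filepath.Walk(hostDir, func(p string, info os.FileInfo, err error) error {
		if err != nil || info.IsDir() {
			return nil // skip unreadable entries and directories
		}
		fmt.Println(strings.Replace(p, hostDir, containerDir, 1))
		return nil
	})
}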
<del>
<del>func (daemon *Daemon) populateCommand(c *Container, env []string) error {
<del> var en *execdriver.Network
<del> if !c.Config.NetworkDisabled {
<del> en = &execdriver.Network{}
<del> if !daemon.execDriver.SupportsHooks() || c.hostConfig.NetworkMode.IsHost() {
<del> en.NamespacePath = c.NetworkSettings.SandboxKey
<del> }
<del>
<del> if c.hostConfig.NetworkMode.IsContainer() {
<del> nc, err := daemon.getNetworkedContainer(c.ID, c.hostConfig.NetworkMode.ConnectedContainer())
<del> if err != nil {
<del> return err
<del> }
<del> en.ContainerID = nc.ID
<del> }
<del> }
<del>
<del> ipc := &execdriver.Ipc{}
<del> var err error
<del> c.ShmPath, err = c.shmPath()
<del> if err != nil {
<del> return err
<del> }
<del>
<del> c.MqueuePath, err = c.mqueuePath()
<del> if err != nil {
<del> return err
<del> }
<del>
<del> if c.hostConfig.IpcMode.IsContainer() {
<del> ic, err := daemon.getIpcContainer(c)
<del> if err != nil {
<del> return err
<del> }
<del> ipc.ContainerID = ic.ID
<del> c.ShmPath = ic.ShmPath
<del> c.MqueuePath = ic.MqueuePath
<del> } else {
<del> ipc.HostIpc = c.hostConfig.IpcMode.IsHost()
<del> if ipc.HostIpc {
<del> if _, err := os.Stat("/dev/shm"); err != nil {
<del> return fmt.Errorf("/dev/shm is not mounted, but must be for --ipc=host")
<del> }
<del> if _, err := os.Stat("/dev/mqueue"); err != nil {
<del> return fmt.Errorf("/dev/mqueue is not mounted, but must be for --ipc=host")
<del> }
<del> c.ShmPath = "/dev/shm"
<del> c.MqueuePath = "/dev/mqueue"
<del> }
<del> }
<del>
<del> pid := &execdriver.Pid{}
<del> pid.HostPid = c.hostConfig.PidMode.IsHost()
<del>
<del> uts := &execdriver.UTS{
<del> HostUTS: c.hostConfig.UTSMode.IsHost(),
<del> }
<del>
<del> // Build lists of devices allowed and created within the container.
<del> var userSpecifiedDevices []*configs.Device
<del> for _, deviceMapping := range c.hostConfig.Devices {
<del> devs, err := getDevicesFromPath(deviceMapping)
<del> if err != nil {
<del> return err
<del> }
<del>
<del> userSpecifiedDevices = append(userSpecifiedDevices, devs...)
<del> }
<del>
<del> allowedDevices := mergeDevices(configs.DefaultAllowedDevices, userSpecifiedDevices)
<del>
<del> autoCreatedDevices := mergeDevices(configs.DefaultAutoCreatedDevices, userSpecifiedDevices)
<del>
<del> var rlimits []*ulimit.Rlimit
<del> ulimits := c.hostConfig.Ulimits
<del>
<del> // Merge ulimits with daemon defaults
<del> ulIdx := make(map[string]*ulimit.Ulimit)
<del> for _, ul := range ulimits {
<del> ulIdx[ul.Name] = ul
<del> }
<del> for name, ul := range daemon.configStore.Ulimits {
<del> if _, exists := ulIdx[name]; !exists {
<del> ulimits = append(ulimits, ul)
<del> }
<del> }
<del>
<del> weightDevices, err := getBlkioWeightDevices(c.hostConfig)
<del> if err != nil {
<del> return err
<del> }
<del>
<del> for _, limit := range ulimits {
<del> rl, err := limit.GetRlimit()
<del> if err != nil {
<del> return err
<del> }
<del> rlimits = append(rlimits, rl)
<del> }
<del>
<del> resources := &execdriver.Resources{
<del> CommonResources: execdriver.CommonResources{
<del> Memory: c.hostConfig.Memory,
<del> MemoryReservation: c.hostConfig.MemoryReservation,
<del> CPUShares: c.hostConfig.CPUShares,
<del> BlkioWeight: c.hostConfig.BlkioWeight,
<del> },
<del> MemorySwap: c.hostConfig.MemorySwap,
<del> KernelMemory: c.hostConfig.KernelMemory,
<del> CpusetCpus: c.hostConfig.CpusetCpus,
<del> CpusetMems: c.hostConfig.CpusetMems,
<del> CPUPeriod: c.hostConfig.CPUPeriod,
<del> CPUQuota: c.hostConfig.CPUQuota,
<del> Rlimits: rlimits,
<del> BlkioWeightDevice: weightDevices,
<del> OomKillDisable: c.hostConfig.OomKillDisable,
<del> MemorySwappiness: *c.hostConfig.MemorySwappiness,
<del> }
<del>
<del> processConfig := execdriver.ProcessConfig{
<del> CommonProcessConfig: execdriver.CommonProcessConfig{
<del> Entrypoint: c.Path,
<del> Arguments: c.Args,
<del> Tty: c.Config.Tty,
<del> },
<del> Privileged: c.hostConfig.Privileged,
<del> User: c.Config.User,
<del> }
<del>
<del> processConfig.SysProcAttr = &syscall.SysProcAttr{Setsid: true}
<del> processConfig.Env = env
<del>
<del> remappedRoot := &execdriver.User{}
<del> rootUID, rootGID := daemon.GetRemappedUIDGID()
<del> if rootUID != 0 {
<del> remappedRoot.UID = rootUID
<del> remappedRoot.GID = rootGID
<del> }
<del> uidMap, gidMap := daemon.GetUIDGIDMaps()
<del>
<del> c.command = &execdriver.Command{
<del> CommonCommand: execdriver.CommonCommand{
<del> ID: c.ID,
<del> InitPath: "/.dockerinit",
<del> MountLabel: c.getMountLabel(),
<del> Network: en,
<del> ProcessConfig: processConfig,
<del> ProcessLabel: c.getProcessLabel(),
<del> Rootfs: c.rootfsPath(),
<del> Resources: resources,
<del> WorkingDir: c.Config.WorkingDir,
<del> },
<del> AllowedDevices: allowedDevices,
<del> AppArmorProfile: c.AppArmorProfile,
<del> AutoCreatedDevices: autoCreatedDevices,
<del> CapAdd: c.hostConfig.CapAdd.Slice(),
<del> CapDrop: c.hostConfig.CapDrop.Slice(),
<del> CgroupParent: c.hostConfig.CgroupParent,
<del> GIDMapping: gidMap,
<del> GroupAdd: c.hostConfig.GroupAdd,
<del> Ipc: ipc,
<del> OomScoreAdj: c.hostConfig.OomScoreAdj,
<del> Pid: pid,
<del> ReadonlyRootfs: c.hostConfig.ReadonlyRootfs,
<del> RemappedRoot: remappedRoot,
<del> UIDMapping: uidMap,
<del> UTS: uts,
<del> }
<del>
<del> return nil
<del>}
<del>
<del>func mergeDevices(defaultDevices, userDevices []*configs.Device) []*configs.Device {
<del> if len(userDevices) == 0 {
<del> return defaultDevices
<del> }
<del>
<del> paths := map[string]*configs.Device{}
<del> for _, d := range userDevices {
<del> paths[d.Path] = d
<del> }
<del>
<del> var devs []*configs.Device
<del> for _, d := range defaultDevices {
<del> if _, defined := paths[d.Path]; !defined {
<del> devs = append(devs, d)
<del> }
<del> }
<del> return append(devs, userDevices...)
<del>}
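To make the merge semantics above concrete: a user-specified device replaces any default sharing its path, and every other default is kept. A toy, runnable version using a simplified Device type (not runc's configs.Device):

package main

import "fmt"

// Device is a stand-in for configs.Device, kept to two fields.
type Device struct{ Path, Perms string }

func mergeDevices(defaults, user []*Device) []*Device {
	if len(user) == 0 {
		return defaults
	}
	paths := map[string]*Device{}
	for _, d := range user {
		paths[d.Path] = d
	}
	var out []*Device
	for _, d := range defaults {
		if _, defined := paths[d.Path]; !defined {
			out = append(out, d) // default survives only if the user didn't redefine it
		}
	}
	return append(out, user...)
}

func main() {
	defaults := []*Device{{"/dev/null", "rwm"}, {"/dev/zero", "rwm"}}
	user := []*Device{{"/dev/null", "r"}} // overrides the default /dev/null
	for _, d := range mergeDevices(defaults, user) {
		fmt.Println(d.Path, d.Perms)
	}
}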
<del>
<del>// getSize returns the real size & virtual size of the container.
<del>func (daemon *Daemon) getSize(container *Container) (int64, int64) {
<del> var (
<del> sizeRw, sizeRootfs int64
<del> err error
<del> )
<del>
<del> if err := daemon.Mount(container); err != nil {
<del> logrus.Errorf("Failed to compute size of container rootfs %s: %s", container.ID, err)
<del> return sizeRw, sizeRootfs
<del> }
<del> defer daemon.Unmount(container)
<del>
<del> sizeRw, err = container.rwlayer.Size()
<del> if err != nil {
<del> logrus.Errorf("Driver %s couldn't return diff size of container %s: %s", daemon.driver, container.ID, err)
<del> // FIXME: GetSize should return an error. Not changing it now in case
<del> // there is a side-effect.
<del> sizeRw = -1
<del> }
<del>
<del> if parent := container.rwlayer.Parent(); parent != nil {
<del> sizeRootfs, err = parent.Size()
<del> if err != nil {
<del> sizeRootfs = -1
<del> } else if sizeRw != -1 {
<del> sizeRootfs += sizeRw
<del> }
<del> }
<del> return sizeRw, sizeRootfs
<del>}
<del>
<del>// Attempt to set the network mounts given a provided destination and
<del>// the path to use for it; return true if the given destination was a
<del>// network mount file
<del>func (container *Container) trySetNetworkMount(destination string, path string) bool {
<del> if destination == "/etc/resolv.conf" {
<del> container.ResolvConfPath = path
<del> return true
<del> }
<del> if destination == "/etc/hostname" {
<del> container.HostnamePath = path
<del> return true
<del> }
<del> if destination == "/etc/hosts" {
<del> container.HostsPath = path
<del> return true
<del> }
<del>
<del> return false
<del>}
<del>
<del>func (container *Container) buildHostnameFile() error {
<del> hostnamePath, err := container.getRootResourcePath("hostname")
<del> if err != nil {
<del> return err
<del> }
<del> container.HostnamePath = hostnamePath
<del>
<del> if container.Config.Domainname != "" {
<del> return ioutil.WriteFile(container.HostnamePath, []byte(fmt.Sprintf("%s.%s\n", container.Config.Hostname, container.Config.Domainname)), 0644)
<del> }
<del> return ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644)
<del>}
<del>
<del>func (daemon *Daemon) buildSandboxOptions(container *Container, n libnetwork.Network) ([]libnetwork.SandboxOption, error) {
<del> var (
<del> sboxOptions []libnetwork.SandboxOption
<del> err error
<del> dns []string
<del> dnsSearch []string
<del> dnsOptions []string
<del> )
<del>
<del> sboxOptions = append(sboxOptions, libnetwork.OptionHostname(container.Config.Hostname),
<del> libnetwork.OptionDomainname(container.Config.Domainname))
<del>
<del> if container.hostConfig.NetworkMode.IsHost() {
<del> sboxOptions = append(sboxOptions, libnetwork.OptionUseDefaultSandbox())
<del> sboxOptions = append(sboxOptions, libnetwork.OptionOriginHostsPath("/etc/hosts"))
<del> sboxOptions = append(sboxOptions, libnetwork.OptionOriginResolvConfPath("/etc/resolv.conf"))
<del> } else if daemon.execDriver.SupportsHooks() {
<del>		// OptionUseExternalKey is mandatory for userns support,
<del>		// but optional for non-userns support.
<del> sboxOptions = append(sboxOptions, libnetwork.OptionUseExternalKey())
<del> }
<del>
<del> container.HostsPath, err = container.getRootResourcePath("hosts")
<del> if err != nil {
<del> return nil, err
<del> }
<del> sboxOptions = append(sboxOptions, libnetwork.OptionHostsPath(container.HostsPath))
<del>
<del> container.ResolvConfPath, err = container.getRootResourcePath("resolv.conf")
<del> if err != nil {
<del> return nil, err
<del> }
<del> sboxOptions = append(sboxOptions, libnetwork.OptionResolvConfPath(container.ResolvConfPath))
<del>
<del> if len(container.hostConfig.DNS) > 0 {
<del> dns = container.hostConfig.DNS
<del> } else if len(daemon.configStore.DNS) > 0 {
<del> dns = daemon.configStore.DNS
<del> }
<del>
<del> for _, d := range dns {
<del> sboxOptions = append(sboxOptions, libnetwork.OptionDNS(d))
<del> }
<del>
<del> if len(container.hostConfig.DNSSearch) > 0 {
<del> dnsSearch = container.hostConfig.DNSSearch
<del> } else if len(daemon.configStore.DNSSearch) > 0 {
<del> dnsSearch = daemon.configStore.DNSSearch
<del> }
<del>
<del> for _, ds := range dnsSearch {
<del> sboxOptions = append(sboxOptions, libnetwork.OptionDNSSearch(ds))
<del> }
<del>
<del> if len(container.hostConfig.DNSOptions) > 0 {
<del> dnsOptions = container.hostConfig.DNSOptions
<del> } else if len(daemon.configStore.DNSOptions) > 0 {
<del> dnsOptions = daemon.configStore.DNSOptions
<del> }
<del>
<del> for _, ds := range dnsOptions {
<del> sboxOptions = append(sboxOptions, libnetwork.OptionDNSOptions(ds))
<del> }
<del>
<del> if container.NetworkSettings.SecondaryIPAddresses != nil {
<del> name := container.Config.Hostname
<del> if container.Config.Domainname != "" {
<del> name = name + "." + container.Config.Domainname
<del> }
<del>
<del> for _, a := range container.NetworkSettings.SecondaryIPAddresses {
<del> sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(name, a.Addr))
<del> }
<del> }
<del>
<del> for _, extraHost := range container.hostConfig.ExtraHosts {
<del> // allow IPv6 addresses in extra hosts; only split on first ":"
<del> parts := strings.SplitN(extraHost, ":", 2)
<del> sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(parts[0], parts[1]))
<del> }
<del>
<del>	// The link feature is supported only on the default bridge network;
<del>	// return early if this call to build join options is not for it.
<del> if n.Name() != "bridge" {
<del> return sboxOptions, nil
<del> }
<del>
<del> ep, _ := container.getEndpointInNetwork(n)
<del> if ep == nil {
<del> return sboxOptions, nil
<del> }
<del>
<del> var childEndpoints, parentEndpoints []string
<del>
<del> children, err := daemon.children(container.Name)
<del> if err != nil {
<del> return nil, err
<del> }
<del>
<del> for linkAlias, child := range children {
<del> if !isLinkable(child) {
<del> return nil, fmt.Errorf("Cannot link to %s, as it does not belong to the default network", child.Name)
<del> }
<del> _, alias := path.Split(linkAlias)
<del> // allow access to the linked container via the alias, real name, and container hostname
<del> aliasList := alias + " " + child.Config.Hostname
<del> // only add the name if alias isn't equal to the name
<del> if alias != child.Name[1:] {
<del> aliasList = aliasList + " " + child.Name[1:]
<del> }
<del> sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(aliasList, child.NetworkSettings.Networks["bridge"].IPAddress))
<del> cEndpoint, _ := child.getEndpointInNetwork(n)
<del> if cEndpoint != nil && cEndpoint.ID() != "" {
<del> childEndpoints = append(childEndpoints, cEndpoint.ID())
<del> }
<del> }
<del>
<del> bridgeSettings := container.NetworkSettings.Networks["bridge"]
<del> refs := daemon.containerGraph().RefPaths(container.ID)
<del> for _, ref := range refs {
<del> if ref.ParentID == "0" {
<del> continue
<del> }
<del>
<del> c, err := daemon.Get(ref.ParentID)
<del> if err != nil {
<del> logrus.Error(err)
<del> }
<del>
<del> if c != nil && !daemon.configStore.DisableBridge && container.hostConfig.NetworkMode.IsPrivate() {
<del> logrus.Debugf("Update /etc/hosts of %s for alias %s with ip %s", c.ID, ref.Name, bridgeSettings.IPAddress)
<del> sboxOptions = append(sboxOptions, libnetwork.OptionParentUpdate(c.ID, ref.Name, bridgeSettings.IPAddress))
<del> if ep.ID() != "" {
<del> parentEndpoints = append(parentEndpoints, ep.ID())
<del> }
<del> }
<del> }
<del>
<del> linkOptions := options.Generic{
<del> netlabel.GenericData: options.Generic{
<del> "ParentEndpoints": parentEndpoints,
<del> "ChildEndpoints": childEndpoints,
<del> },
<del> }
<del>
<del> sboxOptions = append(sboxOptions, libnetwork.OptionGeneric(linkOptions))
<del>
<del> return sboxOptions, nil
<del>}
<del>
<del>func isLinkable(child *Container) bool {
<del> // A container is linkable only if it belongs to the default network
<del> _, ok := child.NetworkSettings.Networks["bridge"]
<del> return ok
<del>}
<del>
<del>func (container *Container) getEndpointInNetwork(n libnetwork.Network) (libnetwork.Endpoint, error) {
<del> endpointName := strings.TrimPrefix(container.Name, "/")
<del> return n.EndpointByName(endpointName)
<del>}
<del>
<del>func (container *Container) buildPortMapInfo(ep libnetwork.Endpoint, networkSettings *network.Settings) (*network.Settings, error) {
<del> if ep == nil {
<del> return nil, derr.ErrorCodeEmptyEndpoint
<del> }
<del>
<del> if networkSettings == nil {
<del> return nil, derr.ErrorCodeEmptyNetwork
<del> }
<del>
<del> driverInfo, err := ep.DriverInfo()
<del> if err != nil {
<del> return nil, err
<del> }
<del>
<del> if driverInfo == nil {
<del>		// It is not an error for driverInfo to be nil
<del> return networkSettings, nil
<del> }
<del>
<del> if networkSettings.Ports == nil {
<del> networkSettings.Ports = nat.PortMap{}
<del> }
<del>
<del> if expData, ok := driverInfo[netlabel.ExposedPorts]; ok {
<del> if exposedPorts, ok := expData.([]types.TransportPort); ok {
<del> for _, tp := range exposedPorts {
<del> natPort, err := nat.NewPort(tp.Proto.String(), strconv.Itoa(int(tp.Port)))
<del> if err != nil {
<del> return nil, derr.ErrorCodeParsingPort.WithArgs(tp.Port, err)
<del> }
<del> networkSettings.Ports[natPort] = nil
<del> }
<del> }
<del> }
<del>
<del> mapData, ok := driverInfo[netlabel.PortMap]
<del> if !ok {
<del> return networkSettings, nil
<del> }
<del>
<del> if portMapping, ok := mapData.([]types.PortBinding); ok {
<del> for _, pp := range portMapping {
<del> natPort, err := nat.NewPort(pp.Proto.String(), strconv.Itoa(int(pp.Port)))
<del> if err != nil {
<del> return nil, err
<del> }
<del> natBndg := nat.PortBinding{HostIP: pp.HostIP.String(), HostPort: strconv.Itoa(int(pp.HostPort))}
<del> networkSettings.Ports[natPort] = append(networkSettings.Ports[natPort], natBndg)
<del> }
<del> }
<del>
<del> return networkSettings, nil
<del>}
<del>
<del>func (container *Container) buildEndpointInfo(n libnetwork.Network, ep libnetwork.Endpoint, networkSettings *network.Settings) (*network.Settings, error) {
<del> if ep == nil {
<del> return nil, derr.ErrorCodeEmptyEndpoint
<del> }
<del>
<del> if networkSettings == nil {
<del> return nil, derr.ErrorCodeEmptyNetwork
<del> }
<del>
<del> epInfo := ep.Info()
<del> if epInfo == nil {
<del> // It is not an error to get an empty endpoint info
<del> return networkSettings, nil
<del> }
<del>
<del> if _, ok := networkSettings.Networks[n.Name()]; !ok {
<del> networkSettings.Networks[n.Name()] = new(network.EndpointSettings)
<del> }
<del> networkSettings.Networks[n.Name()].EndpointID = ep.ID()
<del>
<del> iface := epInfo.Iface()
<del> if iface == nil {
<del> return networkSettings, nil
<del> }
<del>
<del> if iface.MacAddress() != nil {
<del> networkSettings.Networks[n.Name()].MacAddress = iface.MacAddress().String()
<del> }
<del>
<del> if iface.Address() != nil {
<del> ones, _ := iface.Address().Mask.Size()
<del> networkSettings.Networks[n.Name()].IPAddress = iface.Address().IP.String()
<del> networkSettings.Networks[n.Name()].IPPrefixLen = ones
<del> }
<del>
<del> if iface.AddressIPv6() != nil && iface.AddressIPv6().IP.To16() != nil {
<del> onesv6, _ := iface.AddressIPv6().Mask.Size()
<del> networkSettings.Networks[n.Name()].GlobalIPv6Address = iface.AddressIPv6().IP.String()
<del> networkSettings.Networks[n.Name()].GlobalIPv6PrefixLen = onesv6
<del> }
<del>
<del> return networkSettings, nil
<del>}
<del>
<del>func (container *Container) updateJoinInfo(n libnetwork.Network, ep libnetwork.Endpoint) error {
<del> if _, err := container.buildPortMapInfo(ep, container.NetworkSettings); err != nil {
<del> return err
<del> }
<del>
<del> epInfo := ep.Info()
<del> if epInfo == nil {
<del> // It is not an error to get an empty endpoint info
<del> return nil
<del> }
<del> if epInfo.Gateway() != nil {
<del> container.NetworkSettings.Networks[n.Name()].Gateway = epInfo.Gateway().String()
<del> }
<del> if epInfo.GatewayIPv6().To16() != nil {
<del> container.NetworkSettings.Networks[n.Name()].IPv6Gateway = epInfo.GatewayIPv6().String()
<del> }
<del>
<del> return nil
<del>}
<del>
<del>func (daemon *Daemon) updateNetworkSettings(container *Container, n libnetwork.Network) error {
<del> if container.NetworkSettings == nil {
<del> container.NetworkSettings = &network.Settings{Networks: make(map[string]*network.EndpointSettings)}
<del> }
<del>
<del> if !container.hostConfig.NetworkMode.IsHost() && runconfig.NetworkMode(n.Type()).IsHost() {
<del> return runconfig.ErrConflictHostNetwork
<del> }
<del>
<del> for s := range container.NetworkSettings.Networks {
<del> sn, err := daemon.FindNetwork(s)
<del> if err != nil {
<del> continue
<del> }
<del>
<del> if sn.Name() == n.Name() {
<del> // Avoid duplicate config
<del> return nil
<del> }
<del> if !runconfig.NetworkMode(sn.Type()).IsPrivate() ||
<del> !runconfig.NetworkMode(n.Type()).IsPrivate() {
<del> return runconfig.ErrConflictSharedNetwork
<del> }
<del> if runconfig.NetworkMode(sn.Name()).IsNone() ||
<del> runconfig.NetworkMode(n.Name()).IsNone() {
<del> return runconfig.ErrConflictNoNetwork
<del> }
<del> }
<del> container.NetworkSettings.Networks[n.Name()] = new(network.EndpointSettings)
<del>
<del> return nil
<del>}
<del>
<del>func (daemon *Daemon) updateEndpointNetworkSettings(container *Container, n libnetwork.Network, ep libnetwork.Endpoint) error {
<del> networkSettings, err := container.buildEndpointInfo(n, ep, container.NetworkSettings)
<del> if err != nil {
<del> return err
<del> }
<del>
<del> if container.hostConfig.NetworkMode == runconfig.NetworkMode("bridge") {
<del> networkSettings.Bridge = daemon.configStore.Bridge.Iface
<del> }
<del>
<del> return nil
<del>}
<del>
<del>func (container *Container) updateSandboxNetworkSettings(sb libnetwork.Sandbox) error {
<del> container.NetworkSettings.SandboxID = sb.ID()
<del> container.NetworkSettings.SandboxKey = sb.Key()
<del> return nil
<del>}
<del>
<del>// updateNetwork is used to update the container's network (e.g. when linked containers
<del>// get removed/unlinked).
<del>func (daemon *Daemon) updateNetwork(container *Container) error {
<del> ctrl := daemon.netController
<del> sid := container.NetworkSettings.SandboxID
<del>
<del> sb, err := ctrl.SandboxByID(sid)
<del> if err != nil {
<del> return derr.ErrorCodeNoSandbox.WithArgs(sid, err)
<del> }
<del>
<del> // Find if container is connected to the default bridge network
<del> var n libnetwork.Network
<del> for name := range container.NetworkSettings.Networks {
<del> sn, err := daemon.FindNetwork(name)
<del> if err != nil {
<del> continue
<del> }
<del> if sn.Name() == "bridge" {
<del> n = sn
<del> break
<del> }
<del> }
<del>
<del> if n == nil {
<del>		// Not connected to the default bridge network; nothing to do
<del> return nil
<del> }
<del>
<del> options, err := daemon.buildSandboxOptions(container, n)
<del> if err != nil {
<del> return derr.ErrorCodeNetworkUpdate.WithArgs(err)
<del> }
<del>
<del> if err := sb.Refresh(options...); err != nil {
<del> return derr.ErrorCodeNetworkRefresh.WithArgs(sid, err)
<del> }
<del>
<del> return nil
<del>}
<del>
<del>func (container *Container) buildCreateEndpointOptions(n libnetwork.Network) ([]libnetwork.EndpointOption, error) {
<del> var (
<del> portSpecs = make(nat.PortSet)
<del> bindings = make(nat.PortMap)
<del> pbList []types.PortBinding
<del> exposeList []types.TransportPort
<del> createOptions []libnetwork.EndpointOption
<del> )
<del>
<del> if n.Name() == "bridge" || container.NetworkSettings.IsAnonymousEndpoint {
<del> createOptions = append(createOptions, libnetwork.CreateOptionAnonymous())
<del> }
<del>
<del> // Other configs are applicable only for the endpoint in the network
<del>	// to which the container was connected on docker run.
<del> if n.Name() != container.hostConfig.NetworkMode.NetworkName() &&
<del> !(n.Name() == "bridge" && container.hostConfig.NetworkMode.IsDefault()) {
<del> return createOptions, nil
<del> }
<del>
<del> if container.Config.ExposedPorts != nil {
<del> portSpecs = container.Config.ExposedPorts
<del> }
<del>
<del> if container.hostConfig.PortBindings != nil {
<del> for p, b := range container.hostConfig.PortBindings {
<del> bindings[p] = []nat.PortBinding{}
<del> for _, bb := range b {
<del> bindings[p] = append(bindings[p], nat.PortBinding{
<del> HostIP: bb.HostIP,
<del> HostPort: bb.HostPort,
<del> })
<del> }
<del> }
<del> }
<del>
<del> ports := make([]nat.Port, len(portSpecs))
<del> var i int
<del> for p := range portSpecs {
<del> ports[i] = p
<del> i++
<del> }
<del> nat.SortPortMap(ports, bindings)
<del> for _, port := range ports {
<del> expose := types.TransportPort{}
<del> expose.Proto = types.ParseProtocol(port.Proto())
<del> expose.Port = uint16(port.Int())
<del> exposeList = append(exposeList, expose)
<del>
<del> pb := types.PortBinding{Port: expose.Port, Proto: expose.Proto}
<del> binding := bindings[port]
<del> for i := 0; i < len(binding); i++ {
<del> pbCopy := pb.GetCopy()
<del> newP, err := nat.NewPort(nat.SplitProtoPort(binding[i].HostPort))
<del> var portStart, portEnd int
<del> if err == nil {
<del> portStart, portEnd, err = newP.Range()
<del> }
<del> if err != nil {
<del> return nil, derr.ErrorCodeHostPort.WithArgs(binding[i].HostPort, err)
<del> }
<del> pbCopy.HostPort = uint16(portStart)
<del> pbCopy.HostPortEnd = uint16(portEnd)
<del> pbCopy.HostIP = net.ParseIP(binding[i].HostIP)
<del> pbList = append(pbList, pbCopy)
<del> }
<del>
<del> if container.hostConfig.PublishAllPorts && len(binding) == 0 {
<del> pbList = append(pbList, pb)
<del> }
<del> }
<del>
<del> createOptions = append(createOptions,
<del> libnetwork.CreateOptionPortMapping(pbList),
<del> libnetwork.CreateOptionExposedPorts(exposeList))
<del>
<del> if container.Config.MacAddress != "" {
<del> mac, err := net.ParseMAC(container.Config.MacAddress)
<del> if err != nil {
<del> return nil, err
<del> }
<del>
<del> genericOption := options.Generic{
<del> netlabel.MacAddress: mac,
<del> }
<del>
<del> createOptions = append(createOptions, libnetwork.EndpointOptionGeneric(genericOption))
<del> }
<del>
<del> return createOptions, nil
<del>}
<del>
<del>func (daemon *Daemon) allocateNetwork(container *Container) error {
<del> controller := daemon.netController
<del>
<del>	// Clean up any stale sandbox left over from an ungraceful daemon shutdown
<del>	if err := controller.SandboxDestroy(container.ID); err != nil {
<del>		logrus.Errorf("failed to clean up stale network sandbox for container %s", container.ID)
<del> }
<del>
<del> updateSettings := false
<del> if len(container.NetworkSettings.Networks) == 0 {
<del> mode := container.hostConfig.NetworkMode
<del> if container.Config.NetworkDisabled || mode.IsContainer() {
<del> return nil
<del> }
<del>
<del> networkName := mode.NetworkName()
<del> if mode.IsDefault() {
<del> networkName = controller.Config().Daemon.DefaultNetwork
<del> }
<del> if mode.IsUserDefined() {
<del> n, err := daemon.FindNetwork(networkName)
<del> if err != nil {
<del> return err
<del> }
<del> networkName = n.Name()
<del> }
<del> container.NetworkSettings.Networks = make(map[string]*network.EndpointSettings)
<del> container.NetworkSettings.Networks[networkName] = new(network.EndpointSettings)
<del> updateSettings = true
<del> }
<del>
<del> for n := range container.NetworkSettings.Networks {
<del> if err := daemon.connectToNetwork(container, n, updateSettings); err != nil {
<del> return err
<del> }
<del> }
<del>
<del> return container.writeHostConfig()
<del>}
<del>
<del>func (daemon *Daemon) getNetworkSandbox(container *Container) libnetwork.Sandbox {
<del> var sb libnetwork.Sandbox
<del> daemon.netController.WalkSandboxes(func(s libnetwork.Sandbox) bool {
<del> if s.ContainerID() == container.ID {
<del> sb = s
<del> return true
<del> }
<del> return false
<del> })
<del> return sb
<del>}
<del>
<del>// ConnectToNetwork connects a container to a network
<del>func (daemon *Daemon) ConnectToNetwork(container *Container, idOrName string) error {
<del> if !container.Running {
<del> return derr.ErrorCodeNotRunning.WithArgs(container.ID)
<del> }
<del> if err := daemon.connectToNetwork(container, idOrName, true); err != nil {
<del> return err
<del> }
<del> if err := container.toDiskLocking(); err != nil {
<del> return fmt.Errorf("Error saving container to disk: %v", err)
<del> }
<del> return nil
<del>}
<del>
<del>func (daemon *Daemon) connectToNetwork(container *Container, idOrName string, updateSettings bool) (err error) {
<del> if container.hostConfig.NetworkMode.IsContainer() {
<del> return runconfig.ErrConflictSharedNetwork
<del> }
<del>
<del> if runconfig.NetworkMode(idOrName).IsBridge() &&
<del> daemon.configStore.DisableBridge {
<del> container.Config.NetworkDisabled = true
<del> return nil
<del> }
<del>
<del> controller := daemon.netController
<del>
<del> n, err := daemon.FindNetwork(idOrName)
<del> if err != nil {
<del> return err
<del> }
<del>
<del> if updateSettings {
<del> if err := daemon.updateNetworkSettings(container, n); err != nil {
<del> return err
<del> }
<del> }
<del>
<del> ep, err := container.getEndpointInNetwork(n)
<del> if err == nil {
<del> return fmt.Errorf("container already connected to network %s", idOrName)
<del> }
<del>
<del> if _, ok := err.(libnetwork.ErrNoSuchEndpoint); !ok {
<del> return err
<del> }
<del>
<del> createOptions, err := container.buildCreateEndpointOptions(n)
<del> if err != nil {
<del> return err
<del> }
<del>
<del> endpointName := strings.TrimPrefix(container.Name, "/")
<del> ep, err = n.CreateEndpoint(endpointName, createOptions...)
<del> if err != nil {
<del> return err
<del> }
<del> defer func() {
<del> if err != nil {
<del> if e := ep.Delete(); e != nil {
<del>				logrus.Warnf("Could not roll back container connection to network %s", idOrName)
<del> }
<del> }
<del> }()
<del>
<del> if err := daemon.updateEndpointNetworkSettings(container, n, ep); err != nil {
<del> return err
<del> }
<del>
<del> sb := daemon.getNetworkSandbox(container)
<del> if sb == nil {
<del> options, err := daemon.buildSandboxOptions(container, n)
<del> if err != nil {
<del> return err
<del> }
<del> sb, err = controller.NewSandbox(container.ID, options...)
<del> if err != nil {
<del> return err
<del> }
<del>
<del> container.updateSandboxNetworkSettings(sb)
<del> }
<del>
<del> if err := ep.Join(sb); err != nil {
<del> return err
<del> }
<del>
<del> if err := container.updateJoinInfo(n, ep); err != nil {
<del> return derr.ErrorCodeJoinInfo.WithArgs(err)
<del> }
<del>
<del> return nil
<del>}
<del>
<del>func (daemon *Daemon) initializeNetworking(container *Container) error {
<del> var err error
<del>
<del> if container.hostConfig.NetworkMode.IsContainer() {
<del> // we need to get the hosts files from the container to join
<del> nc, err := daemon.getNetworkedContainer(container.ID, container.hostConfig.NetworkMode.ConnectedContainer())
<del> if err != nil {
<del> return err
<del> }
<del> container.HostnamePath = nc.HostnamePath
<del> container.HostsPath = nc.HostsPath
<del> container.ResolvConfPath = nc.ResolvConfPath
<del> container.Config.Hostname = nc.Config.Hostname
<del> container.Config.Domainname = nc.Config.Domainname
<del> return nil
<del> }
<del>
<del> if container.hostConfig.NetworkMode.IsHost() {
<del> container.Config.Hostname, err = os.Hostname()
<del> if err != nil {
<del> return err
<del> }
<del>
<del> parts := strings.SplitN(container.Config.Hostname, ".", 2)
<del> if len(parts) > 1 {
<del> container.Config.Hostname = parts[0]
<del> container.Config.Domainname = parts[1]
<del> }
<del>
<del> }
<del>
<del> if err := daemon.allocateNetwork(container); err != nil {
<del> return err
<del> }
<del>
<del> return container.buildHostnameFile()
<del>}
<del>
<del>// setNetworkNamespaceKey is called from the libcontainer pre-start hook to
<del>// link the container's network namespace to its libnetwork "sandbox" entity
<del>func (daemon *Daemon) setNetworkNamespaceKey(containerID string, pid int) error {
<del> path := fmt.Sprintf("/proc/%d/ns/net", pid)
<del> var sandbox libnetwork.Sandbox
<del> search := libnetwork.SandboxContainerWalker(&sandbox, containerID)
<del> daemon.netController.WalkSandboxes(search)
<del> if sandbox == nil {
<del> return derr.ErrorCodeNoSandbox.WithArgs(containerID, "no sandbox found")
<del> }
<del>
<del> return sandbox.SetKey(path)
<del>}
<del>
<del>func (daemon *Daemon) getIpcContainer(container *Container) (*Container, error) {
<del> containerID := container.hostConfig.IpcMode.Container()
<del> c, err := daemon.Get(containerID)
<del> if err != nil {
<del> return nil, err
<del> }
<del> if !c.IsRunning() {
<del> return nil, derr.ErrorCodeIPCRunning
<del> }
<del> return c, nil
<del>}
<del>
<del>func (container *Container) setupWorkingDirectory() error {
<del> if container.Config.WorkingDir == "" {
<del> return nil
<del> }
<del> container.Config.WorkingDir = filepath.Clean(container.Config.WorkingDir)
<del>
<del> pth, err := container.GetResourcePath(container.Config.WorkingDir)
<del> if err != nil {
<del> return err
<del> }
<del>
<del> pthInfo, err := os.Stat(pth)
<del> if err != nil {
<del> if !os.IsNotExist(err) {
<del> return err
<del> }
<del>
<del> if err := system.MkdirAll(pth, 0755); err != nil {
<del> return err
<del> }
<del> }
<del> if pthInfo != nil && !pthInfo.IsDir() {
<del> return derr.ErrorCodeNotADir.WithArgs(container.Config.WorkingDir)
<del> }
<del> return nil
<del>}
<del>
<del>func (daemon *Daemon) getNetworkedContainer(containerID, connectedContainerID string) (*Container, error) {
<del> nc, err := daemon.Get(connectedContainerID)
<del> if err != nil {
<del> return nil, err
<del> }
<del> if containerID == nc.ID {
<del> return nil, derr.ErrorCodeJoinSelf
<del> }
<del> if !nc.IsRunning() {
<del> return nil, derr.ErrorCodeJoinRunning.WithArgs(connectedContainerID)
<del> }
<del> return nc, nil
<del>}
<del>
<del>func (daemon *Daemon) releaseNetwork(container *Container) {
<del> if container.hostConfig.NetworkMode.IsContainer() || container.Config.NetworkDisabled {
<del> return
<del> }
<del>
<del> sid := container.NetworkSettings.SandboxID
<del> networks := container.NetworkSettings.Networks
<del> for n := range networks {
<del> networks[n] = &network.EndpointSettings{}
<del> }
<del>
<del> container.NetworkSettings = &network.Settings{Networks: networks}
<del>
<del> if sid == "" || len(networks) == 0 {
<del> return
<del> }
<del>
<del> sb, err := daemon.netController.SandboxByID(sid)
<del> if err != nil {
<del> logrus.Errorf("error locating sandbox id %s: %v", sid, err)
<del> return
<del> }
<del>
<del> if err := sb.Delete(); err != nil {
<del> logrus.Errorf("Error deleting sandbox id %s for container %s: %v", sid, container.ID, err)
<del> }
<del>}
<del>
<del>// DisconnectFromNetwork disconnects a container from a network
<del>func (container *Container) DisconnectFromNetwork(n libnetwork.Network) error {
<del> if !container.Running {
<del> return derr.ErrorCodeNotRunning.WithArgs(container.ID)
<del> }
<del>
<del> if container.hostConfig.NetworkMode.IsHost() && runconfig.NetworkMode(n.Type()).IsHost() {
<del> return runconfig.ErrConflictHostNetwork
<del> }
<del>
<del> if err := container.disconnectFromNetwork(n); err != nil {
<del> return err
<del> }
<del>
<del> if err := container.toDiskLocking(); err != nil {
<del> return fmt.Errorf("Error saving container to disk: %v", err)
<del> }
<del> return nil
<del>}
<del>
<del>func (container *Container) disconnectFromNetwork(n libnetwork.Network) error {
<del> var (
<del> ep libnetwork.Endpoint
<del> sbox libnetwork.Sandbox
<del> )
<del>
<del> s := func(current libnetwork.Endpoint) bool {
<del> epInfo := current.Info()
<del> if epInfo == nil {
<del> return false
<del> }
<del> if sb := epInfo.Sandbox(); sb != nil {
<del> if sb.ContainerID() == container.ID {
<del> ep = current
<del> sbox = sb
<del> return true
<del> }
<del> }
<del> return false
<del> }
<del> n.WalkEndpoints(s)
<del>
<del> if ep == nil {
<del> return fmt.Errorf("container %s is not connected to the network", container.ID)
<del> }
<del>
<del> if err := ep.Leave(sbox); err != nil {
<del> return fmt.Errorf("container %s failed to leave network %s: %v", container.ID, n.Name(), err)
<del> }
<del>
<del> if err := ep.Delete(); err != nil {
<del> return fmt.Errorf("endpoint delete failed for container %s on network %s: %v", container.ID, n.Name(), err)
<del> }
<del>
<del> delete(container.NetworkSettings.Networks, n.Name())
<del> return nil
<del>}
<del>
<del>// appendNetworkMounts appends any network mounts to the array of mount points passed in
<del>func appendNetworkMounts(container *Container, volumeMounts []volume.MountPoint) ([]volume.MountPoint, error) {
<del> for _, mnt := range container.networkMounts() {
<del> dest, err := container.GetResourcePath(mnt.Destination)
<del> if err != nil {
<del> return nil, err
<del> }
<del> volumeMounts = append(volumeMounts, volume.MountPoint{Destination: dest})
<del> }
<del> return volumeMounts, nil
<del>}
<del>
<del>func (container *Container) networkMounts() []execdriver.Mount {
<del> var mounts []execdriver.Mount
<del> shared := container.hostConfig.NetworkMode.IsContainer()
<del> if container.ResolvConfPath != "" {
<del> if _, err := os.Stat(container.ResolvConfPath); err != nil {
<del> logrus.Warnf("ResolvConfPath set to %q, but can't stat this filename (err = %v); skipping", container.ResolvConfPath, err)
<del> } else {
<del> label.Relabel(container.ResolvConfPath, container.MountLabel, shared)
<del> writable := !container.hostConfig.ReadonlyRootfs
<del> if m, exists := container.MountPoints["/etc/resolv.conf"]; exists {
<del> writable = m.RW
<del> }
<del> mounts = append(mounts, execdriver.Mount{
<del> Source: container.ResolvConfPath,
<del> Destination: "/etc/resolv.conf",
<del> Writable: writable,
<del> Private: true,
<del> })
<del> }
<del> }
<del> if container.HostnamePath != "" {
<del> if _, err := os.Stat(container.HostnamePath); err != nil {
<del> logrus.Warnf("HostnamePath set to %q, but can't stat this filename (err = %v); skipping", container.HostnamePath, err)
<del> } else {
<del> label.Relabel(container.HostnamePath, container.MountLabel, shared)
<del> writable := !container.hostConfig.ReadonlyRootfs
<del> if m, exists := container.MountPoints["/etc/hostname"]; exists {
<del> writable = m.RW
<del> }
<del> mounts = append(mounts, execdriver.Mount{
<del> Source: container.HostnamePath,
<del> Destination: "/etc/hostname",
<del> Writable: writable,
<del> Private: true,
<del> })
<del> }
<del> }
<del> if container.HostsPath != "" {
<del> if _, err := os.Stat(container.HostsPath); err != nil {
<del> logrus.Warnf("HostsPath set to %q, but can't stat this filename (err = %v); skipping", container.HostsPath, err)
<del> } else {
<del> label.Relabel(container.HostsPath, container.MountLabel, shared)
<del> writable := !container.hostConfig.ReadonlyRootfs
<del> if m, exists := container.MountPoints["/etc/hosts"]; exists {
<del> writable = m.RW
<del> }
<del> mounts = append(mounts, execdriver.Mount{
<del> Source: container.HostsPath,
<del> Destination: "/etc/hosts",
<del> Writable: writable,
<del> Private: true,
<del> })
<del> }
<del> }
<del> return mounts
<del>}
<del>
<del>func (container *Container) copyImagePathContent(v volume.Volume, destination string) error {
<del> rootfs, err := symlink.FollowSymlinkInScope(filepath.Join(container.basefs, destination), container.basefs)
<del> if err != nil {
<del> return err
<del> }
<del>
<del> if _, err = ioutil.ReadDir(rootfs); err != nil {
<del> if os.IsNotExist(err) {
<del> return nil
<del> }
<del> return err
<del> }
<del>
<del> path, err := v.Mount()
<del> if err != nil {
<del> return err
<del> }
<del>
<del> if err := copyExistingContents(rootfs, path); err != nil {
<del> return err
<del> }
<del>
<del> return v.Unmount()
<del>}
<del>
<del>func (container *Container) shmPath() (string, error) {
<del> return container.getRootResourcePath("shm")
<del>}
<del>func (container *Container) mqueuePath() (string, error) {
<del> return container.getRootResourcePath("mqueue")
<del>}
<del>
<del>func (container *Container) hasMountFor(path string) bool {
<del> _, exists := container.MountPoints[path]
<del> return exists
<del>}
<del>
<del>func (daemon *Daemon) setupIpcDirs(container *Container) error {
<del> rootUID, rootGID := daemon.GetRemappedUIDGID()
<del> if !container.hasMountFor("/dev/shm") {
<del> shmPath, err := container.shmPath()
<del> if err != nil {
<del> return err
<del> }
<del>
<del> if err := idtools.MkdirAllAs(shmPath, 0700, rootUID, rootGID); err != nil {
<del> return err
<del> }
<del>
<del> shmSize := DefaultSHMSize
<del> if container.hostConfig.ShmSize != nil {
<del> shmSize = *container.hostConfig.ShmSize
<del> }
<del>
<del> shmproperty := "mode=1777,size=" + strconv.FormatInt(shmSize, 10)
<del> if err := syscall.Mount("shm", shmPath, "tmpfs", uintptr(syscall.MS_NOEXEC|syscall.MS_NOSUID|syscall.MS_NODEV), label.FormatMountLabel(shmproperty, container.getMountLabel())); err != nil {
<del> return fmt.Errorf("mounting shm tmpfs: %s", err)
<del> }
<del> if err := os.Chown(shmPath, rootUID, rootGID); err != nil {
<del> return err
<del> }
<del> }
<del>
<del> if !container.hasMountFor("/dev/mqueue") {
<del> mqueuePath, err := container.mqueuePath()
<del> if err != nil {
<del> return err
<del> }
<del>
<del> if err := idtools.MkdirAllAs(mqueuePath, 0700, rootUID, rootGID); err != nil {
<del> return err
<del> }
<del>
<del> if err := syscall.Mount("mqueue", mqueuePath, "mqueue", uintptr(syscall.MS_NOEXEC|syscall.MS_NOSUID|syscall.MS_NODEV), ""); err != nil {
<del>			return fmt.Errorf("mounting mqueue: %s", err)
<del> }
<del> if err := os.Chown(mqueuePath, rootUID, rootGID); err != nil {
<del> return err
<del> }
<del> }
<del>
<del> return nil
<del>}
<del>
<del>func (container *Container) unmountIpcMounts(unmount func(pth string) error) {
<del> if container.hostConfig.IpcMode.IsContainer() || container.hostConfig.IpcMode.IsHost() {
<del> return
<del> }
<del>
<del> var warnings []string
<del>
<del> if !container.hasMountFor("/dev/shm") {
<del> shmPath, err := container.shmPath()
<del> if err != nil {
<del> logrus.Error(err)
<del> warnings = append(warnings, err.Error())
<del> } else if shmPath != "" {
<del> if err := unmount(shmPath); err != nil {
<del> warnings = append(warnings, fmt.Sprintf("failed to umount %s: %v", shmPath, err))
<del> }
<del>
<del> }
<del> }
<del>
<del> if !container.hasMountFor("/dev/mqueue") {
<del> mqueuePath, err := container.mqueuePath()
<del> if err != nil {
<del> logrus.Error(err)
<del> warnings = append(warnings, err.Error())
<del> } else if mqueuePath != "" {
<del> if err := unmount(mqueuePath); err != nil {
<del> warnings = append(warnings, fmt.Sprintf("failed to umount %s: %v", mqueuePath, err))
<del> }
<del> }
<del> }
<del>
<del> if len(warnings) > 0 {
<del> logrus.Warnf("failed to cleanup ipc mounts:\n%v", strings.Join(warnings, "\n"))
<del> }
<del>}
<del>
<del>func (container *Container) ipcMounts() []execdriver.Mount {
<del> var mounts []execdriver.Mount
<del>
<del> if !container.hasMountFor("/dev/shm") {
<del> label.SetFileLabel(container.ShmPath, container.MountLabel)
<del> mounts = append(mounts, execdriver.Mount{
<del> Source: container.ShmPath,
<del> Destination: "/dev/shm",
<del> Writable: true,
<del> Private: true,
<del> })
<del> }
<del>
<del> if !container.hasMountFor("/dev/mqueue") {
<del> label.SetFileLabel(container.MqueuePath, container.MountLabel)
<del> mounts = append(mounts, execdriver.Mount{
<del> Source: container.MqueuePath,
<del> Destination: "/dev/mqueue",
<del> Writable: true,
<del> Private: true,
<del> })
<del> }
<del> return mounts
<del>}
<del>
<del>func detachMounted(path string) error {
<del> return syscall.Unmount(path, syscall.MNT_DETACH)
<del>}
<del>
<del>func (daemon *Daemon) mountVolumes(container *Container) error {
<del> mounts, err := daemon.setupMounts(container)
<del> if err != nil {
<del> return err
<del> }
<del>
<del> for _, m := range mounts {
<del> dest, err := container.GetResourcePath(m.Destination)
<del> if err != nil {
<del> return err
<del> }
<del>
<del> var stat os.FileInfo
<del> stat, err = os.Stat(m.Source)
<del> if err != nil {
<del> return err
<del> }
<del> if err = fileutils.CreateIfNotExists(dest, stat.IsDir()); err != nil {
<del> return err
<del> }
<del>
<del> opts := "rbind,ro"
<del> if m.Writable {
<del> opts = "rbind,rw"
<del> }
<del>
<del> if err := mount.Mount(m.Source, dest, "bind", opts); err != nil {
<del> return err
<del> }
<del> }
<del>
<del> return nil
<del>}
<del>
<del>func (container *Container) unmountVolumes(forceSyscall bool) error {
<del> var (
<del> volumeMounts []volume.MountPoint
<del> err error
<del> )
<del>
<del> for _, mntPoint := range container.MountPoints {
<del> dest, err := container.GetResourcePath(mntPoint.Destination)
<del> if err != nil {
<del> return err
<del> }
<del>
<del> volumeMounts = append(volumeMounts, volume.MountPoint{Destination: dest, Volume: mntPoint.Volume})
<del> }
<del>
<del> // Append any network mounts to the list (this is a no-op on Windows)
<del> if volumeMounts, err = appendNetworkMounts(container, volumeMounts); err != nil {
<del> return err
<del> }
<del>
<del> for _, volumeMount := range volumeMounts {
<del> if forceSyscall {
<del> if err := detachMounted(volumeMount.Destination); err != nil {
<del>				logrus.Warnf("%s unmountVolumes: Failed to do lazy umount: %v", container.ID, err)
<del> }
<del> }
<del>
<del> if volumeMount.Volume != nil {
<del> if err := volumeMount.Volume.Unmount(); err != nil {
<del> return err
<del> }
<del> }
<del> }
<del>
<del> return nil
<del>}
<del>
<del>func (container *Container) tmpfsMounts() []execdriver.Mount {
<del> var mounts []execdriver.Mount
<del> for dest, data := range container.hostConfig.Tmpfs {
<del> mounts = append(mounts, execdriver.Mount{
<del> Source: "tmpfs",
<del> Destination: dest,
<del> Data: data,
<del> })
<del> }
<del> return mounts
<del>}
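detachMounted above is the one platform primitive this unmount path depends on: MNT_DETACH performs a lazy unmount, removing the mount point from the namespace immediately while the kernel finishes tearing it down once it is no longer busy, which is why unmountVolumes can use it as a forced fallback. A minimal linux-only sketch — the target path is hypothetical and the call needs root plus an actual mount to succeed:

// +build linux

package main

import (
	"fmt"
	"syscall"
)

// detachMounted mirrors the helper above: a lazy (MNT_DETACH) unmount.
func detachMounted(path string) error {
	return syscall.Unmount(path, syscall.MNT_DETACH)
}

func main() {
	if err := detachMounted("/mnt/example"); err != nil {
		fmt.Println("lazy umount failed:", err)
	}
}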
<ide><path>daemon/create.go
<ide> package daemon
<ide> import (
<ide> "github.com/Sirupsen/logrus"
<ide> "github.com/docker/docker/api/types"
<add> "github.com/docker/docker/container"
<ide> derr "github.com/docker/docker/errors"
<ide> "github.com/docker/docker/image"
<ide> "github.com/docker/docker/pkg/idtools"
<ide> func (daemon *Daemon) ContainerCreate(params *ContainerCreateConfig) (types.Cont
<ide> }
<ide>
<ide> // Create creates a new container from the given configuration with a given name.
<del>func (daemon *Daemon) create(params *ContainerCreateConfig) (retC *Container, retErr error) {
<add>func (daemon *Daemon) create(params *ContainerCreateConfig) (retC *container.Container, retErr error) {
<ide> var (
<del> container *Container
<add> container *container.Container
<ide> img *image.Image
<ide> imgID image.ID
<ide> err error
<ide> func (daemon *Daemon) create(params *ContainerCreateConfig) (retC *Container, re
<ide> if err != nil {
<ide> return nil, err
<ide> }
<del> if err := idtools.MkdirAs(container.root, 0700, rootUID, rootGID); err != nil {
<add> if err := idtools.MkdirAs(container.Root, 0700, rootUID, rootGID); err != nil {
<ide> return nil, err
<ide> }
<ide>
<ide> func (daemon *Daemon) create(params *ContainerCreateConfig) (retC *Container, re
<ide> return nil, err
<ide> }
<ide>
<del> if err := container.toDiskLocking(); err != nil {
<add> if err := container.ToDiskLocking(); err != nil {
<ide> logrus.Errorf("Error saving new container to disk: %v", err)
<ide> return nil, err
<ide> }
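The renames in this hunk (toDiskLocking becoming ToDiskLocking, root becoming Root) are forced by the move into the new container package: Go exports only identifiers that start with an upper-case letter, so anything package daemon still touches must be capitalized once it crosses the package boundary. An illustrative sketch of the idea, not the real container package:

package container

import (
	"encoding/json"
	"os"
	"path/filepath"
	"sync"
)

// Container is a trimmed-down stand-in for the real type.
type Container struct {
	sync.Mutex
	ID   string
	Root string
}

// ToDiskLocking must be exported now that its callers live in package
// daemon; a lowercase toDiskLocking would no longer be visible to them.
func (c *Container) ToDiskLocking() error {
	c.Lock()
	defer c.Unlock()
	f, err := os.Create(filepath.Join(c.Root, "config.json"))
	if err != nil {
		return err
	}
	defer f.Close()
	return json.NewEncoder(f).Encode(c)
}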
<ide><path>daemon/create_unix.go
<ide> import (
<ide> "os"
<ide> "path/filepath"
<ide>
<add> "github.com/docker/docker/container"
<ide> derr "github.com/docker/docker/errors"
<ide> "github.com/docker/docker/image"
<ide> "github.com/docker/docker/pkg/stringid"
<ide> import (
<ide> )
<ide>
<ide> // createContainerPlatformSpecificSettings performs platform specific container create functionality
<del>func (daemon *Daemon) createContainerPlatformSpecificSettings(container *Container, config *runconfig.Config, hostConfig *runconfig.HostConfig, img *image.Image) error {
<add>func (daemon *Daemon) createContainerPlatformSpecificSettings(container *container.Container, config *runconfig.Config, hostConfig *runconfig.HostConfig, img *image.Image) error {
<ide> if err := daemon.Mount(container); err != nil {
<ide> return err
<ide> }
<ide> func (daemon *Daemon) createContainerPlatformSpecificSettings(container *Contain
<ide>
<ide> // Skip volumes for which we already have something mounted on that
<ide> // destination because of a --volume-from.
<del> if container.isDestinationMounted(destination) {
<add> if container.IsDestinationMounted(destination) {
<ide> continue
<ide> }
<ide> path, err := container.GetResourcePath(destination)
<ide> func (daemon *Daemon) createContainerPlatformSpecificSettings(container *Contain
<ide>
<ide> // never attempt to copy existing content in a container FS to a shared volume
<ide> if v.DriverName() == volume.DefaultDriverName {
<del> if err := container.copyImagePathContent(v, destination); err != nil {
<add> if err := container.CopyImagePathContent(v, destination); err != nil {
<ide> return err
<ide> }
<ide> }
<ide>
<del> container.addMountPointWithVolume(destination, v, true)
<add> container.AddMountPointWithVolume(destination, v, true)
<ide> }
<ide> return nil
<ide> }
<ide><path>daemon/create_windows.go
<ide> package daemon
<ide> import (
<ide> "fmt"
<ide>
<add> "github.com/docker/docker/container"
<ide> "github.com/docker/docker/image"
<ide> "github.com/docker/docker/pkg/stringid"
<ide> "github.com/docker/docker/runconfig"
<ide> "github.com/docker/docker/volume"
<ide> )
<ide>
<ide> // createContainerPlatformSpecificSettings performs platform specific container create functionality
<del>func (daemon *Daemon) createContainerPlatformSpecificSettings(container *Container, config *runconfig.Config, hostConfig *runconfig.HostConfig, img *image.Image) error {
<add>func (daemon *Daemon) createContainerPlatformSpecificSettings(container *container.Container, config *runconfig.Config, hostConfig *runconfig.HostConfig, img *image.Image) error {
<ide> for spec := range config.Volumes {
<ide>
<ide> mp, err := volume.ParseMountSpec(spec, hostConfig.VolumeDriver)
<ide> func (daemon *Daemon) createContainerPlatformSpecificSettings(container *Contain
<ide>
<ide> // Skip volumes for which we already have something mounted on that
<ide> // destination because of a --volume-from.
<del> if container.isDestinationMounted(mp.Destination) {
<add> if container.IsDestinationMounted(mp.Destination) {
<ide> continue
<ide> }
<ide>
<ide> func (daemon *Daemon) createContainerPlatformSpecificSettings(container *Contain
<ide> //
<ide> // // never attempt to copy existing content in a container FS to a shared volume
<ide> // if v.DriverName() == volume.DefaultDriverName {
<del> // if err := container.copyImagePathContent(v, mp.Destination); err != nil {
<add> // if err := container.CopyImagePathContent(v, mp.Destination); err != nil {
<ide> // return err
<ide> // }
<ide> // }
<ide>
<ide> // Add it to container.MountPoints
<del> container.addMountPointWithVolume(mp.Destination, v, mp.RW)
<add> container.AddMountPointWithVolume(mp.Destination, v, mp.RW)
<ide> }
<ide> return nil
<ide> }
<ide><path>daemon/daemon.go
<ide> import (
<ide> "github.com/docker/docker/api"
<ide> "github.com/docker/docker/api/types"
<ide> "github.com/docker/docker/cliconfig"
<add> "github.com/docker/docker/container"
<ide> "github.com/docker/docker/daemon/events"
<ide> "github.com/docker/docker/daemon/exec"
<ide> "github.com/docker/docker/daemon/execdriver"
<ide> func (e ErrImageDoesNotExist) Error() string {
<ide> }
<ide>
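<add>// contStore is a mutex-protected map of containers, indexed by container ID.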
<ide> type contStore struct {
<del> s map[string]*Container
<add> s map[string]*container.Container
<ide> sync.Mutex
<ide> }
<ide>
<del>func (c *contStore) Add(id string, cont *Container) {
<add>func (c *contStore) Add(id string, cont *container.Container) {
<ide> c.Lock()
<ide> c.s[id] = cont
<ide> c.Unlock()
<ide> }
<ide>
<del>func (c *contStore) Get(id string) *Container {
<add>func (c *contStore) Get(id string) *container.Container {
<ide> c.Lock()
<ide> res := c.s[id]
<ide> c.Unlock()
<ide> func (c *contStore) Delete(id string) {
<ide> c.Unlock()
<ide> }
<ide>
<del>func (c *contStore) List() []*Container {
<add>func (c *contStore) List() []*container.Container {
<ide> containers := new(History)
<ide> c.Lock()
<ide> for _, cont := range c.s {
<ide> type Daemon struct {
<ide> // - A partial container ID prefix (e.g. short ID) of any length that is
<ide> // unique enough to only return a single container object
<ide> // If none of these searches succeed, an error is returned
<del>func (daemon *Daemon) Get(prefixOrName string) (*Container, error) {
<add>func (daemon *Daemon) Get(prefixOrName string) (*container.Container, error) {
<ide> if containerByID := daemon.containers.Get(prefixOrName); containerByID != nil {
<ide> // prefix is an exact match to a full container ID
<ide> return containerByID, nil
<ide> func (daemon *Daemon) Exists(id string) bool {
<ide> // IsPaused returns a bool indicating if the specified container is paused.
<ide> func (daemon *Daemon) IsPaused(id string) bool {
<ide> c, _ := daemon.Get(id)
<del> return c.State.isPaused()
<add> return c.State.IsPaused()
<ide> }
<ide>
<ide> func (daemon *Daemon) containerRoot(id string) string {
<ide> func (daemon *Daemon) containerRoot(id string) string {
<ide>
<ide> // Load reads the contents of a container from disk
<ide> // This is typically done at startup.
<del>func (daemon *Daemon) load(id string) (*Container, error) {
<add>func (daemon *Daemon) load(id string) (*container.Container, error) {
<ide> container := daemon.newBaseContainer(id)
<ide>
<del> if err := container.fromDisk(); err != nil {
<add> if err := container.FromDisk(); err != nil {
<ide> return nil, err
<ide> }
<ide>
<ide> func (daemon *Daemon) load(id string) (*Container, error) {
<ide> }
<ide>
<ide> // Register makes a container object usable by the daemon as <container.ID>
<del>func (daemon *Daemon) Register(container *Container) error {
<add>func (daemon *Daemon) Register(container *container.Container) error {
<ide> if daemon.Exists(container.ID) {
<ide> return fmt.Errorf("Container is already loaded")
<ide> }
<ide> func (daemon *Daemon) Register(container *Container) error {
<ide> if container.IsRunning() {
<ide> logrus.Debugf("killing old running container %s", container.ID)
<ide> // Set exit code to 128 + SIGKILL (9) to properly represent unsuccessful exit
<del> container.setStoppedLocking(&execdriver.ExitStatus{ExitCode: 137})
<add> container.SetStoppedLocking(&execdriver.ExitStatus{ExitCode: 137})
<ide> // use the current driver and ensure that the container is dead x.x
<ide> cmd := &execdriver.Command{
<ide> CommonCommand: execdriver.CommonCommand{
<ide> func (daemon *Daemon) Register(container *Container) error {
<ide> }
<ide> daemon.execDriver.Terminate(cmd)
<ide>
<del> container.unmountIpcMounts(mount.Unmount)
<add> container.UnmountIpcMounts(mount.Unmount)
<ide>
<ide> daemon.Unmount(container)
<del> if err := container.toDiskLocking(); err != nil {
<add> if err := container.ToDiskLocking(); err != nil {
<ide> logrus.Errorf("Error saving stopped state to disk: %v", err)
<ide> }
<ide> }
<ide> func (daemon *Daemon) Register(container *Container) error {
<ide> return nil
<ide> }
<ide>
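<add>// ensureName generates a name for the container if it does not have one, and persists it to disk.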
<del>func (daemon *Daemon) ensureName(container *Container) error {
<add>func (daemon *Daemon) ensureName(container *container.Container) error {
<ide> if container.Name == "" {
<ide> name, err := daemon.generateNewName(container.ID)
<ide> if err != nil {
<ide> return err
<ide> }
<ide> container.Name = name
<ide>
<del> if err := container.toDiskLocking(); err != nil {
<add> if err := container.ToDiskLocking(); err != nil {
<ide> logrus.Errorf("Error saving container name to disk: %v", err)
<ide> }
<ide> }
<ide> func (daemon *Daemon) ensureName(container *Container) error {
<ide>
<ide> func (daemon *Daemon) restore() error {
<ide> type cr struct {
<del> container *Container
<add> container *container.Container
<ide> registered bool
<ide> }
<ide>
<ide> func (daemon *Daemon) restore() error {
<ide> for _, c := range containers {
<ide> group.Add(1)
<ide>
<del> go func(container *Container, registered bool) {
<add> go func(container *container.Container, registered bool) {
<ide> defer group.Done()
<ide>
<ide> if !registered {
<ide> func (daemon *Daemon) restore() error {
<ide>
<ide> // check the restart policy on the containers and restart any container with
<ide> // the restart policy of "always"
<del> if daemon.configStore.AutoRestart && container.shouldRestart() {
<add> if daemon.configStore.AutoRestart && container.ShouldRestart() {
<ide> logrus.Debugf("Starting container %s", container.ID)
<ide>
<ide> if err := daemon.containerStart(container); err != nil {
<ide> func (daemon *Daemon) getEntrypointAndArgs(configEntrypoint *stringutils.StrSlic
<ide> return cmdSlice[0], cmdSlice[1:]
<ide> }
<ide>
<del>func (daemon *Daemon) newContainer(name string, config *runconfig.Config, imgID image.ID) (*Container, error) {
<add>func (daemon *Daemon) newContainer(name string, config *runconfig.Config, imgID image.ID) (*container.Container, error) {
<ide> var (
<ide> id string
<ide> err error
<ide> func (daemon *Daemon) newContainer(name string, config *runconfig.Config, imgID
<ide> base.Path = entrypoint
<ide> base.Args = args //FIXME: de-duplicate from config
<ide> base.Config = config
<del> base.hostConfig = &runconfig.HostConfig{}
<add> base.HostConfig = &runconfig.HostConfig{}
<ide> base.ImageID = imgID
<ide> base.NetworkSettings = &network.Settings{IsAnonymousEndpoint: noExplicitName}
<ide> base.Name = name
<ide> func GetFullContainerName(name string) (string, error) {
<ide> }
<ide>
<ide> // GetByName returns a container given a name.
<del>func (daemon *Daemon) GetByName(name string) (*Container, error) {
<add>func (daemon *Daemon) GetByName(name string) (*container.Container, error) {
<ide> fullName, err := GetFullContainerName(name)
<ide> if err != nil {
<ide> return nil, err
<ide> func (daemon *Daemon) GetLabels(id string) map[string]string {
<ide> // children returns all child containers of the container with the
<ide> // given name. The containers are returned as a map from the container
<ide> // name to a pointer to Container.
<del>func (daemon *Daemon) children(name string) (map[string]*Container, error) {
<add>func (daemon *Daemon) children(name string) (map[string]*container.Container, error) {
<ide> name, err := GetFullContainerName(name)
<ide> if err != nil {
<ide> return nil, err
<ide> }
<del> children := make(map[string]*Container)
<add> children := make(map[string]*container.Container)
<ide>
<ide> err = daemon.containerGraphDB.Walk(name, func(p string, e *graphdb.Entity) error {
<ide> c, err := daemon.Get(e.ID())
<ide> func (daemon *Daemon) parents(name string) ([]string, error) {
<ide> return daemon.containerGraphDB.Parents(name)
<ide> }
<ide>
<del>func (daemon *Daemon) registerLink(parent, child *Container, alias string) error {
<add>func (daemon *Daemon) registerLink(parent, child *container.Container, alias string) error {
<ide> fullName := filepath.Join(parent.Name, alias)
<ide> if !daemon.containerGraphDB.Exists(fullName) {
<ide> _, err := daemon.containerGraphDB.Set(fullName, child.ID)
<ide> func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo
<ide>
<ide> d.ID = trustKey.PublicKey().KeyID()
<ide> d.repository = daemonRepo
<del> d.containers = &contStore{s: make(map[string]*Container)}
<add> d.containers = &contStore{s: make(map[string]*container.Container)}
<ide> d.execCommands = exec.NewStore()
<ide> d.tagStore = tagStore
<ide> d.distributionPool = distributionPool
<ide> func NewDaemon(config *Config, registryService *registry.Service) (daemon *Daemo
<ide> return d, nil
<ide> }
<ide>
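<add>// shutdownContainer cleanly stops a single container, handling the paused case first.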
<del>func (daemon *Daemon) shutdownContainer(c *Container) error {
<add>func (daemon *Daemon) shutdownContainer(c *container.Container) error {
<ide> // TODO(windows): Handle docker restart with paused containers
<del> if c.isPaused() {
<add> if c.IsPaused() {
<ide> // To terminate a process in freezer cgroup, we should send
<ide> // SIGTERM to this process then unfreeze it, and the process will
<ide> // force to terminate immediately.
<ide> func (daemon *Daemon) Shutdown() error {
<ide> if daemon.containers != nil {
<ide> group := sync.WaitGroup{}
<ide> logrus.Debug("starting clean shutdown of all containers...")
<del> for _, container := range daemon.List() {
<del> if !container.IsRunning() {
<add> for _, cont := range daemon.List() {
<add> if !cont.IsRunning() {
<ide> continue
<ide> }
<del> logrus.Debugf("stopping %s", container.ID)
<add> logrus.Debugf("stopping %s", cont.ID)
<ide> group.Add(1)
<del> go func(c *Container) {
<add> go func(c *container.Container) {
<ide> defer group.Done()
<ide> if err := daemon.shutdownContainer(c); err != nil {
<ide> logrus.Errorf("Stop container error: %v", err)
<ide> return
<ide> }
<ide> logrus.Debugf("container stopped %s", c.ID)
<del> }(container)
<add> }(cont)
<ide> }
<ide> group.Wait()
<ide> }
<ide> func (daemon *Daemon) Shutdown() error {
<ide> return nil
<ide> }
<ide>
<del>// Mount sets container.basefs
<add>// Mount sets container.BaseFS
<ide> // (is it not set coming in? why is it unset?)
<del>func (daemon *Daemon) Mount(container *Container) error {
<add>func (daemon *Daemon) Mount(container *container.Container) error {
<ide> var layerID layer.ChainID
<ide> if container.ImageID != "" {
<ide> img, err := daemon.imageStore.Get(container.ImageID)
<ide> func (daemon *Daemon) Mount(container *Container) error {
<ide> }
<ide> layerID = img.RootFS.ChainID()
<ide> }
<del> rwlayer, err := daemon.layerStore.Mount(container.ID, layerID, container.getMountLabel(), daemon.setupInitLayer)
<add> rwlayer, err := daemon.layerStore.Mount(container.ID, layerID, container.GetMountLabel(), daemon.setupInitLayer)
<ide> if err != nil {
<ide> return err
<ide> }
<ide> func (daemon *Daemon) Mount(container *Container) error {
<ide> }
<ide> logrus.Debugf("container mounted via layerStore: %v", dir)
<ide>
<del> if container.basefs != dir {
<add> if container.BaseFS != dir {
<ide> // The mount path reported by the graph driver should always be trusted on Windows, since the
<ide> // volume path for a given mounted layer may change over time. This should only be an error
<ide> // on non-Windows operating systems.
<del> if container.basefs != "" && runtime.GOOS != "windows" {
<add> if container.BaseFS != "" && runtime.GOOS != "windows" {
<ide> daemon.Unmount(container)
<ide> return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')",
<del> daemon.driver, container.ID, container.basefs, dir)
<add> daemon.driver, container.ID, container.BaseFS, dir)
<ide> }
<ide> }
<del> container.basefs = dir // TODO: combine these fields
<del> container.rwlayer = rwlayer
<add> container.BaseFS = dir // TODO: combine these fields
<add> container.RWLayer = rwlayer
<ide> return nil
<ide> }
<ide>
<ide> // Unmount unsets the container base filesystem
<del>func (daemon *Daemon) Unmount(container *Container) {
<add>func (daemon *Daemon) Unmount(container *container.Container) {
<ide> if err := daemon.layerStore.Unmount(container.ID); err != nil {
<ide> logrus.Errorf("Error unmounting container %s: %s", container.ID, err)
<ide> }
<ide> }
<ide>
<ide> // Run uses the execution driver to run a given container
<del>func (daemon *Daemon) Run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (execdriver.ExitStatus, error) {
<add>func (daemon *Daemon) Run(c *container.Container, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (execdriver.ExitStatus, error) {
<ide> hooks := execdriver.Hooks{
<ide> Start: startCallback,
<ide> }
<ide> hooks.PreStart = append(hooks.PreStart, func(processConfig *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error {
<ide> return daemon.setNetworkNamespaceKey(c.ID, pid)
<ide> })
<del> return daemon.execDriver.Run(c.command, pipes, hooks)
<add> return daemon.execDriver.Run(c.Command, pipes, hooks)
<ide> }
<ide>
<del>func (daemon *Daemon) kill(c *Container, sig int) error {
<del> return daemon.execDriver.Kill(c.command, sig)
<add>func (daemon *Daemon) kill(c *container.Container, sig int) error {
<add> return daemon.execDriver.Kill(c.Command, sig)
<ide> }
<ide>
<del>func (daemon *Daemon) stats(c *Container) (*execdriver.ResourceStats, error) {
<add>func (daemon *Daemon) stats(c *container.Container) (*execdriver.ResourceStats, error) {
<ide> return daemon.execDriver.Stats(c.ID)
<ide> }
<ide>
<del>func (daemon *Daemon) subscribeToContainerStats(c *Container) chan interface{} {
<add>func (daemon *Daemon) subscribeToContainerStats(c *container.Container) chan interface{} {
<ide> return daemon.statsCollector.collect(c)
<ide> }
<ide>
<del>func (daemon *Daemon) unsubscribeToContainerStats(c *Container, ch chan interface{}) {
<add>func (daemon *Daemon) unsubscribeToContainerStats(c *container.Container, ch chan interface{}) {
<ide> daemon.statsCollector.unsubscribe(c, ch)
<ide> }
<ide>
<del>func (daemon *Daemon) changes(container *Container) ([]archive.Change, error) {
<add>func (daemon *Daemon) changes(container *container.Container) ([]archive.Change, error) {
<ide> return daemon.layerStore.Changes(container.ID)
<ide> }
<ide>
<ide> func tempDir(rootDir string, rootUID, rootGID int) (string, error) {
<ide> return tmpDir, idtools.MkdirAllAs(tmpDir, 0700, rootUID, rootGID)
<ide> }
<ide>
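<add>// setHostConfig parses the security options, stores the host configuration on the container and writes it to disk.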
<del>func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.HostConfig) error {
<add>func (daemon *Daemon) setHostConfig(container *container.Container, hostConfig *runconfig.HostConfig) error {
<ide> container.Lock()
<ide> if err := parseSecurityOpt(container, hostConfig); err != nil {
<ide> container.Unlock()
<ide> func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.
<ide> return err
<ide> }
<ide>
<del> container.hostConfig = hostConfig
<del> container.toDisk()
<add> container.HostConfig = hostConfig
<add> container.ToDisk()
<ide> return nil
<ide> }
<ide>
<ide> func (daemon *Daemon) IsShuttingDown() bool {
<ide> }
<ide>
<ide> // GetContainerStats collects all the stats published by a container
<del>func (daemon *Daemon) GetContainerStats(container *Container) (*execdriver.ResourceStats, error) {
<add>func (daemon *Daemon) GetContainerStats(container *container.Container) (*execdriver.ResourceStats, error) {
<ide> stats, err := daemon.stats(container)
<ide> if err != nil {
<ide> return nil, err
<ide> func (daemon *Daemon) GetContainerStats(container *Container) (*execdriver.Resou
<ide> return stats, nil
<ide> }
<ide>
<del>func (daemon *Daemon) getNetworkStats(c *Container) ([]*libcontainer.NetworkInterface, error) {
<add>func (daemon *Daemon) getNetworkStats(c *container.Container) ([]*libcontainer.NetworkInterface, error) {
<ide> var list []*libcontainer.NetworkInterface
<ide>
<ide> sb, err := daemon.netController.SandboxByID(c.NetworkSettings.SandboxID)
<ide> func (daemon *Daemon) getNetworkStats(c *Container) ([]*libcontainer.NetworkInte
<ide>
<ide> // newBaseContainer creates a new container with its initial
<ide> // configuration based on the root storage from the daemon.
<del>func (daemon *Daemon) newBaseContainer(id string) *Container {
<del> return newBaseContainer(id, daemon.containerRoot(id))
<add>func (daemon *Daemon) newBaseContainer(id string) *container.Container {
<add> return container.NewBaseContainer(id, daemon.containerRoot(id))
<ide> }
<ide>
<ide> func convertLnNetworkStats(name string, stats *lntypes.InterfaceStatistics) *libcontainer.NetworkInterface {
<ide> func convertLnNetworkStats(name string, stats *lntypes.InterfaceStatistics) *lib
<ide> n.TxDropped = stats.TxDropped
<ide> return n
<ide> }
<add>
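<add>// validateID returns an error if the given container ID is empty.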
<add>func validateID(id string) error {
<add> if id == "" {
<add> return derr.ErrorCodeEmptyID
<add> }
<add> return nil
<add>}
<ide><path>daemon/daemon_test.go
<ide> package daemon
<ide>
<ide> import (
<add> "io/ioutil"
<ide> "os"
<ide> "path"
<add> "path/filepath"
<ide> "testing"
<ide>
<add> "github.com/docker/docker/container"
<ide> "github.com/docker/docker/pkg/graphdb"
<ide> "github.com/docker/docker/pkg/truncindex"
<ide> "github.com/docker/docker/runconfig"
<add> "github.com/docker/docker/volume"
<ide> volumedrivers "github.com/docker/docker/volume/drivers"
<ide> "github.com/docker/docker/volume/local"
<ide> "github.com/docker/docker/volume/store"
<ide> import (
<ide> //
<ide>
<ide> func TestGet(t *testing.T) {
<del> c1 := &Container{
<del> CommonContainer: CommonContainer{
<add> c1 := &container.Container{
<add> CommonContainer: container.CommonContainer{
<ide> ID: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57",
<ide> Name: "tender_bardeen",
<ide> },
<ide> }
<ide>
<del> c2 := &Container{
<del> CommonContainer: CommonContainer{
<add> c2 := &container.Container{
<add> CommonContainer: container.CommonContainer{
<ide> ID: "3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de",
<ide> Name: "drunk_hawking",
<ide> },
<ide> }
<ide>
<del> c3 := &Container{
<del> CommonContainer: CommonContainer{
<add> c3 := &container.Container{
<add> CommonContainer: container.CommonContainer{
<ide> ID: "3cdbd1aa394fd68559fd1441d6eff2abfafdcba06e72d2febdba229008b0bf57",
<ide> Name: "3cdbd1aa",
<ide> },
<ide> }
<ide>
<del> c4 := &Container{
<del> CommonContainer: CommonContainer{
<add> c4 := &container.Container{
<add> CommonContainer: container.CommonContainer{
<ide> ID: "75fb0b800922abdbef2d27e60abcdfaf7fb0698b2a96d22d3354da361a6ff4a5",
<ide> Name: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57",
<ide> },
<ide> }
<ide>
<del> c5 := &Container{
<del> CommonContainer: CommonContainer{
<add> c5 := &container.Container{
<add> CommonContainer: container.CommonContainer{
<ide> ID: "d22d69a2b8960bf7fafdcba06e72d2febdba960bf7fafdcba06e72d2f9008b060b",
<ide> Name: "d22d69a2b896",
<ide> },
<ide> }
<ide>
<ide> store := &contStore{
<del> s: map[string]*Container{
<add> s: map[string]*container.Container{
<ide> c1.ID: c1,
<ide> c2.ID: c2,
<ide> c3.ID: c3,
<ide> func initDaemonWithVolumeStore(tmp string) (*Daemon, error) {
<ide> }
<ide>
<ide> func TestParseSecurityOpt(t *testing.T) {
<del> container := &Container{}
<add> container := &container.Container{}
<ide> config := &runconfig.HostConfig{}
<ide>
<ide> // test apparmor
<ide> func TestNetworkOptions(t *testing.T) {
<ide> t.Fatalf("Expected networkOptions error, got nil")
<ide> }
<ide> }
<add>
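<add>// TestGetFullName checks that container names are normalized with a leading slash and that empty names are rejected.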
<add>func TestGetFullName(t *testing.T) {
<add> name, err := GetFullContainerName("testing")
<add> if err != nil {
<add> t.Fatal(err)
<add> }
<add> if name != "/testing" {
<add> t.Fatalf("Expected /testing got %s", name)
<add> }
<add> if _, err := GetFullContainerName(""); err == nil {
<add> t.Fatal("Error should not be nil")
<add> }
<add>}
<add>
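<add>// TestValidContainerNames exercises validContainerNamePattern against known valid and invalid names.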
<add>func TestValidContainerNames(t *testing.T) {
<add> invalidNames := []string{"-rm", "&sdfsfd", "safd%sd"}
<add> validNames := []string{"word-word", "word_word", "1weoid"}
<add>
<add> for _, name := range invalidNames {
<add> if validContainerNamePattern.MatchString(name) {
<add> t.Fatalf("%q is not a valid container name and was returned as valid.", name)
<add> }
<add> }
<add>
<add> for _, name := range validNames {
<add> if !validContainerNamePattern.MatchString(name) {
<add> t.Fatalf("%q is a valid container name and was returned as invalid.", name)
<add> }
<add> }
<add>}
<add>
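<add>// TestContainerInitDNS checks that the DNS fields of the host config are initialized when a container is loaded from disk.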
<add>func TestContainerInitDNS(t *testing.T) {
<add> tmp, err := ioutil.TempDir("", "docker-container-test-")
<add> if err != nil {
<add> t.Fatal(err)
<add> }
<add> defer os.RemoveAll(tmp)
<add>
<add> containerID := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e"
<add> containerPath := filepath.Join(tmp, containerID)
<add> if err := os.MkdirAll(containerPath, 0755); err != nil {
<add> t.Fatal(err)
<add> }
<add>
<add> config := `{"State":{"Running":true,"Paused":false,"Restarting":false,"OOMKilled":false,"Dead":false,"Pid":2464,"ExitCode":0,
<add>"Error":"","StartedAt":"2015-05-26T16:48:53.869308965Z","FinishedAt":"0001-01-01T00:00:00Z"},
<add>"ID":"d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e","Created":"2015-05-26T16:48:53.7987917Z","Path":"top",
<add>"Args":[],"Config":{"Hostname":"d59df5276e7b","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"",
<add>"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":true,"OpenStdin":true,
<add>"StdinOnce":false,"Env":null,"Cmd":["top"],"Image":"ubuntu:latest","Volumes":null,"WorkingDir":"","Entrypoint":null,
<add>"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":{}},"Image":"07f8e8c5e66084bef8f848877857537ffe1c47edd01a93af27e7161672ad0e95",
<add>"NetworkSettings":{"IPAddress":"172.17.0.1","IPPrefixLen":16,"MacAddress":"02:42:ac:11:00:01","LinkLocalIPv6Address":"fe80::42:acff:fe11:1",
<add>"LinkLocalIPv6PrefixLen":64,"GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"Gateway":"172.17.42.1","IPv6Gateway":"","Bridge":"docker0","Ports":{}},
<add>"ResolvConfPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/resolv.conf",
<add>"HostnamePath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hostname",
<add>"HostsPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hosts",
<add>"LogPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e-json.log",
<add>"Name":"/ubuntu","Driver":"aufs","MountLabel":"","ProcessLabel":"","AppArmorProfile":"","RestartCount":0,
<add>"UpdateDns":false,"Volumes":{},"VolumesRW":{},"AppliedVolumesFrom":null}`
<add>
<add> // Container struct is only used to retrieve the path to the config file
<add> container := &container.Container{CommonContainer: container.CommonContainer{Root: containerPath}}
<add> configPath, err := container.ConfigPath()
<add> if err != nil {
<add> t.Fatal(err)
<add> }
<add> if err = ioutil.WriteFile(configPath, []byte(config), 0644); err != nil {
<add> t.Fatal(err)
<add> }
<add>
<add> hostConfig := `{"Binds":[],"ContainerIDFile":"","Memory":0,"MemorySwap":0,"CpuShares":0,"CpusetCpus":"",
<add>"Privileged":false,"PortBindings":{},"Links":null,"PublishAllPorts":false,"Dns":null,"DnsOptions":null,"DnsSearch":null,"ExtraHosts":null,"VolumesFrom":null,
<add>"Devices":[],"NetworkMode":"bridge","IpcMode":"","PidMode":"","CapAdd":null,"CapDrop":null,"RestartPolicy":{"Name":"no","MaximumRetryCount":0},
<add>"SecurityOpt":null,"ReadonlyRootfs":false,"Ulimits":null,"LogConfig":{"Type":"","Config":null},"CgroupParent":""}`
<add>
<add> hostConfigPath, err := container.HostConfigPath()
<add> if err != nil {
<add> t.Fatal(err)
<add> }
<add> if err = ioutil.WriteFile(hostConfigPath, []byte(hostConfig), 0644); err != nil {
<add> t.Fatal(err)
<add> }
<add>
<add> daemon, err := initDaemonWithVolumeStore(tmp)
<add> if err != nil {
<add> t.Fatal(err)
<add> }
<add> defer volumedrivers.Unregister(volume.DefaultDriverName)
<add>
<add> c, err := daemon.load(containerID)
<add> if err != nil {
<add> t.Fatal(err)
<add> }
<add>
<add> if c.HostConfig.DNS == nil {
<add> t.Fatal("Expected container DNS to not be nil")
<add> }
<add>
<add> if c.HostConfig.DNSSearch == nil {
<add> t.Fatal("Expected container DNSSearch to not be nil")
<add> }
<add>
<add> if c.HostConfig.DNSOptions == nil {
<add> t.Fatal("Expected container DNSOptions to not be nil")
<add> }
<add>}
<ide><path>daemon/daemon_unix.go
<ide> import (
<ide> "syscall"
<ide>
<ide> "github.com/Sirupsen/logrus"
<add> "github.com/docker/docker/container"
<ide> "github.com/docker/docker/daemon/graphdriver"
<ide> derr "github.com/docker/docker/errors"
<ide> "github.com/docker/docker/image"
<ide> func getBlkioWeightDevices(config *runconfig.HostConfig) ([]*blkiodev.WeightDevi
<ide> return BlkioWeightDevices, nil
<ide> }
<ide>
<del>func parseSecurityOpt(container *Container, config *runconfig.HostConfig) error {
<add>func parseSecurityOpt(container *container.Container, config *runconfig.HostConfig) error {
<ide> var (
<ide> labelOpts []string
<ide> err error
<ide> func (daemon *Daemon) adaptContainerSettings(hostConfig *runconfig.HostConfig, a
<ide> hostConfig.MemorySwap = hostConfig.Memory * 2
<ide> }
<ide> if hostConfig.ShmSize == nil {
<del> shmSize := DefaultSHMSize
<add> shmSize := container.DefaultSHMSize
<ide> hostConfig.ShmSize = &shmSize
<ide> }
<ide> var err error
<ide> func setupInitLayer(initLayer string, rootUID, rootGID int) error {
<ide> }
<ide>
<ide> // registerLinks writes the links to a file.
<del>func (daemon *Daemon) registerLinks(container *Container, hostConfig *runconfig.HostConfig) error {
<add>func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *runconfig.HostConfig) error {
<ide> if hostConfig == nil || hostConfig.Links == nil {
<ide> return nil
<ide> }
<ide> func (daemon *Daemon) registerLinks(container *Container, hostConfig *runconfig.
<ide> // An error from daemon.Get() means this name could not be found
<ide> return fmt.Errorf("Could not get container for %s", name)
<ide> }
<del> for child.hostConfig.NetworkMode.IsContainer() {
<del> parts := strings.SplitN(string(child.hostConfig.NetworkMode), ":", 2)
<add> for child.HostConfig.NetworkMode.IsContainer() {
<add> parts := strings.SplitN(string(child.HostConfig.NetworkMode), ":", 2)
<ide> child, err = daemon.Get(parts[1])
<ide> if err != nil {
<ide> return fmt.Errorf("Could not get container for %s", parts[1])
<ide> }
<ide> }
<del> if child.hostConfig.NetworkMode.IsHost() {
<add> if child.HostConfig.NetworkMode.IsHost() {
<ide> return runconfig.ErrConflictHostNetworkAndLinks
<ide> }
<ide> if err := daemon.registerLink(container, child, alias); err != nil {
<ide> func (daemon *Daemon) registerLinks(container *Container, hostConfig *runconfig.
<ide> // After we load all the links into the daemon
<ide> // set them to nil on the hostconfig
<ide> hostConfig.Links = nil
<del> if err := container.writeHostConfig(); err != nil {
<add> if err := container.WriteHostConfig(); err != nil {
<ide> return err
<ide> }
<ide>
<ide> func (daemon *Daemon) registerLinks(container *Container, hostConfig *runconfig.
<ide>
<ide> // conditionalMountOnStart is a platform specific helper function called during
<ide> // container start to mount the container's filesystem.
<del>func (daemon *Daemon) conditionalMountOnStart(container *Container) error {
<add>func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error {
<ide> return daemon.Mount(container)
<ide> }
<ide>
<ide> // conditionalUnmountOnCleanup is a platform specific helper function called
<ide> // during the cleanup of a container to unmount.
<del>func (daemon *Daemon) conditionalUnmountOnCleanup(container *Container) {
<add>func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) {
<ide> daemon.Unmount(container)
<ide> }
<ide>
<ide><path>daemon/daemon_windows.go
<ide> import (
<ide>
<ide> "github.com/Sirupsen/logrus"
<ide> "github.com/docker/distribution/reference"
<add> "github.com/docker/docker/container"
<ide> "github.com/docker/docker/daemon/graphdriver"
<ide> "github.com/docker/docker/dockerversion"
<ide> "github.com/docker/docker/image"
<ide> func getBlkioWeightDevices(config *runconfig.HostConfig) ([]*blkiodev.WeightDevi
<ide> return nil, nil
<ide> }
<ide>
<del>func parseSecurityOpt(container *Container, config *runconfig.HostConfig) error {
<add>func parseSecurityOpt(container *container.Container, config *runconfig.HostConfig) error {
<ide> return nil
<ide> }
<ide>
<ide> func (daemon *Daemon) initNetworkController(config *Config) (libnetwork.NetworkC
<ide>
<ide> // registerLinks sets up links between containers and writes the
<ide> // configuration out for persistence. As of Windows TP4, links are not supported.
<del>func (daemon *Daemon) registerLinks(container *Container, hostConfig *runconfig.HostConfig) error {
<add>func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *runconfig.HostConfig) error {
<ide> return nil
<ide> }
<ide>
<ide> func (daemon *Daemon) cleanupMounts() error {
<ide>
<ide> // conditionalMountOnStart is a platform specific helper function called during
<ide> // container start to mount the container's filesystem.
<del>func (daemon *Daemon) conditionalMountOnStart(container *Container) error {
<add>func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error {
<ide> // We do not mount if a Hyper-V container
<del> if !container.hostConfig.Isolation.IsHyperV() {
<add> if !container.HostConfig.Isolation.IsHyperV() {
<ide> if err := daemon.Mount(container); err != nil {
<ide> return err
<ide> }
<ide> func (daemon *Daemon) conditionalMountOnStart(container *Container) error {
<ide>
<ide> // conditionalUnmountOnCleanup is a platform specific helper function called
<ide> // during the cleanup of a container to unmount.
<del>func (daemon *Daemon) conditionalUnmountOnCleanup(container *Container) {
<add>func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) {
<ide> // We do not unmount if a Hyper-V container
<del> if !container.hostConfig.Isolation.IsHyperV() {
<add> if !container.HostConfig.Isolation.IsHyperV() {
<ide> daemon.Unmount(container)
<ide> }
<ide> }
<ide><path>daemon/daemonbuilder/builder.go
<ide> import (
<ide> "github.com/docker/docker/api"
<ide> "github.com/docker/docker/builder"
<ide> "github.com/docker/docker/cliconfig"
<add> "github.com/docker/docker/container"
<ide> "github.com/docker/docker/daemon"
<ide> "github.com/docker/docker/image"
<ide> "github.com/docker/docker/pkg/archive"
<ide> func (d Docker) Pull(name string) (*image.Image, error) {
<ide> }
<ide>
<ide> // Container looks up a Docker container referenced by `id`.
<del>func (d Docker) Container(id string) (*daemon.Container, error) {
<add>func (d Docker) Container(id string) (*container.Container, error) {
<ide> return d.Daemon.Get(id)
<ide> }
<ide>
<ide> // Create creates a new Docker container and returns potential warnings
<del>func (d Docker) Create(cfg *runconfig.Config, hostCfg *runconfig.HostConfig) (*daemon.Container, []string, error) {
<add>func (d Docker) Create(cfg *runconfig.Config, hostCfg *runconfig.HostConfig) (*container.Container, []string, error) {
<ide> ccr, err := d.Daemon.ContainerCreate(&daemon.ContainerCreateConfig{
<ide> Name: "",
<ide> Config: cfg,
<ide> func (d Docker) Release(sessionID string, activeImages []string) {
<ide> // specified by a container object.
<ide> // TODO: make sure callers don't unnecessarily convert destPath with filepath.FromSlash (Copy does it already).
<ide> // Copy should take in abstract paths (with slashes) and the implementation should convert it to OS-specific paths.
<del>func (d Docker) Copy(c *daemon.Container, destPath string, src builder.FileInfo, decompress bool) error {
<add>func (d Docker) Copy(c *container.Container, destPath string, src builder.FileInfo, decompress bool) error {
<ide> srcPath := src.Path()
<ide> destExists := true
<ide> rootUID, rootGID := d.Daemon.GetRemappedUIDGID()
<ide> func (d Docker) GetCachedImage(imgID string, cfg *runconfig.Config) (string, err
<ide> }
<ide>
<ide> // Kill stops the container execution abruptly.
<del>func (d Docker) Kill(container *daemon.Container) error {
<add>func (d Docker) Kill(container *container.Container) error {
<ide> return d.Daemon.Kill(container)
<ide> }
<ide>
<ide> // Mount mounts the root filesystem for the container.
<del>func (d Docker) Mount(c *daemon.Container) error {
<add>func (d Docker) Mount(c *container.Container) error {
<ide> return d.Daemon.Mount(c)
<ide> }
<ide>
<ide> // Unmount unmounts the root filesystem for the container.
<del>func (d Docker) Unmount(c *daemon.Container) error {
<add>func (d Docker) Unmount(c *container.Container) error {
<ide> d.Daemon.Unmount(c)
<ide> return nil
<ide> }
<ide>
<ide> // Start starts a container
<del>func (d Docker) Start(c *daemon.Container) error {
<add>func (d Docker) Start(c *container.Container) error {
<ide> return d.Daemon.Start(c)
<ide> }
<ide>
<ide><path>daemon/delete.go
<ide> import (
<ide> "path"
<ide>
<ide> "github.com/Sirupsen/logrus"
<add> "github.com/docker/docker/container"
<ide> derr "github.com/docker/docker/errors"
<ide> "github.com/docker/docker/layer"
<ide> volumestore "github.com/docker/docker/volume/store"
<ide> func (daemon *Daemon) ContainerRm(name string, config *ContainerRmConfig) error
<ide> }
<ide>
<ide> // Container state RemovalInProgress should be used to avoid races.
<del> if err = container.setRemovalInProgress(); err != nil {
<add> if err = container.SetRemovalInProgress(); err != nil {
<ide> if err == derr.ErrorCodeAlreadyRemoving {
<ide> // do not fail when the removal is in progress started by other request.
<ide> return nil
<ide> }
<ide> return derr.ErrorCodeRmState.WithArgs(err)
<ide> }
<del> defer container.resetRemovalInProgress()
<add> defer container.ResetRemovalInProgress()
<ide>
<ide> // check if container wasn't deregistered by previous rm since Get
<ide> if c := daemon.containers.Get(container.ID); c == nil {
<ide> func (daemon *Daemon) rmLink(name string) error {
<ide>
<ide> // cleanupContainer unregisters a container from the daemon, stops stats
<ide> // collection and cleanly removes contents and metadata from the filesystem.
<del>func (daemon *Daemon) cleanupContainer(container *Container, forceRemove bool) (err error) {
<add>func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemove bool) (err error) {
<ide> if container.IsRunning() {
<ide> if !forceRemove {
<ide> return derr.ErrorCodeRmRunning
<ide> func (daemon *Daemon) cleanupContainer(container *Container, forceRemove bool) (
<ide> }
<ide>
<ide> // Mark container dead. We don't want anybody to be restarting it.
<del> container.setDead()
<add> container.SetDead()
<ide>
<ide> // Save container state to disk, so that if an error happens before the
<ide> // container meta file is removed from disk, a restart of
<ide> // docker does not bring a dead container back to life.
<del> if err := container.toDiskLocking(); err != nil {
<add> if err := container.ToDiskLocking(); err != nil {
<ide> logrus.Errorf("Error saving dying container to disk: %v", err)
<ide> }
<ide>
<ide> func (daemon *Daemon) cleanupContainer(container *Container, forceRemove bool) (
<ide> }
<ide> }()
<ide>
<del> if err = os.RemoveAll(container.root); err != nil {
<add> if err = os.RemoveAll(container.Root); err != nil {
<ide> return derr.ErrorCodeRmFS.WithArgs(container.ID, err)
<ide> }
<ide>
<ide><path>daemon/delete_test.go
<ide> import (
<ide> "os"
<ide> "testing"
<ide>
<add> "github.com/docker/docker/container"
<ide> "github.com/docker/docker/runconfig"
<ide> )
<ide>
<ide> func TestContainerDoubleDelete(t *testing.T) {
<ide> repository: tmp,
<ide> root: tmp,
<ide> }
<del> daemon.containers = &contStore{s: make(map[string]*Container)}
<add> daemon.containers = &contStore{s: make(map[string]*container.Container)}
<ide>
<del> container := &Container{
<del> CommonContainer: CommonContainer{
<add> container := &container.Container{
<add> CommonContainer: container.CommonContainer{
<ide> ID: "test",
<del> State: NewState(),
<add> State: container.NewState(),
<ide> Config: &runconfig.Config{},
<ide> },
<ide> }
<ide> daemon.containers.Add(container.ID, container)
<ide>
<ide> // Mark the container as having a delete in progress
<del> if err := container.setRemovalInProgress(); err != nil {
<add> if err := container.SetRemovalInProgress(); err != nil {
<ide> t.Fatal(err)
<ide> }
<ide>
<ide><path>daemon/events.go
<ide> package daemon
<ide>
<add>import (
<add> "github.com/docker/docker/container"
<add>)
<add>
<ide> // LogContainerEvent generates an event related to a container.
<del>func (daemon *Daemon) LogContainerEvent(container *Container, action string) {
<add>func (daemon *Daemon) LogContainerEvent(container *container.Container, action string) {
<ide> daemon.EventsService.Log(
<ide> action,
<ide> container.ID,
<ide><path>daemon/exec.go
<ide> import (
<ide> "time"
<ide>
<ide> "github.com/Sirupsen/logrus"
<add> "github.com/docker/docker/container"
<ide> "github.com/docker/docker/daemon/exec"
<ide> "github.com/docker/docker/daemon/execdriver"
<ide> derr "github.com/docker/docker/errors"
<ide> import (
<ide> "github.com/docker/docker/runconfig"
<ide> )
<ide>
<del>func (d *Daemon) registerExecCommand(container *Container, config *exec.Config) {
<add>func (d *Daemon) registerExecCommand(container *container.Container, config *exec.Config) {
<ide> // Storing execs in container in order to kill them gracefully whenever the container is stopped or removed.
<del> container.execCommands.Add(config.ID, config)
<add> container.ExecCommands.Add(config.ID, config)
<ide> // Storing execs in daemon for easy access via remote API.
<ide> d.execCommands.Add(config.ID, config)
<ide> }
<ide>
<ide> // ExecExists looks up the exec instance and returns a bool if it exists or not.
<ide>// It will also return the error produced by `getExecConfig`
<ide> func (d *Daemon) ExecExists(name string) (bool, error) {
<ide> if _, err := d.getExecConfig(name); err != nil {
<ide> return false, err
<ide> func (d *Daemon) getExecConfig(name string) (*exec.Config, error) {
<ide> if !container.IsRunning() {
<ide> return nil, derr.ErrorCodeContainerNotRunning.WithArgs(container.ID, container.State.String())
<ide> }
<del> if container.isPaused() {
<add> if container.IsPaused() {
<ide> return nil, derr.ErrorCodeExecPaused.WithArgs(container.ID)
<ide> }
<ide> return ec, nil
<ide> func (d *Daemon) getExecConfig(name string) (*exec.Config, error) {
<ide> return nil, derr.ErrorCodeNoExecID.WithArgs(name)
<ide> }
<ide>
<del>func (d *Daemon) unregisterExecCommand(container *Container, execConfig *exec.Config) {
<del> container.execCommands.Delete(execConfig.ID)
<add>func (d *Daemon) unregisterExecCommand(container *container.Container, execConfig *exec.Config) {
<add> container.ExecCommands.Delete(execConfig.ID)
<ide> d.execCommands.Delete(execConfig.ID)
<ide> }
<ide>
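<add>// getActiveContainer looks up a container and errors out if it is not running or is paused.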
<del>func (d *Daemon) getActiveContainer(name string) (*Container, error) {
<add>func (d *Daemon) getActiveContainer(name string) (*container.Container, error) {
<ide> container, err := d.Get(name)
<ide> if err != nil {
<ide> return nil, err
<ide> func (d *Daemon) getActiveContainer(name string) (*Container, error) {
<ide> if !container.IsRunning() {
<ide> return nil, derr.ErrorCodeNotRunning.WithArgs(name)
<ide> }
<del> if container.isPaused() {
<add> if container.IsPaused() {
<ide> return nil, derr.ErrorCodeExecPaused.WithArgs(name)
<ide> }
<ide> return container, nil
<ide> func (d *Daemon) ContainerExecStart(name string, stdin io.ReadCloser, stdout io.
<ide> ec.Running = true
<ide> ec.Unlock()
<ide>
<del> container := d.containers.Get(ec.ContainerID)
<del> logrus.Debugf("starting exec command %s in container %s", ec.ID, container.ID)
<del> d.LogContainerEvent(container, "exec_start: "+ec.ProcessConfig.Entrypoint+" "+strings.Join(ec.ProcessConfig.Arguments, " "))
<add> c := d.containers.Get(ec.ContainerID)
<add> logrus.Debugf("starting exec command %s in container %s", ec.ID, c.ID)
<add> d.LogContainerEvent(c, "exec_start: "+ec.ProcessConfig.Entrypoint+" "+strings.Join(ec.ProcessConfig.Arguments, " "))
<ide>
<ide> if ec.OpenStdin {
<ide> r, w := io.Pipe()
<ide> func (d *Daemon) ContainerExecStart(name string, stdin io.ReadCloser, stdout io.
<ide> ec.NewNopInputPipe()
<ide> }
<ide>
<del> attachErr := attach(ec.StreamConfig, ec.OpenStdin, true, ec.ProcessConfig.Tty, cStdin, cStdout, cStderr)
<del>
<add> attachErr := container.AttachStreams(ec.StreamConfig, ec.OpenStdin, true, ec.ProcessConfig.Tty, cStdin, cStdout, cStderr)
<ide> execErr := make(chan error)
<ide>
<ide> // Note, the ExecConfig data will be removed when the container
<ide> // itself is deleted. This allows us to query it (for things like
<ide> // the exitStatus) even after the cmd is done running.
<ide>
<ide> go func() {
<del> execErr <- d.containerExec(container, ec)
<add> execErr <- d.containerExec(c, ec)
<ide> }()
<ide>
<ide> select {
<ide> func (d *Daemon) ContainerExecStart(name string, stdin io.ReadCloser, stdout io.
<ide> }
<ide>
<ide> // Maybe the container stopped while we were trying to exec
<del> if !container.IsRunning() {
<add> if !c.IsRunning() {
<ide> return derr.ErrorCodeExecContainerStopped
<ide> }
<del> return derr.ErrorCodeExecCantRun.WithArgs(ec.ID, container.ID, err)
<add> return derr.ErrorCodeExecCantRun.WithArgs(ec.ID, c.ID, err)
<ide> }
<ide> }
<ide>
<ide> // Exec calls the underlying exec driver to run
<del>func (d *Daemon) Exec(c *Container, execConfig *exec.Config, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (int, error) {
<add>func (d *Daemon) Exec(c *container.Container, execConfig *exec.Config, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (int, error) {
<ide> hooks := execdriver.Hooks{
<ide> Start: startCallback,
<ide> }
<del> exitStatus, err := d.execDriver.Exec(c.command, execConfig.ProcessConfig, pipes, hooks)
<add> exitStatus, err := d.execDriver.Exec(c.Command, execConfig.ProcessConfig, pipes, hooks)
<ide>
<ide> // On err, make sure we don't leave ExitCode at zero
<ide> if err != nil && exitStatus == 0 {
<ide> func (d *Daemon) execCommandGC() {
<ide> func (d *Daemon) containerExecIds() map[string]struct{} {
<ide> ids := map[string]struct{}{}
<ide> for _, c := range d.containers.List() {
<del> for _, id := range c.execCommands.List() {
<add> for _, id := range c.ExecCommands.List() {
<ide> ids[id] = struct{}{}
<ide> }
<ide> }
<ide> return ids
<ide> }
<ide>
<del>func (d *Daemon) containerExec(container *Container, ec *exec.Config) error {
<add>func (d *Daemon) containerExec(container *container.Container, ec *exec.Config) error {
<ide> container.Lock()
<ide> defer container.Unlock()
<ide>
<ide> func (d *Daemon) containerExec(container *Container, ec *exec.Config) error {
<ide> return ec.Wait(cErr)
<ide> }
<ide>
<del>func (d *Daemon) monitorExec(container *Container, execConfig *exec.Config, callback execdriver.DriverCallback) error {
<add>func (d *Daemon) monitorExec(container *container.Container, execConfig *exec.Config, callback execdriver.DriverCallback) error {
<ide> pipes := execdriver.NewPipes(execConfig.Stdin(), execConfig.Stdout(), execConfig.Stderr(), execConfig.OpenStdin)
<ide> exitCode, err := d.Exec(container, execConfig, pipes, callback)
<ide> if err != nil {
<ide> func (d *Daemon) monitorExec(container *Container, execConfig *exec.Config, call
<ide> }
<ide> // remove the exec command from the container's store only and not the
<ide> // daemon's store so that the exec command can be inspected.
<del> container.execCommands.Delete(execConfig.ID)
<add> container.ExecCommands.Delete(execConfig.ID)
<ide> return err
<ide> }
<ide><path>daemon/exec_unix.go
<ide> package daemon
<ide>
<ide> import (
<add> "github.com/docker/docker/container"
<ide> "github.com/docker/docker/daemon/execdriver"
<ide> "github.com/docker/docker/runconfig"
<ide> )
<ide>
<ide> // setPlatformSpecificExecProcessConfig sets platform-specific fields in the
<ide> // ProcessConfig structure.
<del>func setPlatformSpecificExecProcessConfig(config *runconfig.ExecConfig, container *Container, pc *execdriver.ProcessConfig) {
<add>func setPlatformSpecificExecProcessConfig(config *runconfig.ExecConfig, container *container.Container, pc *execdriver.ProcessConfig) {
<ide> user := config.User
<ide> if len(user) == 0 {
<ide> user = container.Config.User
<ide><path>daemon/exec_windows.go
<ide> package daemon
<ide>
<ide> import (
<add> "github.com/docker/docker/container"
<ide> "github.com/docker/docker/daemon/execdriver"
<ide> "github.com/docker/docker/runconfig"
<ide> )
<ide>
<ide> // setPlatformSpecificExecProcessConfig sets platform-specific fields in the
<ide> // ProcessConfig structure. This is a no-op on Windows
<del>func setPlatformSpecificExecProcessConfig(config *runconfig.ExecConfig, container *Container, pc *execdriver.ProcessConfig) {
<add>func setPlatformSpecificExecProcessConfig(config *runconfig.ExecConfig, container *container.Container, pc *execdriver.ProcessConfig) {
<ide> }
<ide><path>daemon/export.go
<ide> package daemon
<ide> import (
<ide> "io"
<ide>
<add> "github.com/docker/docker/container"
<ide> derr "github.com/docker/docker/errors"
<ide> "github.com/docker/docker/pkg/archive"
<ide> "github.com/docker/docker/pkg/ioutils"
<ide> func (daemon *Daemon) ContainerExport(name string, out io.Writer) error {
<ide> return nil
<ide> }
<ide>
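<add>// containerExport mounts the container's filesystem and returns it as an uncompressed tar archive.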
<del>func (daemon *Daemon) containerExport(container *Container) (archive.Archive, error) {
<add>func (daemon *Daemon) containerExport(container *container.Container) (archive.Archive, error) {
<ide> if err := daemon.Mount(container); err != nil {
<ide> return nil, err
<ide> }
<ide>
<ide> uidMaps, gidMaps := daemon.GetUIDGIDMaps()
<del> archive, err := archive.TarWithOptions(container.basefs, &archive.TarOptions{
<add> archive, err := archive.TarWithOptions(container.BaseFS, &archive.TarOptions{
<ide> Compression: archive.Uncompressed,
<ide> UIDMaps: uidMaps,
<ide> GIDMaps: gidMaps,
<ide><path>daemon/history.go
<ide> package daemon
<ide>
<ide> import (
<ide> "sort"
<add>
<add> "github.com/docker/docker/container"
<ide> )
<ide>
<ide> // History is a convenience type for storing a list of containers,
<ide> // ordered by creation date.
<del>type History []*Container
<add>type History []*container.Container
<ide>
<ide> func (history *History) Len() int {
<ide> return len(*history)
<ide> func (history *History) Swap(i, j int) {
<ide> }
<ide>
<ide> // Add the given container to history.
<del>func (history *History) Add(container *Container) {
<add>func (history *History) Add(container *container.Container) {
<ide> *history = append(*history, container)
<ide> }
<ide>
<ide><path>daemon/image_delete.go
<ide> import (
<ide>
<ide> "github.com/docker/distribution/reference"
<ide> "github.com/docker/docker/api/types"
<add> "github.com/docker/docker/container"
<ide> derr "github.com/docker/docker/errors"
<ide> "github.com/docker/docker/image"
<ide> "github.com/docker/docker/pkg/stringid"
<ide> func isImageIDPrefix(imageID, possiblePrefix string) bool {
<ide>
<ide> // getContainerUsingImage returns a container that was created using the given
<ide> // imageID. Returns nil if there is no such container.
<del>func (daemon *Daemon) getContainerUsingImage(imageID image.ID) *Container {
<add>func (daemon *Daemon) getContainerUsingImage(imageID image.ID) *container.Container {
<ide> for _, container := range daemon.List() {
<ide> if container.ImageID == imageID {
<ide> return container
<ide><path>daemon/inspect.go
<ide> import (
<ide>
<ide> "github.com/docker/docker/api/types"
<ide> "github.com/docker/docker/api/types/versions/v1p20"
<add> "github.com/docker/docker/container"
<ide> "github.com/docker/docker/daemon/exec"
<ide> "github.com/docker/docker/daemon/network"
<ide> "github.com/docker/docker/layer"
<ide> func (daemon *Daemon) containerInspect120(name string) (*v1p20.ContainerJSON, er
<ide> MacAddress: container.Config.MacAddress,
<ide> NetworkDisabled: container.Config.NetworkDisabled,
<ide> ExposedPorts: container.Config.ExposedPorts,
<del> VolumeDriver: container.hostConfig.VolumeDriver,
<add> VolumeDriver: container.HostConfig.VolumeDriver,
<ide> }
<ide> networkSettings := daemon.getBackwardsCompatibleNetworkSettings(container.NetworkSettings)
<ide>
<ide> func (daemon *Daemon) containerInspect120(name string) (*v1p20.ContainerJSON, er
<ide> }, nil
<ide> }
<ide>
<del>func (daemon *Daemon) getInspectData(container *Container, size bool) (*types.ContainerJSONBase, error) {
<add>func (daemon *Daemon) getInspectData(container *container.Container, size bool) (*types.ContainerJSONBase, error) {
<ide> // make a copy to play with
<del> hostConfig := *container.hostConfig
<add> hostConfig := *container.HostConfig
<ide>
<ide> if children, err := daemon.children(container.Name); err == nil {
<ide> for linkAlias, child := range children {
<ide> func (daemon *Daemon) getInspectData(container *Container, size bool) (*types.Co
<ide> Driver: container.Driver,
<ide> MountLabel: container.MountLabel,
<ide> ProcessLabel: container.ProcessLabel,
<del> ExecIDs: container.getExecIDs(),
<add> ExecIDs: container.GetExecIDs(),
<ide> HostConfig: &hostConfig,
<ide> }
<ide>
<ide><path>daemon/inspect_unix.go
<ide> package daemon
<ide> import (
<ide> "github.com/docker/docker/api/types"
<ide> "github.com/docker/docker/api/types/versions/v1p19"
<add> "github.com/docker/docker/container"
<ide> )
<ide>
<ide> // This sets platform-specific fields
<del>func setPlatformSpecificContainerFields(container *Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase {
<add>func setPlatformSpecificContainerFields(container *container.Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase {
<ide> contJSONBase.AppArmorProfile = container.AppArmorProfile
<ide> contJSONBase.ResolvConfPath = container.ResolvConfPath
<ide> contJSONBase.HostnamePath = container.HostnamePath
<ide> func (daemon *Daemon) containerInspectPre120(name string) (*v1p19.ContainerJSON,
<ide> MacAddress: container.Config.MacAddress,
<ide> NetworkDisabled: container.Config.NetworkDisabled,
<ide> ExposedPorts: container.Config.ExposedPorts,
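<add>// registerLink records the parent/alias link to the child container in the container graph.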
<del> VolumeDriver: container.hostConfig.VolumeDriver,
<del> Memory: container.hostConfig.Memory,
<del> MemorySwap: container.hostConfig.MemorySwap,
<del> CPUShares: container.hostConfig.CPUShares,
<del> CPUSet: container.hostConfig.CpusetCpus,
<add> VolumeDriver: container.HostConfig.VolumeDriver,
<add> Memory: container.HostConfig.Memory,
<add> MemorySwap: container.HostConfig.MemorySwap,
<add> CPUShares: container.HostConfig.CPUShares,
<add> CPUSet: container.HostConfig.CpusetCpus,
<ide> }
<ide> networkSettings := daemon.getBackwardsCompatibleNetworkSettings(container.NetworkSettings)
<ide>
<ide> func (daemon *Daemon) containerInspectPre120(name string) (*v1p19.ContainerJSON,
<ide> }, nil
<ide> }
<ide>
<del>func addMountPoints(container *Container) []types.MountPoint {
<add>func addMountPoints(container *container.Container) []types.MountPoint {
<ide> mountPoints := make([]types.MountPoint, 0, len(container.MountPoints))
<ide> for _, m := range container.MountPoints {
<ide> mountPoints = append(mountPoints, types.MountPoint{
<ide><path>daemon/inspect_windows.go
<ide> package daemon
<ide>
<del>import "github.com/docker/docker/api/types"
<add>import (
<add> "github.com/docker/docker/api/types"
<add> "github.com/docker/docker/container"
<add>)
<ide>
<ide> // This sets platform-specific fields
<del>func setPlatformSpecificContainerFields(container *Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase {
<add>func setPlatformSpecificContainerFields(container *container.Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase {
<ide> return contJSONBase
<ide> }
<ide>
<del>func addMountPoints(container *Container) []types.MountPoint {
<add>func addMountPoints(container *container.Container) []types.MountPoint {
<ide> mountPoints := make([]types.MountPoint, 0, len(container.MountPoints))
<ide> for _, m := range container.MountPoints {
<ide> mountPoints = append(mountPoints, types.MountPoint{
<ide><path>daemon/kill.go
<ide> import (
<ide> "time"
<ide>
<ide> "github.com/Sirupsen/logrus"
<add> "github.com/docker/docker/container"
<ide> derr "github.com/docker/docker/errors"
<ide> "github.com/docker/docker/pkg/signal"
<ide> )
<ide> func (daemon *Daemon) ContainerKill(name string, sig uint64) error {
<ide> // to send the signal. An error is returned if the container is paused
<ide> // or not running, or if there is a problem returned from the
<ide> // underlying kill command.
<del>func (daemon *Daemon) killWithSignal(container *Container, sig int) error {
<add>func (daemon *Daemon) killWithSignal(container *container.Container, sig int) error {
<ide> logrus.Debugf("Sending %d to %s", sig, container.ID)
<ide> container.Lock()
<ide> defer container.Unlock()
<ide> func (daemon *Daemon) killWithSignal(container *Container, sig int) error {
<ide> }
<ide>
<ide> // Kill forcefully terminates a container.
<del>func (daemon *Daemon) Kill(container *Container) error {
<add>func (daemon *Daemon) Kill(container *container.Container) error {
<ide> if !container.IsRunning() {
<ide> return derr.ErrorCodeNotRunning.WithArgs(container.ID)
<ide> }
<ide> func (daemon *Daemon) Kill(container *Container) error {
<ide> }
<ide>
<ide> // killPossibleDeadProcess is a wrapper aroung killSig() suppressing "no such process" error.
<del>func (daemon *Daemon) killPossiblyDeadProcess(container *Container, sig int) error {
<add>func (daemon *Daemon) killPossiblyDeadProcess(container *container.Container, sig int) error {
<ide> err := daemon.killWithSignal(container, sig)
<ide> if err == syscall.ESRCH {
<ide> logrus.Debugf("Cannot kill process (pid=%d) with signal %d: no such process.", container.GetPID(), sig)
<ide><path>daemon/list.go
<ide> import (
<ide>
<ide> "github.com/Sirupsen/logrus"
<ide> "github.com/docker/docker/api/types"
<add> "github.com/docker/docker/container"
<ide> "github.com/docker/docker/image"
<ide> "github.com/docker/docker/pkg/graphdb"
<ide> "github.com/docker/docker/pkg/nat"
<ide> type iterationAction int
<ide>
<ide> // containerReducer represents a reducer for a container.
<ide> // Returns the object to be serialized by the API.
<del>type containerReducer func(*Container, *listContext) (*types.Container, error)
<add>type containerReducer func(*container.Container, *listContext) (*types.Container, error)
<ide>
<ide> const (
<ide> // includeContainer is the action to include a container in the reducer.
<ide> const (
<ide> var errStopIteration = errors.New("container list iteration stopped")
<ide>
<ide> // List returns an array of all containers registered in the daemon.
<del>func (daemon *Daemon) List() []*Container {
<add>func (daemon *Daemon) List() []*container.Container {
<ide> return daemon.containers.List()
<ide> }
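Aside: the containerReducer/iterationAction pair above separates per-item transformation from iteration control (include, exclude, or stop early). A minimal, self-contained Go sketch of that reduce-with-early-stop shape; the item type and filter here are hypothetical, not Docker's actual API:

    package main

    import "fmt"

    type iterationAction int

    const (
        includeItem iterationAction = iota
        excludeItem
        stopIteration
    )

    // reduce walks items, letting filter decide per item whether to include it,
    // skip it, or stop the whole iteration early.
    func reduce(items []int, filter func(int) iterationAction) []int {
        var out []int
        for _, it := range items {
            switch filter(it) {
            case excludeItem:
                continue
            case stopIteration:
                return out
            }
            out = append(out, it)
        }
        return out
    }

    func main() {
        // Keep even numbers; stop the whole walk at the first negative value.
        fmt.Println(reduce([]int{2, 3, 4, -1, 6}, func(n int) iterationAction {
            if n < 0 {
                return stopIteration
            }
            if n%2 != 0 {
                return excludeItem
            }
            return includeItem
        }))
        // prints [2 4]
    }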
<ide>
<ide> type listContext struct {
<ide> exitAllowed []int
<ide> // beforeFilter is a filter to ignore containers that appear before the one given
<ide> // this is used for --filter=before= and --before=, the latter is deprecated.
<del> beforeFilter *Container
<add> beforeFilter *container.Container
<ide> // sinceFilter is a filter to stop the filtering when the iterator arrive to the given container
<ide> // this is used for --filter=since= and --since=, the latter is deprecated.
<del> sinceFilter *Container
<add> sinceFilter *container.Container
<ide> // ContainersConfig is the filters set by the user
<ide> *ContainersConfig
<ide> }
<ide> func (daemon *Daemon) reduceContainers(config *ContainersConfig, reducer contain
<ide> }
<ide>
<ide> // reducePsContainer is the basic representation for a container as expected by the ps command.
<del>func (daemon *Daemon) reducePsContainer(container *Container, ctx *listContext, reducer containerReducer) (*types.Container, error) {
<add>func (daemon *Daemon) reducePsContainer(container *container.Container, ctx *listContext, reducer containerReducer) (*types.Container, error) {
<ide> container.Lock()
<ide> defer container.Unlock()
<ide>
<ide> func (daemon *Daemon) foldFilter(config *ContainersConfig) (*listContext, error)
<ide> }
<ide>
<ide> err = psFilters.WalkValues("status", func(value string) error {
<del> if !isValidStateString(value) {
<add> if !container.IsValidStateString(value) {
<ide> return fmt.Errorf("Unrecognised filter value for status: %s", value)
<ide> }
<ide>
<ide> func (daemon *Daemon) foldFilter(config *ContainersConfig) (*listContext, error)
<ide> return nil, err
<ide> }
<ide>
<del> var beforeContFilter, sinceContFilter *Container
<add> var beforeContFilter, sinceContFilter *container.Container
<ide> err = psFilters.WalkValues("before", func(value string) error {
<ide> beforeContFilter, err = daemon.Get(value)
<ide> return err
<ide> func (daemon *Daemon) foldFilter(config *ContainersConfig) (*listContext, error)
<ide>
<ide> // includeContainerInList decides whether a container should be included in the output or not, based on the filter.
<ide> // It also decides if the iteration should be stopped or not.
<del>func includeContainerInList(container *Container, ctx *listContext) iterationAction {
<add>func includeContainerInList(container *container.Container, ctx *listContext) iterationAction {
<ide> // Do not include container if it's stopped and we're not filtering
<ide> if !container.Running && !ctx.All && ctx.Limit <= 0 && ctx.beforeFilter == nil && ctx.sinceFilter == nil {
<ide> return excludeContainer
<ide> func includeContainerInList(container *Container, ctx *listContext) iterationAct
<ide> }
<ide>
<ide> // transformContainer generates the container type expected by the docker ps command.
<del>func (daemon *Daemon) transformContainer(container *Container, ctx *listContext) (*types.Container, error) {
<add>func (daemon *Daemon) transformContainer(container *container.Container, ctx *listContext) (*types.Container, error) {
<ide> newC := &types.Container{
<ide> ID: container.ID,
<ide> Names: ctx.names[container.ID],
<ide> func (daemon *Daemon) transformContainer(container *Container, ctx *listContext)
<ide> }
<ide> newC.Created = container.Created.Unix()
<ide> newC.Status = container.State.String()
<del> newC.HostConfig.NetworkMode = string(container.hostConfig.NetworkMode)
<add> newC.HostConfig.NetworkMode = string(container.HostConfig.NetworkMode)
<ide>
<ide> newC.Ports = []types.Port{}
<ide> for port, bindings := range container.NetworkSettings.Ports {
<ide><path>daemon/list_unix.go
<ide>
<ide> package daemon
<ide>
<add>import "github.com/docker/docker/container"
<add>
<ide> // excludeByIsolation is a platform specific helper function to support PS
<ide> // filtering by Isolation. This is a Windows-only concept, so is a no-op on Unix.
<del>func excludeByIsolation(container *Container, ctx *listContext) iterationAction {
<add>func excludeByIsolation(container *container.Container, ctx *listContext) iterationAction {
<ide> return includeContainer
<ide> }
<ide><path>daemon/list_windows.go
<ide> package daemon
<ide>
<del>import "strings"
<add>import (
<add> "strings"
<add>
<add> "github.com/docker/docker/container"
<add>)
<ide>
<ide> // excludeByIsolation is a platform specific helper function to support PS
<ide> // filtering by Isolation. This is a Windows-only concept, so is a no-op on Unix.
<del>func excludeByIsolation(container *Container, ctx *listContext) iterationAction {
<del> i := strings.ToLower(string(container.hostConfig.Isolation))
<add>func excludeByIsolation(container *container.Container, ctx *listContext) iterationAction {
<add> i := strings.ToLower(string(container.HostConfig.Isolation))
<ide> if i == "" {
<ide> i = "default"
<ide> }
<ide><path>daemon/logs.go
<ide> import (
<ide> "time"
<ide>
<ide> "github.com/Sirupsen/logrus"
<add> "github.com/docker/docker/container"
<ide> "github.com/docker/docker/daemon/logger"
<ide> "github.com/docker/docker/daemon/logger/jsonfilelog"
<ide> derr "github.com/docker/docker/errors"
<ide> func (daemon *Daemon) ContainerLogs(containerName string, config *ContainerLogsC
<ide> }
<ide> }
<ide>
<del>func (daemon *Daemon) getLogger(container *Container) (logger.Logger, error) {
<del> if container.logDriver != nil && container.IsRunning() {
<del> return container.logDriver, nil
<add>func (daemon *Daemon) getLogger(container *container.Container) (logger.Logger, error) {
<add> if container.LogDriver != nil && container.IsRunning() {
<add> return container.LogDriver, nil
<ide> }
<del> cfg := container.getLogConfig(daemon.defaultLogConfig)
<add> cfg := container.GetLogConfig(daemon.defaultLogConfig)
<ide> if err := logger.ValidateLogOpts(cfg.Type, cfg.Config); err != nil {
<ide> return nil, err
<ide> }
<ide> return container.StartLogger(cfg)
<ide> }
<ide>
<ide> // StartLogging initializes and starts the container logging stream.
<del>func (daemon *Daemon) StartLogging(container *Container) error {
<del> cfg := container.getLogConfig(daemon.defaultLogConfig)
<add>func (daemon *Daemon) StartLogging(container *container.Container) error {
<add> cfg := container.GetLogConfig(daemon.defaultLogConfig)
<ide> if cfg.Type == "none" {
<ide> return nil // do not start logging routines
<ide> }
<ide> func (daemon *Daemon) StartLogging(container *Container) error {
<ide> }
<ide>
<ide> copier := logger.NewCopier(container.ID, map[string]io.Reader{"stdout": container.StdoutPipe(), "stderr": container.StderrPipe()}, l)
<del> container.logCopier = copier
<add> container.LogCopier = copier
<ide> copier.Run()
<del> container.logDriver = l
<add> container.LogDriver = l
<ide>
<ide> // set LogPath field only for json-file logdriver
<ide> if jl, ok := l.(*jsonfilelog.JSONFileLogger); ok {
<ide><path>daemon/mounts.go
<ide> package daemon
<ide> import (
<ide> "strings"
<ide>
<add> "github.com/docker/docker/container"
<ide> derr "github.com/docker/docker/errors"
<ide> volumestore "github.com/docker/docker/volume/store"
<ide> )
<ide>
<del>func (daemon *Daemon) prepareMountPoints(container *Container) error {
<add>func (daemon *Daemon) prepareMountPoints(container *container.Container) error {
<ide> for _, config := range container.MountPoints {
<ide> if len(config.Driver) > 0 {
<ide> v, err := daemon.createVolume(config.Name, config.Driver, nil)
<ide> func (daemon *Daemon) prepareMountPoints(container *Container) error {
<ide> return nil
<ide> }
<ide>
<del>func (daemon *Daemon) removeMountPoints(container *Container, rm bool) error {
<add>func (daemon *Daemon) removeMountPoints(container *container.Container, rm bool) error {
<ide> var rmErrors []string
<ide> for _, m := range container.MountPoints {
<ide> if m.Volume == nil {
<ide><path>daemon/pause.go
<ide> package daemon
<ide>
<ide> import (
<add> "github.com/docker/docker/container"
<ide> derr "github.com/docker/docker/errors"
<ide> )
<ide>
<ide> func (daemon *Daemon) ContainerPause(name string) error {
<ide>
<ide> // containerPause pauses the container execution without stopping the process.
<ide> // The execution can be resumed by calling containerUnpause.
<del>func (daemon *Daemon) containerPause(container *Container) error {
<add>func (daemon *Daemon) containerPause(container *container.Container) error {
<ide> container.Lock()
<ide> defer container.Unlock()
<ide>
<ide> func (daemon *Daemon) containerPause(container *Container) error {
<ide> return derr.ErrorCodeAlreadyPaused.WithArgs(container.ID)
<ide> }
<ide>
<del> if err := daemon.execDriver.Pause(container.command); err != nil {
<add> if err := daemon.execDriver.Pause(container.Command); err != nil {
<ide> return err
<ide> }
<ide> container.Paused = true
<ide><path>daemon/rename.go
<ide> import (
<ide> // reserved.
<ide> func (daemon *Daemon) ContainerRename(oldName, newName string) error {
<ide> var (
<del> err error
<del> sid string
<del> sb libnetwork.Sandbox
<del> container *Container
<add> sid string
<add> sb libnetwork.Sandbox
<ide> )
<ide>
<ide> if oldName == "" || newName == "" {
<ide> return derr.ErrorCodeEmptyRename
<ide> }
<ide>
<del> container, err = daemon.Get(oldName)
<add> container, err := daemon.Get(oldName)
<ide> if err != nil {
<ide> return err
<ide> }
<ide> func (daemon *Daemon) ContainerRename(oldName, newName string) error {
<ide> return derr.ErrorCodeRenameDelete.WithArgs(oldName, err)
<ide> }
<ide>
<del> if err = container.toDisk(); err != nil {
<add> if err = container.ToDisk(); err != nil {
<ide> return err
<ide> }
<ide>
<ide> func (daemon *Daemon) ContainerRename(oldName, newName string) error {
<ide> defer func() {
<ide> if err != nil {
<ide> container.Name = oldName
<del> if e := container.toDisk(); e != nil {
<add> if e := container.ToDisk(); e != nil {
<ide> logrus.Errorf("%s: Failed in writing to Disk on rename failure: %v", container.ID, e)
<ide> }
<ide> }
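Aside: the rename path above uses a deferred closure that inspects the named error result and restores the old name when any later step fails. A minimal sketch of that defer-based rollback idiom, with a hypothetical record type and commit step:

    package main

    import (
        "errors"
        "fmt"
    )

    type record struct{ name string }

    // rename applies the new name first, then runs the remaining steps; the
    // deferred closure reads the named error result and rolls back on failure.
    func rename(r *record, newName string, commit func() error) (err error) {
        oldName := r.name
        r.name = newName
        defer func() {
            if err != nil {
                r.name = oldName // roll back on any error below
            }
        }()
        return commit()
    }

    func main() {
        r := &record{name: "old"}
        _ = rename(r, "new", func() error { return errors.New("disk write failed") })
        fmt.Println(r.name) // old: the rollback restored it
    }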
<ide><path>daemon/restart.go
<ide> package daemon
<ide>
<ide> import (
<add> "github.com/docker/docker/container"
<ide> derr "github.com/docker/docker/errors"
<ide> )
<ide>
<ide> func (daemon *Daemon) ContainerRestart(name string, seconds int) error {
<ide> // container. When stopping, wait for the given duration in seconds to
<ide> // gracefully stop, before forcefully terminating the container. If
<ide> // given a negative duration, wait forever for a graceful stop.
<del>func (daemon *Daemon) containerRestart(container *Container, seconds int) error {
<add>func (daemon *Daemon) containerRestart(container *container.Container, seconds int) error {
<ide> // Avoid unnecessarily unmounting and then directly mounting
<ide> // the container when the container stops and then starts
<ide> // again
<ide><path>daemon/start.go
<ide> import (
<ide> "runtime"
<ide>
<ide> "github.com/Sirupsen/logrus"
<add> "github.com/docker/docker/container"
<ide> derr "github.com/docker/docker/errors"
<del> "github.com/docker/docker/pkg/promise"
<ide> "github.com/docker/docker/runconfig"
<ide> )
<ide>
<ide> func (daemon *Daemon) ContainerStart(name string, hostConfig *runconfig.HostConf
<ide> return err
<ide> }
<ide>
<del> if container.isPaused() {
<add> if container.IsPaused() {
<ide> return derr.ErrorCodeStartPaused
<ide> }
<ide>
<ide> func (daemon *Daemon) ContainerStart(name string, hostConfig *runconfig.HostConf
<ide> if err := daemon.setHostConfig(container, hostConfig); err != nil {
<ide> return err
<ide> }
<del> initDNSHostConfig(container)
<add> container.InitDNSHostConfig()
<ide> }
<ide> } else {
<ide> if hostConfig != nil {
<ide> func (daemon *Daemon) ContainerStart(name string, hostConfig *runconfig.HostConf
<ide>
<ide> // check if hostConfig is in line with the current system settings.
<ide> // It may happen cgroups are umounted or the like.
<del> if _, err = daemon.verifyContainerSettings(container.hostConfig, nil); err != nil {
<add> if _, err = daemon.verifyContainerSettings(container.HostConfig, nil); err != nil {
<ide> return err
<ide> }
<ide>
<ide> func (daemon *Daemon) ContainerStart(name string, hostConfig *runconfig.HostConf
<ide> }
<ide>
<ide> // Start starts a container
<del>func (daemon *Daemon) Start(container *Container) error {
<add>func (daemon *Daemon) Start(container *container.Container) error {
<ide> return daemon.containerStart(container)
<ide> }
<ide>
<ide> // containerStart prepares the container to run by setting up everything the
<ide> // container needs, such as storage and networking, as well as links
<ide> // between containers. The container is left waiting for a signal to
<ide> // begin running.
<del>func (daemon *Daemon) containerStart(container *Container) (err error) {
<add>func (daemon *Daemon) containerStart(container *container.Container) (err error) {
<ide> container.Lock()
<ide> defer container.Unlock()
<ide>
<ide> if container.Running {
<ide> return nil
<ide> }
<ide>
<del> if container.removalInProgress || container.Dead {
<add> if container.RemovalInProgress || container.Dead {
<ide> return derr.ErrorCodeContainerBeingRemoved
<ide> }
<ide>
<ide> // if we encounter an error during start we need to ensure that any other
<ide> // setup has been cleaned up properly
<ide> defer func() {
<ide> if err != nil {
<del> container.setError(err)
<add> container.SetError(err)
<ide> // if no one else has set it, make sure we don't leave it at zero
<ide> if container.ExitCode == 0 {
<ide> container.ExitCode = 128
<ide> }
<del> container.toDisk()
<add> container.ToDisk()
<ide> daemon.Cleanup(container)
<ide> daemon.LogContainerEvent(container, "die")
<ide> }
<ide> func (daemon *Daemon) containerStart(container *Container) (err error) {
<ide>
<ide> // Make sure NetworkMode has an acceptable value. We do this to ensure
<ide> // backwards API compatibility.
<del> container.hostConfig = runconfig.SetDefaultNetModeIfBlank(container.hostConfig)
<add> container.HostConfig = runconfig.SetDefaultNetModeIfBlank(container.HostConfig)
<ide>
<ide> if err := daemon.initializeNetworking(container); err != nil {
<ide> return err
<ide> func (daemon *Daemon) containerStart(container *Container) (err error) {
<ide> if err != nil {
<ide> return err
<ide> }
<del> if err := container.setupWorkingDirectory(); err != nil {
<add> if err := container.SetupWorkingDirectory(); err != nil {
<ide> return err
<ide> }
<del> env := container.createDaemonEnvironment(linkedEnv)
<add> env := container.CreateDaemonEnvironment(linkedEnv)
<ide> if err := daemon.populateCommand(container, env); err != nil {
<ide> return err
<ide> }
<ide>
<del> if !container.hostConfig.IpcMode.IsContainer() && !container.hostConfig.IpcMode.IsHost() {
<add> if !container.HostConfig.IpcMode.IsContainer() && !container.HostConfig.IpcMode.IsHost() {
<ide> if err := daemon.setupIpcDirs(container); err != nil {
<ide> return err
<ide> }
<ide> func (daemon *Daemon) containerStart(container *Container) (err error) {
<ide> if err != nil {
<ide> return err
<ide> }
<del> mounts = append(mounts, container.ipcMounts()...)
<del> mounts = append(mounts, container.tmpfsMounts()...)
<add> mounts = append(mounts, container.IpcMounts()...)
<add> mounts = append(mounts, container.TmpfsMounts()...)
<ide>
<del> container.command.Mounts = mounts
<add> container.Command.Mounts = mounts
<ide> if err := daemon.waitForStart(container); err != nil {
<ide> return err
<ide> }
<ide> container.HasBeenStartedBefore = true
<ide> return nil
<ide> }
<ide>
<del>func (daemon *Daemon) waitForStart(container *Container) error {
<del> container.monitor = daemon.newContainerMonitor(container, container.hostConfig.RestartPolicy)
<del>
<del> // block until we either receive an error from the initial start of the container's
<del> // process or until the process is running in the container
<del> select {
<del> case <-container.monitor.startSignal:
<del> case err := <-promise.Go(container.monitor.Start):
<del> return err
<del> }
<del>
<del> return nil
<add>func (daemon *Daemon) waitForStart(container *container.Container) error {
<add> return container.StartMonitor(daemon, container.HostConfig.RestartPolicy)
<ide> }
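Aside: the deleted waitForStart body is the start-signal-or-error select that StartMonitor now encapsulates: block until the process signals it is running, or until the initial start attempt returns an error. A minimal sketch under those assumptions; the channel and run function are hypothetical:

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // waitForStart blocks until the monitor either signals a successful start
    // or returns an error from the initial start attempt.
    func waitForStart(startSignal <-chan struct{}, run func() error) error {
        errCh := make(chan error, 1)
        go func() { errCh <- run() }() // promise.Go-style: run in the background
        select {
        case <-startSignal:
            return nil // process is running; run() keeps supervising it
        case err := <-errCh:
            return err // initial start failed before the signal fired
        }
    }

    func main() {
        started := make(chan struct{})
        err := waitForStart(started, func() error {
            time.Sleep(10 * time.Millisecond)
            return errors.New("exec format error")
        })
        fmt.Println(err)
    }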
<ide>
<ide> // Cleanup releases any network resources allocated to the container along with any rules
<ide> // around how containers are linked together. It also unmounts the container's root filesystem.
<del>func (daemon *Daemon) Cleanup(container *Container) {
<add>func (daemon *Daemon) Cleanup(container *container.Container) {
<ide> daemon.releaseNetwork(container)
<ide>
<del> container.unmountIpcMounts(detachMounted)
<add> container.UnmountIpcMounts(detachMounted)
<ide>
<ide> daemon.conditionalUnmountOnCleanup(container)
<ide>
<del> for _, eConfig := range container.execCommands.Commands() {
<add> for _, eConfig := range container.ExecCommands.Commands() {
<ide> daemon.unregisterExecCommand(container, eConfig)
<ide> }
<ide>
<del> if err := container.unmountVolumes(false); err != nil {
<add> if err := container.UnmountVolumes(false); err != nil {
<ide> logrus.Warnf("%s cleanup: Failed to umount volumes: %v", container.ID, err)
<ide> }
<ide> }
<ide><path>daemon/stats_collector_unix.go
<ide> import (
<ide> "time"
<ide>
<ide> "github.com/Sirupsen/logrus"
<add> "github.com/docker/docker/container"
<ide> "github.com/docker/docker/daemon/execdriver"
<ide> derr "github.com/docker/docker/errors"
<ide> "github.com/docker/docker/pkg/pubsub"
<ide> import (
<ide>
<ide> type statsSupervisor interface {
<ide> // GetContainerStats collects all the stats related to a container
<del> GetContainerStats(container *Container) (*execdriver.ResourceStats, error)
<add> GetContainerStats(container *container.Container) (*execdriver.ResourceStats, error)
<ide> }
<ide>
<ide> // newStatsCollector returns a new statsCollector that collects
<ide> func (daemon *Daemon) newStatsCollector(interval time.Duration) *statsCollector
<ide> s := &statsCollector{
<ide> interval: interval,
<ide> supervisor: daemon,
<del> publishers: make(map[*Container]*pubsub.Publisher),
<add> publishers: make(map[*container.Container]*pubsub.Publisher),
<ide> clockTicksPerSecond: uint64(system.GetClockTicks()),
<ide> bufReader: bufio.NewReaderSize(nil, 128),
<ide> }
<ide> type statsCollector struct {
<ide> supervisor statsSupervisor
<ide> interval time.Duration
<ide> clockTicksPerSecond uint64
<del> publishers map[*Container]*pubsub.Publisher
<add> publishers map[*container.Container]*pubsub.Publisher
<ide> bufReader *bufio.Reader
<ide> }
<ide>
<ide> // collect registers the container with the collector and adds it to
<ide> // the event loop for collection on the specified interval returning
<ide> // a channel for the subscriber to receive on.
<del>func (s *statsCollector) collect(c *Container) chan interface{} {
<add>func (s *statsCollector) collect(c *container.Container) chan interface{} {
<ide> s.m.Lock()
<ide> defer s.m.Unlock()
<ide> publisher, exists := s.publishers[c]
<ide> func (s *statsCollector) collect(c *Container) chan interface{} {
<ide>
<ide> // stopCollection closes the channels for all subscribers and removes
<ide> // the container from metrics collection.
<del>func (s *statsCollector) stopCollection(c *Container) {
<add>func (s *statsCollector) stopCollection(c *container.Container) {
<ide> s.m.Lock()
<ide> if publisher, exists := s.publishers[c]; exists {
<ide> publisher.Close()
<ide> func (s *statsCollector) stopCollection(c *Container) {
<ide> }
<ide>
<ide> // unsubscribe removes a specific subscriber from receiving updates for a container's stats.
<del>func (s *statsCollector) unsubscribe(c *Container, ch chan interface{}) {
<add>func (s *statsCollector) unsubscribe(c *container.Container, ch chan interface{}) {
<ide> s.m.Lock()
<ide> publisher := s.publishers[c]
<ide> if publisher != nil {
<ide> func (s *statsCollector) unsubscribe(c *Container, ch chan interface{}) {
<ide>
<ide> func (s *statsCollector) run() {
<ide> type publishersPair struct {
<del> container *Container
<add> container *container.Container
<ide> publisher *pubsub.Publisher
<ide> }
<ide> // we cannot determine the capacity here.
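Aside: collect, stopCollection, and unsubscribe above keep one publisher per container and fan stats out to subscribers. A minimal sketch of that map-of-publishers pattern using only the standard library; Docker's pkg/pubsub is replaced by a plain channel fan-out, and all names are hypothetical:

    package main

    import (
        "fmt"
        "sync"
    )

    // collector fans each published stat out to every subscriber of a key.
    type collector struct {
        mu   sync.Mutex
        subs map[string][]chan int
    }

    func (c *collector) collect(key string) chan int {
        c.mu.Lock()
        defer c.mu.Unlock()
        ch := make(chan int, 1)
        c.subs[key] = append(c.subs[key], ch)
        return ch
    }

    func (c *collector) publish(key string, v int) {
        c.mu.Lock()
        defer c.mu.Unlock()
        for _, ch := range c.subs[key] {
            ch <- v
        }
    }

    // stopCollection closes every subscriber channel and forgets the key.
    func (c *collector) stopCollection(key string) {
        c.mu.Lock()
        defer c.mu.Unlock()
        for _, ch := range c.subs[key] {
            close(ch)
        }
        delete(c.subs, key)
    }

    func main() {
        c := &collector{subs: map[string][]chan int{}}
        ch := c.collect("ctr1")
        c.publish("ctr1", 42)
        fmt.Println(<-ch) // 42
        c.stopCollection("ctr1")
    }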
<ide><path>daemon/stats_collector_windows.go
<ide> package daemon
<ide>
<del>import "time"
<add>import (
<add> "time"
<add>
<add> "github.com/docker/docker/container"
<add>)
<ide>
<ide> // newStatsCollector returns a new statsCollector for collecting stats
<ide> // for a registered container at the specified interval. The collector allows
<ide> type statsCollector struct {
<ide> // collect registers the container with the collector and adds it to
<ide> // the event loop for collection on the specified interval returning
<ide> // a channel for the subscriber to receive on.
<del>func (s *statsCollector) collect(c *Container) chan interface{} {
<add>func (s *statsCollector) collect(c *container.Container) chan interface{} {
<ide> return nil
<ide> }
<ide>
<ide> // stopCollection closes the channels for all subscribers and removes
<ide> // the container from metrics collection.
<del>func (s *statsCollector) stopCollection(c *Container) {
<add>func (s *statsCollector) stopCollection(c *container.Container) {
<ide> }
<ide>
<ide> // unsubscribe removes a specific subscriber from receiving updates for a container's stats.
<del>func (s *statsCollector) unsubscribe(c *Container, ch chan interface{}) {
<add>func (s *statsCollector) unsubscribe(c *container.Container, ch chan interface{}) {
<ide> }
<ide><path>daemon/stop.go
<ide> import (
<ide> "time"
<ide>
<ide> "github.com/Sirupsen/logrus"
<add> "github.com/docker/docker/container"
<ide> derr "github.com/docker/docker/errors"
<ide> )
<ide>
<ide> func (daemon *Daemon) ContainerStop(name string, seconds int) error {
<ide> // process to exit. If a negative duration is given, Stop will wait
<ide> // for the initial signal forever. If the container is not running, Stop returns
<ide> // immediately.
<del>func (daemon *Daemon) containerStop(container *Container, seconds int) error {
<add>func (daemon *Daemon) containerStop(container *container.Container, seconds int) error {
<ide> if !container.IsRunning() {
<ide> return nil
<ide> }
<ide>
<ide> // 1. Send a SIGTERM
<del> if err := daemon.killPossiblyDeadProcess(container, container.stopSignal()); err != nil {
<add> if err := daemon.killPossiblyDeadProcess(container, container.StopSignal()); err != nil {
<ide> logrus.Infof("Failed to send SIGTERM to the process, force killing")
<ide> if err := daemon.killPossiblyDeadProcess(container, 9); err != nil {
<ide> return err
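Aside: containerStop above sends the container's stop signal first and escalates to SIGKILL (signal 9) only when delivery fails or the grace period elapses. A minimal Unix-only sketch of that escalation against a plain os/exec process; the grace-period handling is an assumption, not the daemon's real wait logic:

    package main

    import (
        "fmt"
        "os/exec"
        "syscall"
        "time"
    )

    // stop asks the process to exit with SIGTERM, then forces SIGKILL if it
    // is still running once the grace period elapses.
    func stop(cmd *exec.Cmd, grace time.Duration) error {
        if err := cmd.Process.Signal(syscall.SIGTERM); err != nil {
            return cmd.Process.Kill() // could not deliver SIGTERM: force kill
        }
        done := make(chan error, 1)
        go func() { done <- cmd.Wait() }()
        select {
        case <-done:
            return nil // exited gracefully within the grace period
        case <-time.After(grace):
            return cmd.Process.Kill() // grace period elapsed: SIGKILL
        }
    }

    func main() {
        cmd := exec.Command("sleep", "60")
        if err := cmd.Start(); err != nil {
            panic(err)
        }
        fmt.Println(stop(cmd, 2*time.Second))
    }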
<ide><path>daemon/unpause.go
<ide> package daemon
<ide>
<ide> import (
<add> "github.com/docker/docker/container"
<ide> derr "github.com/docker/docker/errors"
<ide> )
<ide>
<ide> func (daemon *Daemon) ContainerUnpause(name string) error {
<ide> }
<ide>
<ide> // containerUnpause resumes the container execution after the container is paused.
<del>func (daemon *Daemon) containerUnpause(container *Container) error {
<add>func (daemon *Daemon) containerUnpause(container *container.Container) error {
<ide> container.Lock()
<ide> defer container.Unlock()
<ide>
<ide> func (daemon *Daemon) containerUnpause(container *Container) error {
<ide> return derr.ErrorCodeNotPaused.WithArgs(container.ID)
<ide> }
<ide>
<del> if err := daemon.execDriver.Unpause(container.command); err != nil {
<add> if err := daemon.execDriver.Unpause(container.Command); err != nil {
<ide> return err
<ide> }
<ide>
<ide><path>daemon/volumes.go
<ide> import (
<ide> "strings"
<ide>
<ide> "github.com/docker/docker/api/types"
<add> "github.com/docker/docker/container"
<ide> "github.com/docker/docker/daemon/execdriver"
<ide> derr "github.com/docker/docker/errors"
<ide> "github.com/docker/docker/runconfig"
<ide> func (m mounts) parts(i int) int {
<ide> // 2. Select the volumes mounted from another containers. Overrides previously configured mount point destination.
<ide> // 3. Select the bind mounts set by the client. Overrides previously configured mount point destinations.
<ide> // 4. Clean up old volumes that are about to be reassigned.
<del>func (daemon *Daemon) registerMountPoints(container *Container, hostConfig *runconfig.HostConfig) error {
<add>func (daemon *Daemon) registerMountPoints(container *container.Container, hostConfig *runconfig.HostConfig) error {
<ide> binds := map[string]bool{}
<ide> mountPoints := map[string]*volume.MountPoint{}
<ide>
<ide><path>daemon/volumes_unix.go
<ide> package daemon
<ide>
<ide> import (
<del> "io/ioutil"
<ide> "os"
<ide> "sort"
<ide>
<add> "github.com/docker/docker/container"
<ide> "github.com/docker/docker/daemon/execdriver"
<del> "github.com/docker/docker/pkg/chrootarchive"
<del> "github.com/docker/docker/pkg/system"
<ide> "github.com/docker/docker/volume"
<ide> volumedrivers "github.com/docker/docker/volume/drivers"
<ide> "github.com/docker/docker/volume/local"
<ide> )
<ide>
<del>// copyExistingContents copies from the source to the destination and
<del>// ensures the ownership is appropriately set.
<del>func copyExistingContents(source, destination string) error {
<del> volList, err := ioutil.ReadDir(source)
<del> if err != nil {
<del> return err
<del> }
<del> if len(volList) > 0 {
<del> srcList, err := ioutil.ReadDir(destination)
<del> if err != nil {
<del> return err
<del> }
<del> if len(srcList) == 0 {
<del> // If the source volume is empty copy files from the root into the volume
<del> if err := chrootarchive.CopyWithTar(source, destination); err != nil {
<del> return err
<del> }
<del> }
<del> }
<del> return copyOwnership(source, destination)
<del>}
<del>
<del>// copyOwnership copies the permissions and uid:gid of the source file
<del>// to the destination file
<del>func copyOwnership(source, destination string) error {
<del> stat, err := system.Stat(source)
<del> if err != nil {
<del> return err
<del> }
<del>
<del> if err := os.Chown(destination, int(stat.UID()), int(stat.GID())); err != nil {
<del> return err
<del> }
<del>
<del> return os.Chmod(destination, os.FileMode(stat.Mode()))
<del>}
<del>
<ide> // setupMounts iterates through each of the mount points for a container and
<ide> // calls Setup() on each. It also looks to see if is a network mount such as
<ide> // /etc/resolv.conf, and if it is not, appends it to the array of mounts.
<del>func (daemon *Daemon) setupMounts(container *Container) ([]execdriver.Mount, error) {
<add>func (daemon *Daemon) setupMounts(container *container.Container) ([]execdriver.Mount, error) {
<ide> var mounts []execdriver.Mount
<ide> for _, m := range container.MountPoints {
<ide> path, err := m.Setup()
<ide> if err != nil {
<ide> return nil, err
<ide> }
<del> if !container.trySetNetworkMount(m.Destination, path) {
<add> if !container.TrySetNetworkMount(m.Destination, path) {
<ide> mounts = append(mounts, execdriver.Mount{
<ide> Source: path,
<ide> Destination: m.Destination,
<ide> func (daemon *Daemon) setupMounts(container *Container) ([]execdriver.Mount, err
<ide> }
<ide>
<ide> mounts = sortMounts(mounts)
<del> netMounts := container.networkMounts()
<add> netMounts := container.NetworkMounts()
<ide> // if we are going to mount any of the network files from container
<ide> // metadata, the ownership must be set properly for potential container
<ide> // remapped root (user namespaces)
<ide><path>daemon/volumes_windows.go
<ide> package daemon
<ide> import (
<ide> "sort"
<ide>
<add> "github.com/docker/docker/container"
<ide> "github.com/docker/docker/daemon/execdriver"
<ide> derr "github.com/docker/docker/errors"
<ide> "github.com/docker/docker/volume"
<ide> import (
<ide> // of the configured mounts on the container to the execdriver mount structure
<ide> // which will ultimately be passed into the exec driver during container creation.
<ide> // It also ensures each of the mounts is lexicographically sorted.
<del>func (daemon *Daemon) setupMounts(container *Container) ([]execdriver.Mount, error) {
<add>func (daemon *Daemon) setupMounts(container *container.Container) ([]execdriver.Mount, error) {
<ide> var mnts []execdriver.Mount
<ide> for _, mount := range container.MountPoints { // type is volume.MountPoint
<ide> // If there is no source, take it from the volume path
<ide><path>opts/opts_windows.go
<ide> package opts
<ide> // time="2015-11-06T13:38:37.259627400-08:00" level=debug msg="After createRootfs"
<ide> // time="2015-11-06T13:38:37.263626300-08:00" level=debug msg="After setHostConfig"
<ide> // time="2015-11-06T13:38:37.267631200-08:00" level=debug msg="before createContainerPl...."
<del>// time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=toDiskLocking....
<add>// time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=ToDiskLocking....
<ide> // time="2015-11-06T13:38:37.275643200-08:00" level=debug msg="loggin event...."
<ide> // time="2015-11-06T13:38:37.277627600-08:00" level=debug msg="logged event...."
<ide> // time="2015-11-06T13:38:37.279631800-08:00" level=debug msg="In defer func"
| 60
|
Java
|
Java
|
add decorators for clienthttprequest & response
|
71b021c7cca87a6ca1d71532961dd082cf50c66a
|
<ide><path>spring-web/src/main/java/org/springframework/http/client/reactive/ClientHttpRequestDecorator.java
<add>/*
<add> * Copyright 2002-2017 the original author or authors.
<add> *
<add> * Licensed under the Apache License, Version 2.0 (the "License");
<add> * you may not use this file except in compliance with the License.
<add> * You may obtain a copy of the License at
<add> *
<add> * http://www.apache.org/licenses/LICENSE-2.0
<add> *
<add> * Unless required by applicable law or agreed to in writing, software
<add> * distributed under the License is distributed on an "AS IS" BASIS,
<add> * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
<add> * See the License for the specific language governing permissions and
<add> * limitations under the License.
<add> */
<add>package org.springframework.http.client.reactive;
<add>
<add>import java.net.URI;
<add>import java.util.function.Supplier;
<add>
<add>import org.reactivestreams.Publisher;
<add>import reactor.core.publisher.Mono;
<add>
<add>import org.springframework.core.io.buffer.DataBuffer;
<add>import org.springframework.core.io.buffer.DataBufferFactory;
<add>import org.springframework.http.HttpCookie;
<add>import org.springframework.http.HttpHeaders;
<add>import org.springframework.http.HttpMethod;
<add>import org.springframework.util.Assert;
<add>import org.springframework.util.MultiValueMap;
<add>
<add>/**
<add> * Wraps another {@link ClientHttpRequest} and delegates all methods to it.
<add> * Sub-classes can override specific methods selectively.
<add> *
<add> * @author Rossen Stoyanchev
<add> * @since 5.0
<add> */
<add>public class ClientHttpRequestDecorator implements ClientHttpRequest {
<add>
<add> private final ClientHttpRequest delegate;
<add>
<add>
<add> public ClientHttpRequestDecorator(ClientHttpRequest delegate) {
<add> Assert.notNull(delegate, "ClientHttpRequest delegate is required.");
<add> this.delegate = delegate;
<add> }
<add>
<add>
<add> public ClientHttpRequest getDelegate() {
<add> return this.delegate;
<add> }
<add>
<add>
<add> // ClientHttpRequest delegation methods...
<add>
<add> @Override
<add> public HttpMethod getMethod() {
<add> return this.delegate.getMethod();
<add> }
<add>
<add> @Override
<add> public URI getURI() {
<add> return this.delegate.getURI();
<add> }
<add>
<add> @Override
<add> public HttpHeaders getHeaders() {
<add> return this.delegate.getHeaders();
<add> }
<add>
<add> @Override
<add> public MultiValueMap<String, HttpCookie> getCookies() {
<add> return this.delegate.getCookies();
<add> }
<add>
<add> @Override
<add> public DataBufferFactory bufferFactory() {
<add> return this.delegate.bufferFactory();
<add> }
<add>
<add> @Override
<add> public void beforeCommit(Supplier<? extends Mono<Void>> action) {
<add> this.delegate.beforeCommit(action);
<add> }
<add>
<add> @Override
<add> public boolean isCommitted() {
<add> return this.delegate.isCommitted();
<add> }
<add>
<add> @Override
<add> public Mono<Void> writeWith(Publisher<? extends DataBuffer> body) {
<add> return this.delegate.writeWith(body);
<add> }
<add>
<add> @Override
<add> public Mono<Void> writeAndFlushWith(Publisher<? extends Publisher<? extends DataBuffer>> body) {
<add> return this.delegate.writeAndFlushWith(body);
<add> }
<add>
<add> @Override
<add> public Mono<Void> setComplete() {
<add> return this.delegate.setComplete();
<add> }
<add>
<add>
<add> @Override
<add> public String toString() {
<add> return getClass().getSimpleName() + " [delegate=" + getDelegate() + "]";
<add> }
<add>
<add>}
<ide><path>spring-web/src/main/java/org/springframework/http/client/reactive/ClientHttpResponseDecorator.java
<add>/*
<add> * Copyright 2002-2017 the original author or authors.
<add> *
<add> * Licensed under the Apache License, Version 2.0 (the "License");
<add> * you may not use this file except in compliance with the License.
<add> * You may obtain a copy of the License at
<add> *
<add> * http://www.apache.org/licenses/LICENSE-2.0
<add> *
<add> * Unless required by applicable law or agreed to in writing, software
<add> * distributed under the License is distributed on an "AS IS" BASIS,
<add> * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
<add> * See the License for the specific language governing permissions and
<add> * limitations under the License.
<add> */
<add>package org.springframework.http.client.reactive;
<add>
<add>import reactor.core.publisher.Flux;
<add>
<add>import org.springframework.core.io.buffer.DataBuffer;
<add>import org.springframework.http.HttpHeaders;
<add>import org.springframework.http.HttpStatus;
<add>import org.springframework.http.ResponseCookie;
<add>import org.springframework.util.Assert;
<add>import org.springframework.util.MultiValueMap;
<add>
<add>/**
<add> * Wraps another {@link ClientHttpResponse} and delegates all methods to it.
<add> * Sub-classes can override specific methods selectively.
<add> *
<add> * @author Rossen Stoyanchev
<add> * @since 5.0
<add> */
<add>public class ClientHttpResponseDecorator implements ClientHttpResponse {
<add>
<add> private final ClientHttpResponse delegate;
<add>
<add>
<add> public ClientHttpResponseDecorator(ClientHttpResponse delegate) {
<add> Assert.notNull(delegate, "ClientHttpResponse delegate is required.");
<add> this.delegate = delegate;
<add> }
<add>
<add>
<add> public ClientHttpResponse getDelegate() {
<add> return this.delegate;
<add> }
<add>
<add>
<add> // ClientHttpResponse delegation methods...
<add>
<add>
<add> @Override
<add> public HttpStatus getStatusCode() {
<add> return this.delegate.getStatusCode();
<add> }
<add>
<add> @Override
<add> public HttpHeaders getHeaders() {
<add> return this.delegate.getHeaders();
<add> }
<add>
<add> @Override
<add> public MultiValueMap<String, ResponseCookie> getCookies() {
<add> return this.delegate.getCookies();
<add> }
<add>
<add> @Override
<add> public Flux<DataBuffer> getBody() {
<add> return this.delegate.getBody();
<add> }
<add>
<add>
<add> @Override
<add> public String toString() {
<add> return getClass().getSimpleName() + " [delegate=" + getDelegate() + "]";
<add> }
<add>
<add>}
<ide><path>spring-web/src/main/java/org/springframework/http/server/reactive/ServerHttpResponseDecorator.java
<ide> public Mono<Void> setComplete() {
<ide> return getDelegate().setComplete();
<ide> }
<ide>
<add>
<add> @Override
<add> public String toString() {
<add> return getClass().getSimpleName() + " [delegate=" + getDelegate() + "]";
<add> }
<add>
<ide> }
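Aside: both decorators above delegate every interface method to a wrapped instance so subclasses can override selectively. Go has no inheritance, but struct embedding gives the same selective-override shape; a minimal sketch with a hypothetical Request interface, not Spring's actual types:

    package main

    import "fmt"

    type Request interface {
        Method() string
        URI() string
    }

    type basicRequest struct{ method, uri string }

    func (r basicRequest) Method() string { return r.method }
    func (r basicRequest) URI() string    { return r.uri }

    // requestDecorator embeds the delegate: every method not redefined on a
    // wrapper falls through to it, like the Java decorator's delegation methods.
    type requestDecorator struct{ Request }

    // loggingRequest overrides only Method, keeping URI from the delegate.
    type loggingRequest struct{ requestDecorator }

    func (l loggingRequest) Method() string {
        m := l.Request.Method()
        fmt.Println("method asked:", m)
        return m
    }

    func main() {
        req := loggingRequest{requestDecorator{basicRequest{"GET", "/items"}}}
        fmt.Println(req.Method(), req.URI())
    }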
| 3
|
Ruby
|
Ruby
|
remove to_s method
|
4da2f24b0ee819475daae14a839d953777767e8c
|
<ide><path>Library/Homebrew/brew.rb
<ide> end
<ide>
<ide> def require?(path)
<add> path ||= ""
<ide> require path
<ide> rescue LoadError => e
<ide> # we should raise on syntax errors but not if the file doesn't exist.
<ide><path>Library/Homebrew/cask/lib/hbc/cli.rb
<ide> def self.run_command(command, *rest)
<ide> if command.respond_to?(:run)
<ide> # usual case: built-in command verb
<ide> command.run(*rest)
<del> elsif require?(which("brewcask-#{command}.rb").to_s)
<add> elsif require?(which("brewcask-#{command}.rb"))
<ide> # external command as Ruby library on PATH, Homebrew-style
<ide> elsif command.to_s.include?("/") && require?(command.to_s)
<ide> # external command as Ruby library with literal path, useful
| 2
|
Ruby
|
Ruby
|
optimize parts of hashwithindifferentaccess
|
94617d7da1e76daeae3a05c61b42321163f93d62
|
<ide><path>activesupport/lib/active_support/hash_with_indifferent_access.rb
<ide> def extractable_options?
<ide> true
<ide> end
<ide>
<add> def with_indifferent_access
<add> self
<add> end
<add>
<ide> def initialize(constructor = {})
<ide> if constructor.is_a?(Hash)
<ide> super()
<ide> def []=(key, value)
<ide> # hash_1.update(hash_2) # => {"key"=>"New Value!"}
<ide> #
<ide> def update(other_hash)
<del> other_hash.each_pair { |key, value| regular_writer(convert_key(key), convert_value(value)) }
<del> self
<add> if other_hash.is_a? HashWithIndifferentAccess
<add> super(other_hash)
<add> else
<add> other_hash.each_pair { |key, value| regular_writer(convert_key(key), convert_value(value)) }
<add> self
<add> end
<ide> end
<ide>
<ide> alias_method :merge!, :update
<ide><path>activesupport/test/core_ext/hash_ext_test.rb
<ide> def test_should_nil_if_no_default_value_is_supplied
<ide> assert_nil hash_wia.default
<ide> end
<ide>
<add> def test_should_return_self_for_with_indifferent_access
<add> hash_wia = HashWithIndifferentAccess.new
<add> assert_equal hash_wia, hash_wia.with_indifferent_access
<add> end
<add>
<ide> def test_should_copy_the_default_value_when_converting_to_hash_with_indifferent_access
<ide> hash = Hash.new(3)
<ide> hash_wia = hash.with_indifferent_access
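Aside: the update fast path above skips per-key conversion when the incoming hash already normalizes its keys. A Go sketch of the same shortcut on a key-normalizing map; all names here are hypothetical:

    package main

    import "fmt"

    // indifferentMap normalizes every key to a string on write, so equivalent
    // keys (1 and "1") address the same entry.
    type indifferentMap map[string]interface{}

    func normalize(k interface{}) string { return fmt.Sprint(k) }

    func (m indifferentMap) Set(k, v interface{}) { m[normalize(k)] = v }

    func (m indifferentMap) Get(k interface{}) interface{} { return m[normalize(k)] }

    // Update converts each incoming key: the slow, general path.
    func (m indifferentMap) Update(other map[interface{}]interface{}) {
        for k, v := range other {
            m.Set(k, v)
        }
    }

    // UpdateNormalized copies keys verbatim because they are already normalized,
    // the same shortcut the Ruby patch adds for HashWithIndifferentAccess inputs.
    func (m indifferentMap) UpdateNormalized(other indifferentMap) {
        for k, v := range other {
            m[k] = v
        }
    }

    func main() {
        m := indifferentMap{}
        m.Set(1, "one")
        m.UpdateNormalized(indifferentMap{"2": "two"})
        fmt.Println(m.Get("1"), m.Get(2)) // one two
    }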
| 2
|
PHP
|
PHP
|
remove repeated conditions
|
a45d462688c3a3a44f449a79729ad74de7e6418f
|
<ide><path>lib/Cake/Database/Schema/MysqlSchema.php
<ide> public function convertIndexDescription(Table $table, $row) {
<ide> if (!empty($row['Sub_part'])) {
<ide> $length[$row['Column_name']] = $row['Sub_part'];
<ide> }
<del> if ($type == 'index' || $type == 'fulltext') {
<add> $isIndex = (
<add> $type == 'index' ||
<add> $type == 'fulltext'
<add> );
<add> if ($isIndex) {
<ide> $existing = $table->index($name);
<ide> } else {
<ide> $existing = $table->constraint($name);
<ide> public function convertIndexDescription(Table $table, $row) {
<ide> $columns = array_merge($existing['columns'], $columns);
<ide> $length = array_merge($existing['length'], $length);
<ide> }
<del> if ($type == 'index' || $type == 'fulltext') {
<add> if ($isIndex) {
<ide> $table->addIndex($name, [
<ide> 'type' => $type,
<ide> 'columns' => $columns,
| 1
|
Ruby
|
Ruby
|
change time#sec_fraction to use subsec
|
88d844b278163f19e910fc1acadf1dbb4afac527
|
<ide><path>activesupport/lib/active_support/core_ext/time/calculations.rb
<ide> def seconds_until_end_of_day
<ide>
<ide> # Returns the fraction of a second as a +Rational+
<ide> #
<del> # Time.new(2012, 8, 29, 0, 0, 0.5).sec_fraction # => (1/2)
<add> # Time.new(2012, 8, 29, 0, 0, 0.5).sec_fraction # => (1/2)
<ide> def sec_fraction
<del> Rational(nsec, 1000000000)
<add> subsec
<ide> end
<ide>
<ide> # Returns a new Time where one or more of the elements have been changed according
<ide><path>activesupport/test/core_ext/time_ext_test.rb
<ide> def test_seconds_until_end_of_day_at_daylight_savings_time_end
<ide> end
<ide> end
<ide>
<add> def test_sec_fraction
<add> time = Time.utc(2016, 4, 23, 0, 0, Rational(1,10000000000))
<add> assert_equal Rational(1,10000000000), time.sec_fraction
<add>
<add> time = Time.utc(2016, 4, 23, 0, 0, 0.0000000001)
<add> assert_equal 0.0000000001.to_r, time.sec_fraction
<add>
<add> time = Time.utc(2016, 4, 23, 0, 0, 0, Rational(1,10000))
<add> assert_equal Rational(1,10000000000), time.sec_fraction
<add>
<add> time = Time.utc(2016, 4, 23, 0, 0, 0, 0.0001)
<add> assert_equal 0.0001.to_r / 1000000, time.sec_fraction
<add> end
<add>
<ide> def test_beginning_of_day
<ide> assert_equal Time.local(2005,2,4,0,0,0), Time.local(2005,2,4,10,10,10).beginning_of_day
<ide> with_env_tz 'US/Eastern' do
| 2
|
Ruby
|
Ruby
|
use start_with? instead of regexp match
|
de208141628daf6cb93839572721a8810079df58
|
<ide><path>Library/Homebrew/keg_fix_install_names.rb
<ide> def fix_install_names options={}
<ide>
<ide> each_install_name_for(file) do |bad_name|
<ide> # Don't fix absolute paths unless they are rooted in the build directory
<del> next if bad_name.start_with? '/' and not %r[^#{HOMEBREW_TEMP}] === bad_name
<add> next if bad_name.start_with? '/' and not bad_name.start_with? HOMEBREW_TEMP.to_s
<ide>
<ide> new_name = fixed_name(file, bad_name)
<ide> change_install_name(bad_name, new_name, file) unless new_name == bad_name
| 1
|
PHP
|
PHP
|
fix cookies sending by responseemitter
|
4efbfcda4c16f939460bc2bcf9d19e033f810454
|
<ide><path>src/Http/ResponseEmitter.php
<ide> protected function emitStatusLine(ResponseInterface $response)
<ide> */
<ide> protected function emitHeaders(ResponseInterface $response)
<ide> {
<add> $cookies = [];
<add> if (method_exists($response, 'cookie')) {
<add> $cookies = $response->cookie();
<add> }
<add>
<ide> foreach ($response->getHeaders() as $name => $values) {
<ide> if (strtolower($name) === 'set-cookie') {
<del> $this->emitCookies($values);
<add> $cookies = array_merge($cookies, $values);
<ide> continue;
<ide> }
<ide> $first = true;
<ide> protected function emitHeaders(ResponseInterface $response)
<ide> $first = false;
<ide> }
<ide> }
<add>
<add> $this->emitCookies($cookies);
<ide> }
<ide>
<ide> /**
<ide> protected function emitHeaders(ResponseInterface $response)
<ide> */
<ide> protected function emitCookies(array $cookies)
<ide> {
<del> foreach ((array)$cookies as $cookie) {
<add> foreach ($cookies as $cookie) {
<add> if (is_array($cookie)) {
<add> setcookie(
<add> $cookie['name'],
<add> $cookie['value'],
<add> $cookie['expire'],
<add> $cookie['path'],
<add> $cookie['domain'],
<add> $cookie['secure'],
<add> $cookie['httpOnly']
<add> );
<add> continue;
<add> }
<add>
<ide> if (strpos($cookie, '";"') !== false) {
<ide> $cookie = str_replace('";"', "{__cookie_replace__}", $cookie);
<ide> $parts = str_replace("{__cookie_replace__}", '";"', explode(';', $cookie));
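Aside: the fix above funnels structured cookie arrays and raw Set-Cookie header strings through a single emit path so neither source drops the other's cookies. A sketch of the equivalent merge with Go's standard library, as an illustration rather than CakePHP's actual API:

    package main

    import (
        "fmt"
        "net/http"
        "net/http/httptest"
    )

    // emitCookies writes structured cookies and preformatted Set-Cookie strings
    // through one path, so neither source overwrites or loses the other's cookies.
    func emitCookies(w http.ResponseWriter, structured []*http.Cookie, raw []string) {
        for _, c := range structured {
            http.SetCookie(w, c) // serializes name, value, path, expiry, flags
        }
        for _, s := range raw {
            w.Header().Add("Set-Cookie", s) // already-formatted header values
        }
    }

    func main() {
        rec := httptest.NewRecorder()
        emitCookies(rec,
            []*http.Cookie{{Name: "session", Value: "abc", Path: "/"}},
            []string{"theme=dark; Path=/"})
        fmt.Println(rec.Header()["Set-Cookie"])
    }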
| 1
|
Python
|
Python
|
add top script to generate binaries from scratch
|
d23e5e4cde838c1aa46b0e085955cdb959e6755a
|
<ide><path>tools/win32build/doall.py
<add>import subprocess
<add>import os
<add>
<add>PYVER = "2.5"
<add>
<add># Bootstrap
<add>subprocess.check_call(['python', 'prepare_bootstrap.py'])
<add>
<add># Build binaries
<add>subprocess.check_call(['python', 'build.py', '-p', PYVER], cwd = 'bootstrap-%s' % PYVER)
<add>
<add># Build installer using nsis
<add>subprocess.check_call(['makensis', 'numpy-superinstaller.nsi'], cwd = 'bootstrap-%s' % PYVER)
| 1
|
PHP
|
PHP
|
use implicit null instead of explicit
|
c4da7b11df210e79c3156fff648768e5bac79825
|
<ide><path>src/Illuminate/Cookie/CookieJar.php
<ide> class CookieJar implements JarContract
<ide> *
<ide> * @var string
<ide> */
<del> protected $domain = null;
<add> protected $domain;
<ide>
<ide> /**
<ide> * The default secure setting (defaults to false).
<ide><path>src/Illuminate/Foundation/Application.php
<ide> class Application extends Container implements ApplicationContract, HttpKernelIn
<ide> *
<ide> * @var string
<ide> */
<del> protected $namespace = null;
<add> protected $namespace;
<ide>
<ide> /**
<ide> * Create a new Illuminate application instance.
<ide><path>src/Illuminate/Notifications/Messages/MailMessage.php
<ide> class MailMessage extends SimpleMessage
<ide> *
<ide> * @var int
<ide> */
<del> public $priority = null;
<add> public $priority;
<ide>
<ide> /**
<ide> * Set the view for the mail message.
<ide><path>src/Illuminate/Notifications/Messages/SimpleMessage.php
<ide> class SimpleMessage
<ide> /**
<ide> * The notification's greeting.
<ide> *
<del> * @var string|null
<add> * @var string
<ide> */
<del> public $greeting = null;
<add> public $greeting;
<ide>
<ide> /**
<ide> * The "intro" lines of the notification.
<ide><path>src/Illuminate/Notifications/SendQueuedNotifications.php
<ide> class SendQueuedNotifications implements ShouldQueue
<ide> *
<ide> * @var array
<ide> */
<del> protected $channels = null;
<add> protected $channels;
<ide>
<ide> /**
<ide> * Create a new job instance.
<ide><path>src/Illuminate/Pagination/AbstractPaginator.php
<ide> abstract class AbstractPaginator implements Htmlable
<ide> *
<ide> * @var string|null
<ide> */
<del> protected $fragment = null;
<add> protected $fragment;
<ide>
<ide> /**
<ide> * The query string variable used to store the page.
<ide><path>src/Illuminate/Validation/DatabasePresenceVerifier.php
<ide> class DatabasePresenceVerifier implements PresenceVerifierInterface
<ide> *
<ide> * @var string
<ide> */
<del> protected $connection = null;
<add> protected $connection;
<ide>
<ide> /**
<ide> * Create a new database presence verifier.
<ide><path>tests/Database/DatabaseEloquentBelongsToTest.php
<ide> class AnotherEloquentBelongsToModelStub extends Illuminate\Database\Eloquent\Mod
<ide>
<ide> class MissingEloquentBelongsToModelStub extends Illuminate\Database\Eloquent\Model
<ide> {
<del> public $foreign_key = null;
<add> public $foreign_key;
<ide> }
<ide><path>tests/Mail/MailSesTransportTest.php
<ide> public function testSend()
<ide>
<ide> class sendRawEmailMock
<ide> {
<del> protected $getResponse = null;
<add> protected $getResponse;
<ide>
<ide> public function __construct($responseValue)
<ide> {
<ide><path>tests/Queue/QueueWorkerTest.php
<ide> private function workerOptions(array $overrides = [])
<ide> */
<ide> class InsomniacWorker extends Illuminate\Queue\Worker
<ide> {
<del> public $sleptFor = null;
<add> public $sleptFor;
<ide>
<ide> public function sleep($seconds)
<ide> {
| 10
|
Javascript
|
Javascript
|
print more lines in the error diff
|
81496567e76006a7d07a8552215fc6333c183480
|
<ide><path>lib/internal/assert/assertion_error.js
<ide> function createErrDiff(actual, expected, operator) {
<ide> let a = actualLines[actualLines.length - 1];
<ide> let b = expectedLines[expectedLines.length - 1];
<ide> while (a === b) {
<del> if (i++ < 2) {
<add> if (i++ < 3) {
<ide> end = `\n ${a}${end}`;
<ide> } else {
<ide> other = a;
<ide> function createErrDiff(actual, expected, operator) {
<ide> return `${kReadableOperator.notIdentical}\n\n${actualLines.join('\n')}\n`;
<ide> }
<ide>
<del> if (i > 3) {
<add> // There were at least five identical lines at the end. Mark a couple of
<add> // skipped.
<add> if (i >= 5) {
<ide> end = `\n${blue}...${white}${end}`;
<ide> skipped = true;
<ide> }
<ide> function createErrDiff(actual, expected, operator) {
<ide> `\n${green}+ actual${white} ${red}- expected${white}`;
<ide> const skippedMsg = ` ${blue}...${white} Lines skipped`;
<ide>
<add> let lines = actualLines;
<add> let plusMinus = `${green}+${white}`;
<add> let maxLength = expectedLines.length;
<add> if (actualLines.length < maxLines) {
<add> lines = expectedLines;
<add> plusMinus = `${red}-${white}`;
<add> maxLength = actualLines.length;
<add> }
<add>
<ide> for (i = 0; i < maxLines; i++) {
<del> if (actualLines.length < i + 1) {
<del> // If more than one former line is identical, print that. Collapse those
<del> // in case more than three lines before were identical.
<del> if (identical > 1) {
<add> if (maxLength < i + 1) {
<add> // If more than two former lines are identical, print them. Collapse them
<add> // in case more than five lines were identical.
<add> if (identical > 2) {
<ide> if (identical > 3) {
<del> res += `\n${blue}...${white}`;
<del> skipped = true;
<del> } else if (identical > 2) {
<del> res += `\n ${expectedLines[i - 2]}`;
<add> if (identical > 4) {
<add> if (identical === 5) {
<add> res += `\n ${lines[i - 3]}`;
<add> printedLines++;
<add> } else {
<add> res += `\n${blue}...${white}`;
<add> skipped = true;
<add> }
<add> }
<add> res += `\n ${lines[i - 2]}`;
<ide> printedLines++;
<ide> }
<del> res += `\n ${expectedLines[i - 1]}`;
<add> res += `\n ${lines[i - 1]}`;
<ide> printedLines++;
<ide> }
<ide> // No identical lines before.
<ide> identical = 0;
<ide> // Add the expected line to the cache.
<del> other += `\n${red}-${white} ${expectedLines[i]}`;
<del> printedLines++;
<del> // Only extra actual lines exist
<del> } else if (expectedLines.length < i + 1) {
<del> // If more than one former line is identical, print that. Collapse those
<del> // in case more than three lines before were identical.
<del> if (identical > 1) {
<del> if (identical > 3) {
<del> res += `\n${blue}...${white}`;
<del> skipped = true;
<del> } else if (identical > 2) {
<del> res += `\n ${actualLines[i - 2]}`;
<del> printedLines++;
<del> }
<del> res += `\n ${actualLines[i - 1]}`;
<del> printedLines++;
<add> if (lines === actualLines) {
<add> res += `\n${plusMinus} ${lines[i]}`;
<add> } else {
<add> other += `\n${plusMinus} ${lines[i]}`;
<ide> }
<del> // No identical lines before.
<del> identical = 0;
<del> // Add the actual line to the result.
<del> res += `\n${green}+${white} ${actualLines[i]}`;
<ide> printedLines++;
<add> // Only extra actual lines exist
<ide> // Lines diverge
<ide> } else {
<ide> const expectedLine = expectedLines[i];
<ide> function createErrDiff(actual, expected, operator) {
<ide> actualLine += ',';
<ide> }
<ide> if (divergingLines) {
<del> // If more than one former line is identical, print that. Collapse those
<del> // in case more than three lines before were identical.
<del> if (identical > 1) {
<add> // If more than two former lines are identical, print them. Collapse
<add> // them in case more than five lines were identical.
<add> if (identical > 2) {
<ide> if (identical > 3) {
<del> res += `\n${blue}...${white}`;
<del> skipped = true;
<del> } else if (identical > 2) {
<add> if (identical > 4) {
<add> if (identical === 5) {
<add> res += `\n ${actualLines[i - 3]}`;
<add> printedLines++;
<add> } else {
<add> res += `\n${blue}...${white}`;
<add> skipped = true;
<add> }
<add> }
<ide> res += `\n ${actualLines[i - 2]}`;
<ide> printedLines++;
<ide> }
<ide> function createErrDiff(actual, expected, operator) {
<ide> identical++;
<ide> // The very first identical line since the last diverging line is
<ide> // added to the result.
<del> if (identical === 1) {
<add> if (identical <= 2) {
<ide> res += `\n ${actualLine}`;
<ide> printedLines++;
<ide> }
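Aside: the reworked createErrDiff keeps short runs of identical context lines verbatim and collapses longer runs into a "..." marker. A simplified Go sketch of that collapse rule, using a single threshold instead of Node's exact before-and-after bookkeeping:

    package main

    import (
        "fmt"
        "strings"
    )

    // collapse emits changed lines verbatim; runs of unchanged context longer
    // than max collapse to a single "..." marker, as the assert diff now does.
    func collapse(lines []string, changed map[int]bool, max int) string {
        var out, buf []string
        flush := func() {
            if len(buf) > max {
                out = append(out, "...")
            } else {
                out = append(out, buf...)
            }
            buf = buf[:0]
        }
        for i, l := range lines {
            if changed[i] {
                flush()
                out = append(out, "+ "+l)
            } else {
                buf = append(buf, "  "+l)
            }
        }
        flush()
        return strings.Join(out, "\n")
    }

    func main() {
        lines := []string{"1", "1", "1", "1", "1", "0", "1"}
        fmt.Println(collapse(lines, map[int]bool{5: true}, 3))
        // the five identical leading lines collapse to "..."; short runs stay
    }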
<ide><path>test/parallel/test-assert-deep.js
<ide> assert.throws(
<ide> code: 'ERR_ASSERTION',
<ide> message: `${defaultMsgStartFull} ... Lines skipped\n\n` +
<ide> '+ Uint8Array [\n' +
<del> '- Buffer [Uint8Array] [\n 120,\n...\n 10\n ]'
<add> '- Buffer [Uint8Array] [\n 120,\n...\n 122,\n 10\n ]'
<ide> }
<ide> );
<ide> assert.deepEqual(arr, buf);
<ide> assert.deepEqual(arr, buf);
<ide> () => assert.deepStrictEqual(buf2, buf),
<ide> {
<ide> code: 'ERR_ASSERTION',
<del> message: `${defaultMsgStartFull} ... Lines skipped\n\n` +
<add> message: `${defaultMsgStartFull}\n\n` +
<ide> ' Buffer [Uint8Array] [\n' +
<del> '...\n' +
<add> ' 120,\n' +
<add> ' 121,\n' +
<add> ' 122,\n' +
<ide> ' 10,\n' +
<ide> '+ prop: 1\n' +
<ide> ' ]'
<ide> assert.deepEqual(arr, buf);
<ide> () => assert.deepStrictEqual(arr, arr2),
<ide> {
<ide> code: 'ERR_ASSERTION',
<del> message: `${defaultMsgStartFull} ... Lines skipped\n\n` +
<add> message: `${defaultMsgStartFull}\n\n` +
<ide> ' Uint8Array [\n' +
<del> '...\n' +
<add> ' 120,\n' +
<add> ' 121,\n' +
<add> ' 122,\n' +
<ide> ' 10,\n' +
<ide> '- prop: 5\n' +
<ide> ' ]'
<ide> assert.deepStrictEqual(obj1, obj2);
<ide> ),
<ide> {
<ide> message: 'Expected values to be strictly deep-equal:\n' +
<del> '+ actual - expected ... Lines skipped\n' +
<del> '\n' +
<del> ' Comparison {\n' +
<del> '...\n' +
<del> " \"+ foo: 'bar'\\n\" +\n" +
<del> "+ \"- foo: 'baz.'\\n\" +\n" +
<del> "- \"- foo: 'baz'\\n\" +\n" +
<del> " ' }',\n" +
<del> "+ operator: 'deepStrictEqual'\n" +
<del> "- operator: 'throws'\n" +
<del> ' }'
<add> '+ actual - expected ... Lines skipped\n' +
<add> '\n' +
<add> ' Comparison {\n' +
<add> " message: 'Expected values to be strictly deep-equal:\\n' +\n" +
<add> '...\n' +
<add> " ' [TypeError: foo] {\\n' +\n" +
<add> " \"+ foo: 'bar'\\n\" +\n" +
<add> "+ \"- foo: 'baz.'\\n\" +\n" +
<add> "- \"- foo: 'baz'\\n\" +\n" +
<add> " ' }',\n" +
<add> "+ operator: 'deepStrictEqual'\n" +
<add> "- operator: 'throws'\n" +
<add> ' }'
<ide> }
<ide> );
<ide> }
<ide><path>test/parallel/test-assert.js
<ide> assert.throws(
<ide> '',
<ide> ' [',
<ide> ' [',
<del> '...',
<add> ' [',
<add> ' 1,',
<ide> ' 2,',
<ide> '+ 3',
<ide> "- '3'",
<ide> ' ]',
<ide> '...',
<add> ' 4,',
<ide> ' 5',
<ide> ' ]'].join('\n');
<ide> assert.throws(
<ide> assert.throws(
<ide> ' [',
<ide> ' 1,',
<ide> '...',
<add> ' 1,',
<ide> ' 0,',
<ide> '- 1,',
<ide> ' 1,',
<ide> '...',
<add> ' 1,',
<ide> ' 1',
<ide> ' ]'
<ide> ].join('\n');
<ide> assert.throws(
<ide> ' [',
<ide> ' 1,',
<ide> '...',
<add> ' 1,',
<ide> ' 0,',
<ide> '+ 1,',
<ide> ' 1,',
<del> '...',
<add> ' 1,',
<ide> ' 1',
<ide> ' ]'
<ide> ].join('\n');
<ide><path>test/pseudo-tty/test-assert-colors.js
<ide> try {
<ide> // active.
<ide> process.env.TERM = 'FOOBAR';
<ide> delete process.env.NODE_DISABLE_COLORS;
<del> assert.deepStrictEqual([1, 2, 2, 2], [2, 2, 2, 2]);
<add> assert.deepStrictEqual([1, 2, 2, 2, 2], [2, 2, 2, 2, 2]);
<ide> } catch (err) {
<ide> const expected = 'Expected values to be strictly deep-equal:\n' +
<ide> '\u001b[32m+ actual\u001b[39m \u001b[31m- expected\u001b[39m' +
<ide> try {
<ide> '\u001b[31m-\u001b[39m 2,\n' +
<ide> ' 2,\n' +
<ide> '\u001b[34m...\u001b[39m\n' +
<add> ' 2,\n' +
<ide> ' 2\n' +
<ide> ' ]';
<ide> assert.strictEqual(err.message, expected);
| 4
|
Javascript
|
Javascript
|
fix url validator example
|
9272a1a47279bfe5946dc3f23730d5e3b6259552
|
<ide><path>src/validators.js
<ide> extend(angularValidator, {
<ide> * @example
<ide> <doc:example>
<ide> <doc:source>
<del> Enter valid phone number:
<add> Enter valid URL:
<ide> <input name="text" value="http://example.com/abc.html" size="40" ng:validate="url" >
<ide> </doc:source>
<ide> <doc:scenario>
| 1
|
Text
|
Text
|
update faq entry about intermediate output display
|
6dfa8b1d60f53f3d39e8286a5e3d6ba21dcbbb02
|
<ide><path>docs/templates/getting-started/faq.md
<ide> - [How can I run Keras on GPU?](#how-can-i-run-keras-on-gpu)
<ide> - [How can I save a Keras model?](#how-can-i-save-a-keras-model)
<ide> - [Why is the training loss much higher than the testing loss?](#why-is-the-training-loss-much-higher-than-the-testing-loss)
<del>- [How can I visualize the output of an intermediate layer?](#how-can-i-visualize-the-output-of-an-intermediate-layer)
<add>- [How can I obtain the output of an intermediate layer?](#how-can-i-obtain-the-output-of-an-intermediate-layer)
<ide> - [How can I use Keras with datasets that don't fit in memory?](#how-can-i-use-keras-with-datasets-that-dont-fit-in-memory)
<ide> - [How can I interrupt training when the validation loss isn't decreasing anymore?](#how-can-i-interrupt-training-when-the-validation-loss-isnt-decreasing-anymore)
<ide> - [How is the validation split computed?](#how-is-the-validation-split-computed)
<ide> Besides, the training loss is the average of the losses over each batch of train
<ide>
<ide> ---
<ide>
<del>### How can I visualize the output of an intermediate layer?
<add>### How can I obtain the output of an intermediate layer?
<ide>
<del>You can build a Keras function that will return the output of a certain layer given a certain input, for example:
<add>One simple way is to create a new `Model` that will output the layers that you are interested in:
<add>
<add>```python
<add>from keras.models import Model
<add>
<add>model = ... # create the original model
<add>
<add>layer_name = 'my_layer'
<add>intermediate_layer_model = Model(input=model.input,
<add> output=model.get_layer(layer_name).output)
<add>intermediate_output = intermediate_layer_model.predict(data)
<add>```
<add>
<add>Alternatively, you can build a Keras function that will return the output of a certain layer given a certain input, for example:
<ide>
<ide> ```python
<ide> from keras import backend as K
<ide> layer_output = get_3rd_layer_output([X, 0])[0]
<ide> layer_output = get_3rd_layer_output([X, 1])[0]
<ide> ```
<ide>
<del>Another more flexible way of getting output from intermediate layers is to use the [functional API](/getting-started/functional-api-guide). For example, if you have created an autoencoder for MNIST:
<del>
<del>```python
<del>inputs = Input(shape=(784,))
<del>encoded = Dense(32, activation='relu')(inputs)
<del>decoded = Dense(784)(encoded)
<del>model = Model(input=inputs, output=decoded)
<del>```
<del>
<del>After compiling and training the model, you can get the output of the data from the encoder like this:
<del>
<del>```python
<del>encoder = Model(input=inputs, output=encoded)
<del>X_encoded = encoder.predict(X)
<del>```
<del>
<ide> ---
<ide>
<ide> ### How can I use Keras with datasets that don't fit in memory?
| 1
|
Ruby
|
Ruby
|
add test case for clear mappings
|
fccb7523008daca92eaec0100742b0a023ce5cbb
|
<ide><path>activerecord/test/cases/connection_adapters/type/type_map_test.rb
<ide> def test_lookup_non_strings
<ide> assert_equal mapping.lookup(3), 'string'
<ide> assert_kind_of Type::Value, mapping.lookup(4)
<ide> end
<add>
<add> def test_clear_mappings
<add> time = Time.new
<add> mapping = TypeMap.new
<add>
<add> mapping.register_type(/time/i, time)
<add> mapping.clear
<add>
<add> assert_not_equal mapping.lookup('time'), time
<add> end
<ide> end
<ide> end
<ide> end
| 1
|
Python
|
Python
|
add test for build error special values
|
e50767cfca081681f527c54d3f6652d11e95e758
|
<ide><path>tests/test_basic.py
<ide> def handler_raises_build_error(error, endpoint, values):
<ide> pytest.raises(BuildError, flask.url_for, 'not.existing')
<ide>
<ide>
<add>def test_url_for_passes_special_values_to_build_error_handler():
<add> app = flask.Flask(__name__)
<add>
<add> @app.url_build_error_handlers.append
<add> def handler(error, endpoint, values):
<add> assert values == {
<add> '_external': False,
<add> '_anchor': None,
<add> '_method': None,
<add> '_scheme': None,
<add> }
<add> return 'handled'
<add>
<add> with app.test_request_context():
<add> flask.url_for('/')
<add>
<add>
<ide> def test_custom_converters():
<ide> from werkzeug.routing import BaseConverter
<ide>
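Since `url_for` now forwards the reserved keyword arguments (`_external`, `_anchor`, `_method`, `_scheme`) to registered build-error handlers, a handler can take them into account when constructing a fallback URL. A minimal sketch, assuming a hypothetical legacy host to fall back to:

```python
from flask import Flask

app = Flask(__name__)

@app.url_build_error_handlers.append
def external_fallback(error, endpoint, values):
    # values now always carries _external, _anchor, _method and _scheme,
    # so a fallback URL can honor the caller's scheme preference.
    scheme = values.get('_scheme') or 'https'
    return '%s://legacy.example.com/%s' % (scheme, endpoint)
```

The string returned by the handler becomes the return value of `url_for`, so no exception reaches the caller.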
| 1
|
Python
|
Python
|
fix linting errors
|
3af989a361f58c4cb0e3d2d4035ef6b246de335d
|
<ide><path>libcloud/common/aws.py
<ide> def add_default_params(self, params):
<ide> def pre_connect_hook(self, params, headers):
<ide> now = datetime.utcnow()
<ide> headers['X-AMZ-Date'] = now.strftime('%Y%m%dT%H%M%SZ')
<del> headers['Authorization'] = self._get_authorization_v4_header(params, headers, now)
<add> headers['Authorization'] = \
<add> self._get_authorization_v4_header(params, headers, now)
<ide>
<ide> return params, headers
<ide>
<ide> def _get_authorization_v4_header(self, params, headers, dt):
<del> assert self.method == 'GET', 'AWS Signature V4 not implemented for other methods than GET'
<add> assert self.method == 'GET', 'AWS Signature V4 not implemented for ' \
<add> 'other methods than GET'
<ide>
<del> return 'AWS4-HMAC-SHA256 Credential=%(u)s/%(c)s, SignedHeaders=%(sh)s, Signature=%(s)s' % {
<del> 'u': self.user_id,
<del> 'c': self._get_credential_scope(dt),
<del> 'sh': self._get_signed_headers(headers),
<del> 's': self._get_signature(params, headers, dt)
<del> }
<add> return 'AWS4-HMAC-SHA256 Credential=%(u)s/%(c)s, ' \
<add> 'SignedHeaders=%(sh)s, Signature=%(s)s' % {
<add> 'u': self.user_id,
<add> 'c': self._get_credential_scope(dt),
<add> 'sh': self._get_signed_headers(headers),
<add> 's': self._get_signature(params, headers, dt)
<add> }
<ide>
<ide> def _get_signature(self, params, headers, dt):
<ide> return _sign(
<ide> def _get_payload_hash(self):
<ide>
<ide> def _get_request_params(self, params):
<ide> # For self.method == GET
<del> return '&'.join(["%s=%s" % (urlquote(k, safe=''), urlquote(str(v), safe='~'))
<add> return '&'.join(["%s=%s" %
<add> (urlquote(k, safe=''), urlquote(str(v), safe='~'))
<ide> for k, v in sorted(params.items())])
<ide>
<ide> def _get_canonical_request(self, params, headers):
<ide><path>libcloud/compute/drivers/ec2.py
<ide> from libcloud.utils.publickey import get_pubkey_ssh2_fingerprint
<ide> from libcloud.utils.publickey import get_pubkey_comment
<ide> from libcloud.utils.iso8601 import parse_date
<del>from libcloud.common.aws import AWSBaseResponse, SignedAWSConnection, V4SignedAWSConnection
<add>from libcloud.common.aws import (AWSBaseResponse, SignedAWSConnection,
<add> V4SignedAWSConnection)
<ide> from libcloud.common.types import (InvalidCredsError, MalformedResponseError,
<ide> LibcloudError)
<ide> from libcloud.compute.providers import Provider
<ide> class EC2Connection(SignedAWSConnection):
<ide>
<ide> class EC2V4Connection(V4SignedAWSConnection):
<ide> """
<del> Represents a single connection to an EC2 Endpoint using signature version 4.
<add> Represents a single connection to an EC2 Endpoint using signature v4.
<ide> """
<ide> version = API_VERSION
<ide> host = REGION_DETAILS['us-east-1']['endpoint']
| 2
|
Text
|
Text
|
add eljefedelrodeodeljefe to collaborators
|
be5d699055e23a11f20c625bc1f8d96cd387bb7a
|
<ide><path>README.md
<ide> information about the governance of the Node.js project, see
<ide> * [calvinmetcalf](https://github.com/calvinmetcalf) - **Calvin Metcalf** <calvin.metcalf@gmail.com>
<ide> * [claudiorodriguez](https://github.com/claudiorodriguez) - **Claudio Rodriguez** <cjrodr@yahoo.com>
<ide> * [domenic](https://github.com/domenic) - **Domenic Denicola** <d@domenic.me>
<add>* [eljefedelrodeodeljefe](https://github.com/eljefedelrodeodeljefe) - **Robert Lindstaedt** <robert.lindstaedt@gmail.com>
<ide> * [estliberitas](https://github.com/estliberitas) - **Alexander Makarenko** <estliberitas@gmail.com>
<ide> * [geek](https://github.com/geek) - **Wyatt Preul** <wpreul@gmail.com>
<ide> * [iarna](https://github.com/iarna) - **Rebecca Turner** <me@re-becca.org>
| 1
|
Text
|
Text
|
add a link to front-end integrations
|
51762aece5a08f8961989c367a87be2ff2a52174
|
<ide><path>docs/index.md
<ide>
<ide> You can get the latest version of Chart.js from [npm](https://npmjs.com/package/chart.js), the [GitHub releases](https://github.com/chartjs/Chart.js/releases/latest), or use a [Chart.js CDN](https://www.jsdelivr.com/package/npm/chart.js). Detailed installation instructions can be found on the [installation](./getting-started/installation.md) page.
<ide>
<add>If you're using a front-end framework (e.g., React, Angular, or Vue), please check [available integrations](https://github.com/chartjs/awesome#integrations).
<add>
<ide> ## Creating a Chart
<ide>
<ide> It's easy to get started with Chart.js. All that's required is the script included in your page along with a single `<canvas>` node to render the chart.
| 1
|
Javascript
|
Javascript
|
add moveacross test for pointer events
|
93b51b5f868e3b18c448422eccbd1bab4ca4889e
|
<ide><path>packages/rn-tester/js/examples/Experimental/PlatformTest/RNTesterPlatformTestEventRecorder.js
<add>/**
<add> * Copyright (c) Meta Platforms, Inc. and affiliates.
<add> *
<add> * This source code is licensed under the MIT license found in the
<add> * LICENSE file in the root directory of this source tree.
<add> *
<add> * @format
<add> * @flow
<add> */
<add>
<add>import type {ViewProps} from 'react-native/Libraries/Components/View/ViewPropTypes';
<add>
<add>import {useMemo} from 'react';
<add>
<add>type EventRecorderOptions = $ReadOnly<{
<add> mergeEventTypes: Array<string>,
<add> relevantEvents: Array<string>,
<add>}>;
<add>
<add>type EventRecord = {
<add> chronologicalOrder: number,
<add> sequentialOccurrences: number,
<add> nestedEvents: ?Array<EventRecord>,
<add> target: string,
<add> type: string,
<add> event: Object,
<add>};
<add>
<add>class RNTesterPlatformTestEventRecorder {
<add> allRecords: Array<EventRecord> = [];
<add> relevantEvents: Array<string> = [];
<add> rawOrder: number = 1;
<add> eventsInScope: Array<EventRecord> = []; // Tracks synchronous event dispatches
<add> recording: boolean = true;
<add>
<add> mergeTypesTruthMap: {[string]: boolean} = {};
<add>
<add> constructor(options: EventRecorderOptions) {
<add> if (options.mergeEventTypes && Array.isArray(options.mergeEventTypes)) {
<add> options.mergeEventTypes.forEach(eventType => {
<add> this.mergeTypesTruthMap[eventType] = true;
<add> });
<add> }
<add> if (options.relevantEvents && Array.isArray(options.relevantEvents)) {
<add> this.relevantEvents = options.relevantEvents;
<add> }
<add> }
<add>
<add> _createEventRecord(
<add> rawEvent: Object,
<add> target: string,
<add> type: string,
<add> ): EventRecord {
<add> return {
<add> chronologicalOrder: this.rawOrder++,
<add> sequentialOccurrences: 1,
<add> nestedEvents: undefined,
<add> target,
<add> type,
<add> event: rawEvent,
<add> };
<add> }
<add>
<add> _recordEvent(e: Object, targetName: string, eventType: string): ?EventRecord {
<add> const record = this._createEventRecord(e, targetName, eventType);
<add> let recordList = this.allRecords;
<add> // Adjust which sequential list to use depending on scope
<add> if (this.eventsInScope.length > 0) {
<add> let newRecordList =
<add> this.eventsInScope[this.eventsInScope.length - 1].nestedEvents;
<add> if (newRecordList == null) {
<add> newRecordList = this.eventsInScope[
<add> this.eventsInScope.length - 1
<add> ].nestedEvents = [];
<add> }
<add> recordList = newRecordList;
<add> }
<add> if (this.mergeTypesTruthMap[eventType] && recordList.length > 0) {
<add> const tail = recordList[recordList.length - 1];
<add> // Same type and target?
<add> if (tail.type === eventType && tail.target === targetName) {
<add> tail.sequentialOccurrences++;
<add> return;
<add> }
<add> }
<add> recordList.push(record);
<add> return record;
<add> }
<add>
<add> _generateRecordedEventHandlerWithCallback(
<add> targetName: string,
<add> callback?: (event: Object, eventType: string) => void,
<add> ): (Object, string) => void {
<add> return (e: Object, eventType: string) => {
<add> if (this.recording) {
<add> this._recordEvent(e, targetName, eventType);
<add> if (callback) {
<add> callback(e, eventType);
<add> }
<add> }
<add> };
<add> }
<add>
<add> useRecorderTestEventHandlers(
<add> targetNames: $ReadOnlyArray<string>,
<add> callback?: (event: Object, eventType: string, targetName: string) => void,
<add> ): $ReadOnly<{[targetName: string]: ViewProps}> {
<add> // Yes this method exists as a class's prototype method but it will still only be used
<add> // in functional components
<add> // eslint-disable-next-line react-hooks/rules-of-hooks
<add> return useMemo(() => {
<add> const result: {[targetName: string]: ViewProps} = {};
<add> for (const targetName of targetNames) {
<add> const recordedEventHandler =
<add> this._generateRecordedEventHandlerWithCallback(
<add> targetName,
<add> (event, eventType) =>
<add> callback && callback(event, eventType, targetName),
<add> );
<add> const eventListenerProps = this.relevantEvents.reduce(
<add> (acc, eventName) => {
<add> const eventPropName =
<add> 'on' + eventName[0].toUpperCase() + eventName.slice(1);
<add> return {
<add> ...acc,
<add> [eventPropName]: e => {
<add> recordedEventHandler(e, eventName);
<add> },
<add> };
<add> },
<add> {},
<add> );
<add> result[targetName] = eventListenerProps;
<add> }
<add> return result;
<add> }, [callback, targetNames]);
<add> }
<add>
<add> checkRecords(
<add> expected: Array<{
<add> type: string,
<add> target: string,
<add> optional?: boolean,
<add> }>,
<add> ): boolean {
<add> if (expected.length < this.allRecords.length) {
<add> return false;
<add> }
<add> let j = 0;
<add> for (let i = 0; i < expected.length; ++i) {
<add> if (j >= this.allRecords.length) {
<add> if (expected[i].optional === true) {
<add> continue;
<add> }
<add> return false;
<add> }
<add> if (
<add> expected[i].type === this.allRecords[j].type &&
<add> expected[i].target === this.allRecords[j].target
<add> ) {
<add> j++;
<add> continue;
<add> }
<add> if (expected[i].optional === true) {
<add> continue;
<add> }
<add> return false;
<add> }
<add> return true;
<add> }
<add>}
<add>
<add>export default RNTesterPlatformTestEventRecorder;
<ide><path>packages/rn-tester/js/examples/Experimental/W3CPointerEventPlatformTests/PointerEventPointerMoveAcross.js
<add>/**
<add> * Copyright (c) Meta Platforms, Inc. and affiliates.
<add> *
<add> * This source code is licensed under the MIT license found in the
<add> * LICENSE file in the root directory of this source tree.
<add> *
<add> * @format
<add> * @flow
<add> */
<add>
<add>import type {PlatformTestComponentBaseProps} from '../PlatformTest/RNTesterPlatformTestTypes';
<add>
<add>import RNTesterPlatformTest from '../PlatformTest/RNTesterPlatformTest';
<add>import RNTesterPlatformTestEventRecorder from '../PlatformTest/RNTesterPlatformTestEventRecorder';
<add>import * as React from 'react';
<add>import {useCallback, useState} from 'react';
<add>import {View, StyleSheet} from 'react-native';
<add>
<add>const styles = StyleSheet.create({
<add> a: {
<add> backgroundColor: 'red',
<add> height: 120,
<add> width: 200,
<add> },
<add> b: {
<add> marginLeft: 100,
<add> height: 120,
<add> width: 200,
<add> backgroundColor: 'green',
<add> },
<add> c: {
<add> height: 120,
<add> width: 200,
<add> backgroundColor: 'yellow',
<add> marginVertical: 100,
<add> marginLeft: 100,
<add> },
<add> a1: {
<add> backgroundColor: 'blue',
<add> height: 120,
<add> width: 200,
<add> },
<add> b1: {
<add> padding: 1,
<add> marginLeft: 100,
<add> height: 120,
<add> width: 200,
<add> backgroundColor: 'green',
<add> },
<add> c1: {
<add> height: 120,
<add> width: 200,
<add> backgroundColor: 'black',
<add> marginLeft: 100,
<add> },
<add>});
<add>
<add>const relevantEvents = [
<add> 'pointerMove',
<add> 'pointerOver',
<add> 'pointerEnter',
<add> 'pointerOut',
<add> 'pointerLeave',
<add>];
<add>
<add>const expected = [
<add> {type: 'pointerOver', target: 'a'},
<add> {type: 'pointerEnter', target: 'c'},
<add> {type: 'pointerEnter', target: 'b'},
<add> {type: 'pointerEnter', target: 'a'},
<add> {type: 'pointerMove', target: 'a', optional: true},
<add> {type: 'pointerOut', target: 'a'},
<add> {type: 'pointerLeave', target: 'a'},
<add> {type: 'pointerLeave', target: 'b'},
<add> {type: 'pointerOver', target: 'c'},
<add> {type: 'pointerMove', target: 'c', optional: true},
<add> {type: 'pointerOut', target: 'c'},
<add> {type: 'pointerLeave', target: 'c'},
<add> {type: 'pointerOver', target: 'a1'},
<add> {type: 'pointerEnter', target: 'c1'},
<add> {type: 'pointerEnter', target: 'b1'},
<add> {type: 'pointerEnter', target: 'a1'},
<add> {type: 'pointerMove', target: 'a1', optional: true},
<add> {type: 'pointerOut', target: 'a1'},
<add> {type: 'pointerLeave', target: 'a1'},
<add> {type: 'pointerLeave', target: 'b1'},
<add> {type: 'pointerOver', target: 'c1'},
<add> {type: 'pointerMove', target: 'c1', optional: true},
<add> {type: 'pointerOut', target: 'c1'},
<add> {type: 'pointerLeave', target: 'c1'},
<add>];
<add>
<add>const targetNames = ['a', 'b', 'c', 'a1', 'b1', 'c1'];
<add>
<add>// adapted from https://github.com/web-platform-tests/wpt/blob/master/uievents/order-of-events/mouse-events/mousemove-across.html
<add>function PointerEventPointerMoveAcrossTestCase(
<add> props: PlatformTestComponentBaseProps,
<add>) {
<add> const {harness} = props;
<add>
<add> const pointermove_across = harness.useAsyncTest(
<add> 'Pointermove events across elements should fire in the correct order.',
<add> );
<add>
<add> const [eventRecorder] = useState(
<add> () =>
<add> new RNTesterPlatformTestEventRecorder({
<add> mergeEventTypes: ['pointerMove'],
<add> relevantEvents,
<add> }),
<add> );
<add>
<add> const eventHandler = useCallback(
<add> (event: PointerEvent, eventType: string, eventTarget: string) => {
<add> event.stopPropagation();
<add> if (eventTarget === 'c1' && eventType === 'pointerLeave') {
<add> pointermove_across.step(({assert_true}) => {
<add> assert_true(
<add> eventRecorder.checkRecords(expected),
<add> 'Expected events to occur in the correct order',
<add> );
<add> });
<add> pointermove_across.done();
<add> }
<add> },
<add> [eventRecorder, pointermove_across],
<add> );
<add>
<add> const eventProps = eventRecorder.useRecorderTestEventHandlers(
<add> targetNames,
<add> eventHandler,
<add> );
<add>
<add> return (
<add> <>
<add> <View {...eventProps.c} style={styles.c}>
<add> <View {...eventProps.b} style={styles.b}>
<add> <View {...eventProps.a} style={styles.a} />
<add> </View>
<add> </View>
<add> <View {...eventProps.c1} style={styles.c1}>
<add> <View {...eventProps.b1} style={styles.b1}>
<add> <View {...eventProps.a1} style={styles.a1} />
<add> </View>
<add> </View>
<add> </>
<add> );
<add>}
<add>
<add>type Props = $ReadOnly<{}>;
<add>export default function PointerEventPointerMoveAcross(
<add> props: Props,
<add>): React.MixedElement {
<add> return (
<add> <RNTesterPlatformTest
<add> component={PointerEventPointerMoveAcrossTestCase}
<add> description=""
<add> instructions={[
<add> 'Move your mouse across the yellow/red <div> element quickly from right to left',
<add> 'Move your mouse across the black/blue <div> element quickly from right to left',
<add> 'If the test fails, redo it and move faster on the black/blue <div> element next time.',
<add> ]}
<add> title="Pointermove handling across elements"
<add> />
<add> );
<add>}
<ide><path>packages/rn-tester/js/examples/Experimental/W3CPointerEventsExample.js
<ide> import CompatibilityNativeGestureHandling from './Compatibility/CompatibilityNat
<ide> import PointerEventPrimaryTouchPointer from './W3CPointerEventPlatformTests/PointerEventPrimaryTouchPointer';
<ide> import PointerEventAttributesNoHoverPointers from './W3CPointerEventPlatformTests/PointerEventAttributesNoHoverPointers';
<ide> import PointerEventPointerMoveOnChordedMouseButton from './W3CPointerEventPlatformTests/PointerEventPointerMoveOnChordedMouseButton';
<add>import PointerEventPointerMoveAcross from './W3CPointerEventPlatformTests/PointerEventPointerMoveAcross';
<ide> import EventfulView from './W3CPointerEventsEventfulView';
<ide>
<ide> function AbsoluteChildExample({log}: {log: string => void}) {
<ide> export default {
<ide> return <PointerEventPointerMoveOnChordedMouseButton />;
<ide> },
<ide> },
<add> {
<add> name: 'pointerevent_pointermove_across',
<add> description: '',
<add> title: 'Pointermove handling across elements',
<add> render(): React.Node {
<add> return <PointerEventPointerMoveAcross />;
<add> },
<add> },
<ide> CompatibilityAnimatedPointerMove,
<ide> CompatibilityNativeGestureHandling,
<ide> ],
| 3
|
Python
|
Python
|
prevent crash in poly1d.__eq__
|
18b7cd9df7a4d960550b18faa14d5473e7d5c3d9
|
<ide><path>numpy/lib/polynomial.py
<ide> def __rdiv__(self, other):
<ide> __rtruediv__ = __rdiv__
<ide>
<ide> def __eq__(self, other):
<add> if not isinstance(other, poly1d):
<add> return NotImplemented
<ide> if self.coeffs.shape != other.coeffs.shape:
<ide> return False
<ide> return (self.coeffs == other.coeffs).all()
<ide>
<ide> def __ne__(self, other):
<add> if not isinstance(other, poly1d):
<add> return NotImplemented
<ide> return not self.__eq__(other)
<ide>
<ide> def __setattr__(self, key, val):
<ide><path>numpy/lib/tests/test_polynomial.py
<ide> def test_poly_int_overflow(self):
<ide> v = np.arange(1, 21)
<ide> assert_almost_equal(np.poly(v), np.poly(np.diag(v)))
<ide>
<add> def test_poly_eq(self):
<add> p = np.poly1d([1, 2, 3])
<add> p2 = np.poly1d([1, 2, 4])
<add> assert_equal(p == None, False)
<add> assert_equal(p != None, True)
<add> assert_equal(p == p, True)
<add> assert_equal(p == p2, False)
<add> assert_equal(p != p2, True)
<add>
<ide>
<ide> if __name__ == "__main__":
<ide> run_module_suite()
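Returning `NotImplemented` rather than touching `other.coeffs` is what prevents the crash: Python then tries the reflected operation on the other operand and, when that also fails, falls back to identity comparison. A minimal sketch of the mechanism, independent of NumPy:

```python
class P:
    def __init__(self, coeffs):
        self.coeffs = coeffs

    def __eq__(self, other):
        if not isinstance(other, P):
            return NotImplemented  # defer instead of accessing other.coeffs
        return self.coeffs == other.coeffs

p = P([1, 2, 3])
print(p == None)           # False: Python falls back to identity comparison
print(p == P([1, 2, 3]))   # True
```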
| 2
|
PHP
|
PHP
|
remove redundant case
|
8276ecde5ea5439bc5f5ef2c295da921b4001e11
|
<ide><path>src/ORM/Association/BelongsToMany.php
<ide> protected function _diffLinks(
<ide> foreach ($unmatchedEntityKeys as $i => $unmatchedKeys) {
<ide> $matched = false;
<ide> foreach ($keys as $key) {
<del> if (!array_key_exists($key, $unmatchedKeys) || !array_key_exists($key, $existingKeys)) {
<del> // Either side missing is no match.
<del> $matched = false;
<del> } elseif (is_object($unmatchedKeys[$key]) && is_object($existingKeys[$key])) {
<add> if (is_object($unmatchedKeys[$key]) && is_object($existingKeys[$key])) {
<ide> // If both sides are an object then use == so that value objects
<ide> // are seen as equivalent.
<ide> $matched = $existingKeys[$key] == $unmatchedKeys[$key];
<ide><path>tests/TestCase/ORM/Association/BelongsToManyTest.php
<ide> public function testReplaceLinksComplexTypeForeignKey()
<ide> $this->assertEquals('tag2', $result->tags[1]->name);
<ide> }
<ide>
<add> public function testReplaceLinksMissingKeyData()
<add> {
<add> $articles = $this->fetchTable('Articles');
<add> $tags = $this->fetchTable('Tags');
<add>
<add> $articles->belongsToMany('Tags');
<add> $article = $articles->find()->firstOrFail();
<add>
<add> $tag1 = $tags->find()->where(['Tags.name' => 'tag1'])->firstOrFail();
<add> $tag1->_joinData = new ArticlesTag(['tag_id' => 99]);
<add>
<add> $article->tags = [$tag1];
<add> $articles->saveOrFail($article, ['associated' => ['Tags']]);
<add>
<add> $this->assertCount(1, $article->tags);
<add> }
<add>
<ide> /**
<ide> * Provider for empty values
<ide> *
| 2
|
Javascript
|
Javascript
|
replace operator = with ===
|
7811eadac7e0be7a4a1c8e08f464de639ba44503
|
<ide><path>src/ng/filter/orderBy.js
<ide> * Can be one of:
<ide> *
<ide> * - `function`: Getter function. The result of this function will be sorted using the
<del> * `<`, `=`, `>` operator.
<add> * `<`, `===`, `>` operator.
<ide> * - `string`: An Angular expression. The result of this expression is used to compare elements
<ide> * (for example `name` to sort by a property called `name` or `name.substr(0, 3)` to sort by
<ide> * 3 first characters of a property called `name`). The result of a constant expression
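A short usage sketch of the getter-function form described above, assuming a hypothetical `friends` array and an injected `$filter` service; the function's return value is the sort key compared with `<`, `===`, `>`:

```js
// sorts friends ascending by age
var sorted = $filter('orderBy')(friends, function(friend) {
  return friend.age;
});
```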
| 1
|
Ruby
|
Ruby
|
allow custom url in extract_plist strategy
|
916c9806768b14a6229c9c9c6bee06f90fbcaab2
|
<ide><path>Library/Homebrew/livecheck/strategy/extract_plist.rb
<ide> def self.versions_from_items(items, regex = nil, &block)
<ide> sig {
<ide> params(
<ide> cask: Cask::Cask,
<add> url: T.nilable(String),
<ide> regex: T.nilable(Regexp),
<ide> _unused: T.nilable(T::Hash[Symbol, T.untyped]),
<ide> block: T.untyped,
<ide> ).returns(T::Hash[Symbol, T.untyped])
<ide> }
<del> def self.find_versions(cask:, regex: nil, **_unused, &block)
<add> def self.find_versions(cask:, url: nil, regex: nil, **_unused, &block)
<ide> if regex.present? && block.blank?
<ide> raise ArgumentError, "#{T.must(name).demodulize} only supports a regex when using a `strategy` block"
<ide> end
<ide> raise ArgumentError, "The #{T.must(name).demodulize} strategy only supports casks." unless T.unsafe(cask)
<ide>
<ide> match_data = { matches: {}, regex: regex }
<ide>
<del> unversioned_cask_checker = UnversionedCaskChecker.new(cask)
<add> if url && url != cask.url.to_s
<add> cask_object_for_livecheck = Cask::Cask.new("livecheck-cask", config: cask.config) do
<add> url url.to_s
<add> end
<add>
<add> unversioned_cask_checker = UnversionedCaskChecker.new(cask, livecheck_url: cask_object_for_livecheck)
<add> else
<add> unversioned_cask_checker = UnversionedCaskChecker.new(cask)
<add> end
<add>
<ide> items = unversioned_cask_checker.all_versions.transform_values { |v| Item.new(bundle_version: v) }
<ide>
<ide> versions_from_items(items, regex, &block).each do |version_text|
<ide><path>Library/Homebrew/unversioned_cask_checker.rb
<ide> class UnversionedCaskChecker
<ide>
<ide> sig { returns(Cask::Cask) }
<ide> attr_reader :cask
<add> attr_reader :livecheck_url
<ide>
<del> sig { params(cask: Cask::Cask).void }
<del> def initialize(cask)
<add> sig { params(cask: Cask::Cask, livecheck_url: T.nilable(Cask::Cask)).void }
<add> def initialize(cask, livecheck_url: nil)
<ide> @cask = cask
<add> @livecheck_url = livecheck_url
<ide> end
<ide>
<ide> sig { returns(Cask::Installer) }
<ide> def installer
<del> @installer ||= Cask::Installer.new(cask, verify_download_integrity: false)
<add> @installer ||= if livecheck_url
<add> Cask::Installer.new(livecheck_url, verify_download_integrity: false)
<add> else
<add> Cask::Installer.new(cask, verify_download_integrity: false)
<add> end
<ide> end
<ide>
<ide> sig { returns(T::Array[Cask::Artifact::App]) }
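With `url` now accepted by `find_versions`, a cask's `livecheck` block can point the `ExtractPlist` strategy at a download other than the cask's main `url`. A minimal sketch, using a hypothetical download URL:

```ruby
# hypothetical cask stanza; the livecheck url overrides the cask's main url
livecheck do
  url "https://example.com/downloads/MyApp-latest.dmg"
  strategy :extract_plist
end
```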
| 2
|
Javascript
|
Javascript
|
remove unused parameter
|
dc53b1e4fd91e79f7d18c223b4af49c69c81d182
|
<ide><path>lib/FlagDependencyExportsPlugin.js
<ide> class FlagDependencyExportsPlugin {
<ide> depBlock.blocks.forEach(processDependenciesBlock);
<ide> }
<ide>
<del> function processDependency(dep, usedExports) {
<add> function processDependency(dep) {
<ide> const exportDesc = dep.getExports && dep.getExports();
<ide> if(!exportDesc) return;
<ide> moduleWithExports = true;
| 1
|
PHP
|
PHP
|
add html as a new valid extension for views
|
998d4b88f50e9ab655834a31f3af36e9f7bd301a
|
<ide><path>src/Illuminate/View/Factory.php
<ide> class Factory implements FactoryContract
<ide> 'blade.php' => 'blade',
<ide> 'php' => 'php',
<ide> 'css' => 'file',
<add> 'html' => 'file',
<ide> ];
<ide>
<ide> /**
<ide><path>src/Illuminate/View/FileViewFinder.php
<ide> class FileViewFinder implements ViewFinderInterface
<ide> *
<ide> * @var array
<ide> */
<del> protected $extensions = ['blade.php', 'php', 'css'];
<add> protected $extensions = ['blade.php', 'php', 'css', 'html'];
<ide>
<ide> /**
<ide> * Create a new file view loader instance.
<ide><path>tests/View/ViewFileViewFinderTest.php
<ide> public function testDirectoryCascadingFileLoading()
<ide> $finder->getFilesystem()->shouldReceive('exists')->once()->with(__DIR__.'/foo.blade.php')->andReturn(false);
<ide> $finder->getFilesystem()->shouldReceive('exists')->once()->with(__DIR__.'/foo.php')->andReturn(false);
<ide> $finder->getFilesystem()->shouldReceive('exists')->once()->with(__DIR__.'/foo.css')->andReturn(false);
<add> $finder->getFilesystem()->shouldReceive('exists')->once()->with(__DIR__.'/foo.html')->andReturn(false);
<ide> $finder->getFilesystem()->shouldReceive('exists')->once()->with(__DIR__.'/nested/foo.blade.php')->andReturn(true);
<ide>
<ide> $this->assertEquals(__DIR__.'/nested/foo.blade.php', $finder->find('foo'));
<ide> public function testDirectoryCascadingNamespacedFileLoading()
<ide> $finder->getFilesystem()->shouldReceive('exists')->once()->with(__DIR__.'/foo/bar/baz.blade.php')->andReturn(false);
<ide> $finder->getFilesystem()->shouldReceive('exists')->once()->with(__DIR__.'/foo/bar/baz.php')->andReturn(false);
<ide> $finder->getFilesystem()->shouldReceive('exists')->once()->with(__DIR__.'/foo/bar/baz.css')->andReturn(false);
<add> $finder->getFilesystem()->shouldReceive('exists')->once()->with(__DIR__.'/foo/bar/baz.html')->andReturn(false);
<ide> $finder->getFilesystem()->shouldReceive('exists')->once()->with(__DIR__.'/bar/bar/baz.blade.php')->andReturn(true);
<ide>
<ide> $this->assertEquals(__DIR__.'/bar/bar/baz.blade.php', $finder->find('foo::bar.baz'));
<ide> public function testExceptionThrownWhenViewNotFound()
<ide> $finder->getFilesystem()->shouldReceive('exists')->once()->with(__DIR__.'/foo.blade.php')->andReturn(false);
<ide> $finder->getFilesystem()->shouldReceive('exists')->once()->with(__DIR__.'/foo.php')->andReturn(false);
<ide> $finder->getFilesystem()->shouldReceive('exists')->once()->with(__DIR__.'/foo.css')->andReturn(false);
<add> $finder->getFilesystem()->shouldReceive('exists')->once()->with(__DIR__.'/foo.html')->andReturn(false);
<ide>
<ide> $finder->find('foo');
<ide> }
<ide> public function testAddingExtensionsReplacesOldOnes()
<ide> $finder->addExtension('baz');
<ide> $finder->addExtension('baz');
<ide>
<del> $this->assertCount(4, $finder->getExtensions());
<add> $this->assertCount(5, $finder->getExtensions());
<ide> }
<ide>
<ide> public function testPassingViewWithHintReturnsTrue()
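With `html` in the finder's extension list, a static view such as a hypothetical `resources/views/welcome.html` resolves through the normal `view()` helper without a `.php` or `.blade.php` suffix:

```php
// routes/web.php: serves resources/views/welcome.html via the view finder
Route::get('/', function () {
    return view('welcome');
});
```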
| 3
|
Python
|
Python
|
unbundle merge layer
|
bc92fb32c0d60b7dca189ef5bd54918438726204
|
<ide><path>keras/engine/topology.py
<ide>
<ide> import numpy as np
<ide>
<del>import types as python_types
<ide> import warnings
<ide> import copy
<ide> import os
<ide> from .. import backend as K
<ide> from .. import initializers
<ide> from ..utils.io_utils import ask_to_proceed_with_overwrite
<del>from ..utils.generic_utils import func_dump, func_load
<del>
<del>
<del>def to_list(x):
<del> """This normalizes a list/tensor into a list.
<del>
<del> If a tensor is passed, we return
<del> a list of size 1 containing the tensor.
<del> """
<del> if isinstance(x, list):
<del> return x
<del> return [x]
<del>
<del>
<del>def _object_list_uid(object_list):
<del> object_list = to_list(object_list)
<del> return ', '.join([str(abs(id(x))) for x in object_list])
<del>
<del>
<del>def _is_all_none(iterable_or_element):
<del> if not isinstance(iterable_or_element, (list, tuple)):
<del> iterable = [iterable_or_element]
<del> else:
<del> iterable = iterable_or_element
<del> for element in iterable:
<del> if element is not None:
<del> return False
<del> return True
<del>
<del>
<del>def _collect_previous_mask(input_tensors):
<del> # Return the output mask(s) of the previous node.
<del> input_tensors = to_list(input_tensors)
<del> inbound_layers = []
<del> node_indices = []
<del> tensor_indices = []
<del> for x in input_tensors:
<del> if hasattr(x, '_keras_history'):
<del> inbound_layer, node_index, tensor_index = x._keras_history
<del> inbound_layers.append(inbound_layer)
<del> node_indices.append(node_index)
<del> tensor_indices.append(tensor_index)
<del> else:
<del> raise ValueError('Input tensor is not a Keras tensor:', x)
<del> nodes = [layer.inbound_nodes[i] for layer, i in zip(inbound_layers, node_indices)]
<del> masks = [node.output_masks[i] for node, i in zip(nodes, tensor_indices)]
<del> if len(masks) == 1:
<del> return masks[0]
<del> return masks
<del>
<del>
<del>def _to_snake_case(name):
<del> intermediate = re.sub('(.)([A-Z][a-z0-9]+)', r'\1_\2', name)
<del> insecure = re.sub('([a-z])([A-Z])', r'\1_\2', intermediate).lower()
<del> # If the class is private the name starts with "_" which is not secure
<del> # for creating scopes. We prefix the name with "private" in this case.
<del> if insecure[0] != '_':
<del> return insecure
<del> return 'private' + insecure
<del>
<del>
<del>def _collect_input_shape(input_tensors):
<del> # Return the output shape(s) of a list of Keras tensors.
<del> input_tensors = to_list(input_tensors)
<del> shapes = []
<del> for x in input_tensors:
<del> if hasattr(x, '_keras_shape'):
<del> shapes.append(x._keras_shape)
<del> else:
<del> raise ValueError('Input tensor is not a Keras tensor:', x)
<del> if len(shapes) == 1:
<del> return shapes[0]
<del> return shapes
<ide>
<ide>
<ide> class InputSpec(object):
<ide> def assert_input_compatibility(self, input):
<ide> raise TypeError('input_spec must be a list of '
<ide> 'InputSpec instances. Found: ' +
<ide> str(self.input_spec))
<del> inputs = to_list(input)
<add> inputs = _to_list(input)
<ide> if len(self.input_spec) > 1:
<ide> if len(inputs) != len(self.input_spec):
<ide> raise ValueError('Layer ' + self.name + ' expects ' +
<ide> def __call__(self, x, **kwargs):
<ide>
<ide> # Collect input shapes to build layer.
<ide> input_shapes = []
<del> for x_elem in to_list(x):
<add> for x_elem in _to_list(x):
<ide> if hasattr(x_elem, '_keras_shape'):
<ide> input_shapes.append(x_elem._keras_shape)
<ide> elif hasattr(K, 'int_shape'):
<ide> def __call__(self, x, **kwargs):
<ide>
<ide> # Apply activity regularizer if any:
<ide> if hasattr(self, 'activity_regularizer') and self.activity_regularizer is not None:
<del> regularization_losses = [self.activity_regularizer(x) for x in to_list(output)]
<del> self.add_loss(regularization_losses, to_list(x))
<add> regularization_losses = [self.activity_regularizer(x) for x in _to_list(output)]
<add> self.add_loss(regularization_losses, _to_list(x))
<ide>
<ide> return output
<ide>
<ide> def _add_inbound_node(self, input_tensors, output_tensors,
<ide> """
<ide> TODO
<ide> """
<del> input_tensors = to_list(input_tensors)
<del> output_tensors = to_list(output_tensors)
<del> input_masks = to_list(input_masks)
<del> output_masks = to_list(output_masks)
<del> input_shapes = to_list(output_shapes)
<del> output_shapes = to_list(output_shapes)
<add> input_tensors = _to_list(input_tensors)
<add> output_tensors = _to_list(output_tensors)
<add> input_masks = _to_list(input_masks)
<add> output_masks = _to_list(output_masks)
<add> input_shapes = _to_list(input_shapes)
<add> output_shapes = _to_list(output_shapes)
<ide>
<ide> # Collect input tensor(s) coordinates.
<ide> inbound_layers = []
<ide> def add_loss(self, losses, inputs=None):
<ide> if losses is None:
<ide> return
<ide> # Update self.losses
<del> losses = to_list(losses)
<add> losses = _to_list(losses)
<ide> if not hasattr(self, 'losses'):
<ide> self.losses = []
<ide> try:
<ide> def add_update(self, updates, inputs=None):
<ide> if updates is None:
<ide> return
<ide> # Update self.updates
<del> updates = to_list(updates)
<add> updates = _to_list(updates)
<ide> if not hasattr(self, 'updates'):
<ide> self.updates = []
<ide> try:
<ide> def Input(shape=None, batch_shape=None,
<ide> return outputs
<ide>
<ide>
<del>class Merge(Layer):
<del> """A `Merge` layer can be used to merge a list of tensors
<del> into a single tensor, following some merge `mode`.
<del>
<del> # Example
<del>
<del> ```python
<del> model1 = Sequential()
<del> model1.add(Dense(32, input_dim=32))
<del>
<del> model2 = Sequential()
<del> model2.add(Dense(32, input_dim=32))
<del>
<del> merged_model = Sequential()
<del> merged_model.add(Merge([model1, model2], mode='concat', concat_axis=1))
<del> ```
<del>
<del> # Arguments
<del> layers: Can be a list of Keras tensors or
<del> a list of layer instances. Must be more
<del> than one layer/tensor.
<del> mode: String or lambda/function. If string, must be one
<del> of: 'sum', 'mul', 'concat', 'ave', 'cos', 'dot', 'max'.
<del> If lambda/function, it should take as input a list of tensors
<del> and return a single tensor.
<del> concat_axis: Integer, axis to use in mode `concat`.
<del> dot_axes: Integer or tuple of integers,
<del> axes to use in mode `dot` or `cos`.
<del> output_shape: Either a shape tuple (tuple of integers),
<del> or a lambda/function
<del> to compute `output_shape`
<del> (only if merge mode is a lambda/function).
<del> If the argument is a tuple,
<del> it should be expected output shape, *not* including the batch size
<del> (same convention as the `input_shape` argument in layers).
<del> If the argument is callable,
<del> it should take as input a list of shape tuples
<del> (1:1 mapping to input tensors)
<del> and return a single shape tuple, including the
<del> batch size (same convention as the
<del> `get_output_shape_for` method of layers).
<del> node_indices: Optional list of integers containing
<del> the output node index for each input layer
<del> (in case some input layers have multiple output nodes).
<del> will default to an array of 0s if not provided.
<del> tensor_indices: Optional list of indices of output tensors
<del> to consider for merging
<del> (in case some input layer node returns multiple tensors).
<del> output_mask: Mask or lambda/function to compute the output mask (only
<del> if merge mode is a lambda/function). If the latter case, it should
<del> take as input a list of masks and return a single mask.
<del> """
<del>
<del> def __init__(self, layers=None, mode='sum', concat_axis=-1,
<del> dot_axes=-1, output_shape=None, output_mask=None,
<del> arguments=None, node_indices=None, tensor_indices=None,
<del> name=None):
<del> # TODO: call parent's __init__ instead.
<del> self.layers = layers
<del> self.mode = mode
<del> self.concat_axis = concat_axis
<del> self.dot_axes = dot_axes
<del> self._output_shape = output_shape
<del> self.node_indices = node_indices
<del> self._output_mask = output_mask
<del> self.arguments = arguments if arguments else {}
<del>
<del> # Layer parameters.
<del> self.inbound_nodes = []
<del> self.outbound_nodes = []
<del> self.constraints = {}
<del> self._trainable_weights = []
<del> self._non_trainable_weights = []
<del> self.input_spec = None # Compatible with anything.
<del> if not name:
<del> prefix = self.__class__.__name__.lower()
<del> name = prefix + '_' + str(K.get_uid(prefix))
<del> self.name = name
<del>
<del> if layers:
<del> # This exists for backwards compatibility.
<del> # equivalent to:
<del> # merge = Merge(layers=None)
<del> # output = merge([input_tensor_1, input_tensor_2])
<del> if not node_indices:
<del> # By default we connect to
<del> # the 1st output stream in the input layer.
<del> node_indices = [0 for _ in range(len(layers))]
<del> self._arguments_validation(layers, mode,
<del> concat_axis, dot_axes,
<del> node_indices, tensor_indices)
<del> self.built = True
<del> self._add_inbound_node(layers, node_indices, tensor_indices)
<del> else:
<del> self.built = False
<del>
<del> def _arguments_validation(self, layers, mode, concat_axis, dot_axes,
<del> node_indices, tensor_indices):
<del> """Validates user-passed arguments and raises exceptions
<del> as appropriate.
<del> """
<del> if not callable(mode):
<del> if mode not in {'sum', 'mul', 'concat', 'ave', 'cos', 'dot', 'max'}:
<del> raise ValueError('Invalid merge mode: ' + str(mode))
<del> if not isinstance(layers, (list, tuple)) or len(layers) < 2:
<del> raise TypeError('A Merge should only be applied to a list of '
<del> 'layers with at least 2 elements. Found: ' +
<del> str(layers))
<del>
<del> if tensor_indices is None:
<del> tensor_indices = [None for _ in range(len(layers))]
<del>
<del> input_shapes = []
<del> for i, layer in enumerate(layers):
<del> layer_output_shape = layer.get_output_shape_at(node_indices[i])
<del> if isinstance(layer_output_shape, list):
<del> # Case: the layer has multiple output tensors
<del> # and we only need a specific one.
<del> layer_output_shape = layer_output_shape[tensor_indices[i]]
<del> input_shapes.append(layer_output_shape)
<del>
<del> if mode in {'sum', 'mul', 'ave', 'cos', 'max'}:
<del> input_shapes_set = set(input_shapes)
<del> if len(input_shapes_set) > 1:
<del> raise ValueError('Only layers of same output shape can '
<del> 'be merged using ' + mode + ' mode. ' +
<del> 'Layer shapes: %s' % input_shapes)
<del> if mode in {'cos', 'dot'}:
<del> if len(layers) > 2:
<del> raise ValueError(mode + ' merge takes exactly 2 layers')
<del> shape1 = input_shapes[0]
<del> shape2 = input_shapes[1]
<del> n1 = len(shape1)
<del> n2 = len(shape2)
<del> if isinstance(dot_axes, int):
<del> if dot_axes < 0:
<del> self.dot_axes = [dot_axes % n1, dot_axes % n2]
<del> else:
<del> self.dot_axes = [dot_axes, ] * 2
<del> if not isinstance(self.dot_axes, (list, tuple)):
<del> raise TypeError('Invalid type for dot_axes - '
<del> 'should be a list.')
<del> if len(self.dot_axes) != 2:
<del> raise ValueError('Invalid format for dot_axes - '
<del> 'should contain two elements.')
<del> if not isinstance(self.dot_axes[0], int) or not isinstance(self.dot_axes[1], int):
<del> raise ValueError('Invalid format for dot_axes - '
<del> 'list elements should be "int".')
<del> if shape1[self.dot_axes[0]] != shape2[self.dot_axes[1]]:
<del> raise ValueError('Dimension incompatibility using dot mode: '
<del> '%s != %s. ' % (shape1[self.dot_axes[0]], shape2[self.dot_axes[1]]) +
<del> 'Layer shapes: %s, %s' % (shape1, shape2))
<del> elif mode == 'concat':
<del> reduced_inputs_shapes = [list(shape) for shape in input_shapes]
<del> shape_set = set()
<del> for i in range(len(reduced_inputs_shapes)):
<del> del reduced_inputs_shapes[i][self.concat_axis]
<del> shape_set.add(tuple(reduced_inputs_shapes[i]))
<del> if len(shape_set) > 1:
<del> raise ValueError('"concat" mode can only merge '
<del> 'layers with matching '
<del> 'output shapes except for the concat axis. '
<del> 'Layer shapes: %s' % (input_shapes))
<del>
<del> def call(self, inputs):
<del> if not isinstance(inputs, list) or len(inputs) <= 1:
<del> raise TypeError('Merge must be called on a list of tensors '
<del> '(at least 2). Got: ' + str(inputs))
<del> # Case: "mode" is a lambda or function.
<del> if callable(self.mode):
<del> arguments = self.arguments
<del> arg_spec = inspect.getargspec(self.mode)
<del> if 'mask' in arg_spec.args:
<del> arguments['mask'] = mask
<del> return self.mode(inputs, **arguments)
<del>
<del> if self.mode == 'sum' or self.mode == 'ave':
<del> s = inputs[0]
<del> for i in range(1, len(inputs)):
<del> s += inputs[i]
<del> if self.mode == 'ave':
<del> s /= len(inputs)
<del> return s
<del>
<del> elif self.mode == 'concat':
<del> return K.concatenate(inputs, axis=self.concat_axis)
<del>
<del> elif self.mode == 'mul':
<del> s = inputs[0]
<del> for i in range(1, len(inputs)):
<del> s *= inputs[i]
<del> return s
<del> elif self.mode == 'max':
<del> s = inputs[0]
<del> for i in range(1, len(inputs)):
<del> s = K.maximum(s, inputs[i])
<del> return s
<del> elif self.mode == 'dot':
<del> l1 = inputs[0]
<del> l2 = inputs[1]
<del> output = K.batch_dot(l1, l2, self.dot_axes)
<del> return output
<del>
<del> elif self.mode == 'cos':
<del> l1 = inputs[0]
<del> l2 = inputs[1]
<del> denominator = K.sqrt(K.batch_dot(l1, l1, self.dot_axes) *
<del> K.batch_dot(l2, l2, self.dot_axes))
<del> denominator = K.maximum(denominator, K.epsilon())
<del> output = K.batch_dot(l1, l2, self.dot_axes) / denominator
<del> output = K.expand_dims(output, 1)
<del> return output
<del> else:
<del> raise ValueError('Unknown merge mode.')
<del>
<del> def __call__(self, inputs, **kwargs):
<del> """We disable successive calls to __call__ for Merge layers.
<del> Although there is no technical obstacle to
<del> making it possible to __call__ a Merge instance many times
<del> (it is just a layer), it would make for a rather inelegant API.
<del> """
<del> if not isinstance(inputs, list):
<del> raise TypeError('Merge can only be called on a list of tensors, '
<del> 'not a single tensor. Received: ' + str(inputs))
<del> if self.built:
<del> raise RuntimeError('A Merge layer cannot be used more than once, '
<del> 'please use '
<del> 'the "merge" function instead: '
<del> '`merged_tensor = merge([tensor_1, tensor2])`.')
<del>
<del> all_keras_tensors = True
<del> for x in inputs:
<del> if not hasattr(x, '_keras_history'):
<del> all_keras_tensors = False
<del> break
<del>
<del> if all_keras_tensors:
<del> layers = []
<del> node_indices = []
<del> tensor_indices = []
<del> for x in inputs:
<del> layer, node_index, tensor_index = x._keras_history
<del> layers.append(layer)
<del> node_indices.append(node_index)
<del> tensor_indices.append(tensor_index)
<del> self._arguments_validation(layers, self.mode,
<del> self.concat_axis, self.dot_axes,
<del> node_indices, tensor_indices)
<del> self.built = True
<del> self._add_inbound_node(layers, node_indices, tensor_indices)
<del>
<del> outputs = self.inbound_nodes[-1].output_tensors
<del> return outputs[0] # Merge only returns a single tensor.
<del> else:
<del> return self.masked_call(inputs, **kwargs)
<del>
<del> def get_output_shape_for(self, input_shape):
<del> # Must have multiple input shape tuples.
<del> assert isinstance(input_shape, list)
<del> # Case: callable self._output_shape.
<del> if callable(self.mode):
<del> if callable(self._output_shape):
<del> output_shape = self._output_shape(input_shape)
<del> return output_shape
<del> elif self._output_shape is not None:
<del> return (input_shape[0][0],) + tuple(self._output_shape)
<del> else:
<del> # TODO: consider shape auto-inference with TF.
<del> raise ValueError('The Merge layer ' + self.name +
<del> ' has a callable `mode` argument, '
<del> 'and we cannot infer its output shape '
<del> 'because no `output_shape` '
<del> 'argument was provided. '
<del> 'Make sure to pass a shape tuple '
<del> '(or callable) '
<del> '`output_shape` to Merge.')
<del> # Pre-defined merge modes.
<del> input_shapes = input_shape
<del> if self.mode in ['sum', 'mul', 'ave', 'max']:
<del> # All tuples in input_shapes should be the same.
<del> return input_shapes[0]
<del> elif self.mode == 'concat':
<del> output_shape = list(input_shapes[0])
<del> for shape in input_shapes[1:]:
<del> if output_shape[self.concat_axis] is None or shape[self.concat_axis] is None:
<del> output_shape[self.concat_axis] = None
<del> break
<del> output_shape[self.concat_axis] += shape[self.concat_axis]
<del> return tuple(output_shape)
<del> elif self.mode in ['dot', 'cos']:
<del> shape1 = list(input_shapes[0])
<del> shape2 = list(input_shapes[1])
<del> shape1.pop(self.dot_axes[0])
<del> shape2.pop(self.dot_axes[1])
<del> shape2.pop(0)
<del> output_shape = shape1 + shape2
<del> if len(output_shape) == 1:
<del> output_shape += [1]
<del> return tuple(output_shape)
<del>
<del> def compute_mask(self, inputs, mask=None):
<del> if mask is None or all([m is None for m in mask]):
<del> return None
<del>
<del> assert hasattr(mask, '__len__') and len(mask) == len(inputs)
<del>
<del> if self.mode in ['sum', 'mul', 'ave']:
<del> masks = [K.expand_dims(m, 0) for m in mask if m is not None]
<del> return K.all(K.concatenate(masks, axis=0), axis=0, keepdims=False)
<del> elif self.mode == 'concat':
<del> # Make a list of masks while making sure
<del> # the dimensionality of each mask
<del> # is the same as the corresponding input.
<del> masks = []
<del> for input_i, mask_i in zip(inputs, mask):
<del> if mask_i is None:
<del> # Input is unmasked. Append all 1s to masks,
<del> # but cast it to uint8 first
<del> masks.append(K.cast(K.ones_like(input_i), 'uint8'))
<del> elif K.ndim(mask_i) < K.ndim(input_i):
<del> # Mask is smaller than the input, expand it
<del> masks.append(K.expand_dims(mask_i))
<del> else:
<del> masks.append(mask_i)
<del> concatenated = K.concatenate(masks, axis=self.concat_axis)
<del> return K.all(concatenated, axis=-1, keepdims=False)
<del> elif self.mode in ['cos', 'dot']:
<del> return None
<del> elif callable(self.mode):
<del> if callable(self._output_mask):
<del> return self._output_mask(mask)
<del> else:
<del> return self._output_mask
<del> else:
<del> # This should have been caught earlier.
<del> raise ValueError('Invalid merge mode: {}'.format(self.mode))
<del>
<del> def get_config(self):
<del> if isinstance(self.mode, python_types.LambdaType):
<del> mode = func_dump(self.mode)
<del> mode_type = 'lambda'
<del> elif callable(self.mode):
<del> mode = self.mode.__name__
<del> mode_type = 'function'
<del> else:
<del> mode = self.mode
<del> mode_type = 'raw'
<del>
<del> if isinstance(self._output_shape, python_types.LambdaType):
<del> output_shape = func_dump(self._output_shape)
<del> output_shape_type = 'lambda'
<del> elif callable(self._output_shape):
<del> output_shape = self._output_shape.__name__
<del> output_shape_type = 'function'
<del> else:
<del> output_shape = self._output_shape
<del> output_shape_type = 'raw'
<del>
<del> if isinstance(self._output_mask, python_types.LambdaType):
<del> output_mask = func_dump(self._output_mask)
<del> output_mask_type = 'lambda'
<del> elif callable(self._output_mask):
<del> output_mask = self._output_mask.__name__
<del> output_mask_type = 'function'
<del> else:
<del> output_mask = self._output_mask
<del> output_mask_type = 'raw'
<del>
<del> return {'name': self.name,
<del> 'mode': mode,
<del> 'mode_type': mode_type,
<del> 'concat_axis': self.concat_axis,
<del> 'dot_axes': self.dot_axes,
<del> 'output_shape': output_shape,
<del> 'output_shape_type': output_shape_type,
<del> 'output_mask': output_mask,
<del> 'output_mask_type': output_mask_type,
<del> 'arguments': self.arguments}
<del>
<del> @classmethod
<del> def from_config(cls, config):
<del> mode_type = config.pop('mode_type')
<del> if mode_type == 'function':
<del> mode = globals()[config['mode']]
<del> elif mode_type == 'lambda':
<del> mode = func_load(config['mode'], globs=globals())
<del> else:
<del> mode = config['mode']
<del>
<del> output_shape_type = config.pop('output_shape_type', None)
<del> if output_shape_type == 'function':
<del> output_shape = globals()[config['output_shape']]
<del> elif output_shape_type == 'lambda':
<del> output_shape = func_load(config['output_shape'],
<del> globs=globals())
<del> else:
<del> output_shape = config.get('output_shape')
<del>
<del> output_mask_type = config.pop('output_mask_type', None)
<del> if output_mask_type == 'function':
<del> output_mask = globals()[config['output_mask']]
<del> elif output_mask_type == 'lambda':
<del> output_mask = func_load(config['output_mask'],
<del> globs=globals())
<del> else:
<del> output_mask = config.get('output_mask')
<del>
<del> config['mode'] = mode
<del> config['output_shape'] = output_shape
<del> config['output_mask'] = output_mask
<del> return super(Merge, cls).from_config(config)
<del>
<del>
<del>def merge(inputs, mode='sum', concat_axis=-1,
<del> dot_axes=-1, output_shape=None, output_mask=None,
<del> arguments=None, name=None):
<del> """Functional merge, to apply to Keras tensors (NOT layers).
<del> Returns a Keras tensor.
<del>
<del> # Example
<del>
<del> ```python
<del> tensor_a = Input(shape=(32,))
<del> tensor_b = Input(shape=(32,))
<del> merged_tensor = merge([tensor_a, tensor_b], mode='concat', concat_axis=1)
<del> ```
<del>
<del> # Arguments
<del> mode: String or lambda/function. If string, must be one
<del> of: 'sum', 'mul', 'concat', 'ave', 'cos', 'dot'.
<del> If lambda/function, it should take as input a list of tensors
<del> and return a single tensor.
<del> concat_axis: Integer, axis to use in mode `concat`.
<del> dot_axes: Integer or tuple of integers,
<del> axes to use in mode `dot` or `cos`.
<del> output_shape: Shape tuple (tuple of integers), or lambda/function
<del> to compute output_shape (only if merge mode is a lambda/function).
<del> If the latter case, it should take as input a list of shape tuples
<del> (1:1 mapping to input tensors) and return a single shape tuple,
<del> including the batch size
<del> (same convention as the `get_output_shape_for` method of layers).
<del> node_indices: Optional list of integers containing
<del> the output node index for each input layer
<del> (in case some input layers have multiple output nodes).
<del> will default to an array of 0s if not provided.
<del> tensor_indices: Optional list of indices of output tensors
<del> to consider for merging
<del> (in case some input layer node returns multiple tensors).
<del> """
<del> all_keras_tensors = True
<del> for x in inputs:
<del> if not hasattr(x, '_keras_history'):
<del> all_keras_tensors = False
<del> break
<del> if all_keras_tensors:
<del> input_layers = []
<del> node_indices = []
<del> tensor_indices = []
<del> for x in inputs:
<del> input_layer, node_index, tensor_index = x._keras_history
<del> input_layers.append(input_layer)
<del> node_indices.append(node_index)
<del> tensor_indices.append(tensor_index)
<del> merge_layer = Merge(input_layers, mode=mode,
<del> concat_axis=concat_axis,
<del> dot_axes=dot_axes,
<del> output_shape=output_shape,
<del> output_mask=output_mask,
<del> arguments=arguments,
<del> node_indices=node_indices,
<del> tensor_indices=tensor_indices,
<del> name=name)
<del> return merge_layer.inbound_nodes[0].output_tensors[0]
<del> else:
<del> merge_layer = Merge(mode=mode,
<del> concat_axis=concat_axis,
<del> dot_axes=dot_axes,
<del> output_shape=output_shape,
<del> output_mask=output_mask,
<del> arguments=arguments,
<del> name=name)
<del> return merge_layer(inputs)
<del>
<del>
<ide> class Container(Layer):
<ide> """A Container is a directed acyclic graph of layers.
<ide>
<ide> class Container(Layer):
<ide> from_config
<ide> """
<ide>
<del> def __init__(self, input, output, name=None):
<add> def __init__(self, inputs, outputs, name=None):
<ide> # TODO: call parent's __init__ instead.
<ide> # Handle name argument.
<ide> if not name:
<ide> def __init__(self, input, output, name=None):
<ide> self.trainable = True
<ide>
<ide> # Container-specific properties.
<del> if isinstance(input, (list, tuple)):
<del> self.inputs = list(input) # Tensor or list of tensors.
<add> if isinstance(inputs, (list, tuple)):
<add> self.inputs = list(inputs) # Tensor or list of tensors.
<ide> else:
<del> self.inputs = [input]
<del> if isinstance(output, (list, tuple)):
<del> self.outputs = list(output)
<add> self.inputs = [inputs]
<add> if isinstance(outputs, (list, tuple)):
<add> self.outputs = list(outputs)
<ide> else:
<del> self.outputs = [output]
<add> self.outputs = [outputs]
<ide>
<ide> # Check for redundancy in inputs.
<ide> inputs_set = set(self.inputs)
<ide> def call(self, input, mask=None):
<ide> A tensor if there is a single output, or
<ide> a list of tensors if there are more than one outputs.
<ide> """
<del> inputs = to_list(input)
<add> inputs = _to_list(input)
<ide> if mask is None:
<ide> masks = [None for _ in range(len(inputs))]
<ide> else:
<del> masks = to_list(mask)
<add> masks = _to_list(mask)
<ide> cache_key = ','.join([str(id(x)) for x in inputs])
<ide> cache_key += '_' + ','.join([str(id(x)) for x in masks])
<ide> if cache_key in self._output_tensor_cache:
<ide> def call(self, input, mask=None):
<ide> return output_tensors
<ide>
<ide> def compute_mask(self, input, mask):
<del> inputs = to_list(input)
<add> inputs = _to_list(input)
<ide> if mask is None:
<ide> masks = [None for _ in range(len(inputs))]
<ide> else:
<del> masks = to_list(mask)
<add> masks = _to_list(mask)
<ide> cache_key = ','.join([str(id(x)) for x in inputs])
<ide> cache_key += '_' + ','.join([str(id(x)) for x in masks])
<ide> if cache_key in self._output_mask_cache:
<ide> def compute_mask(self, input, mask):
<ide> return output_masks
<ide>
<ide> def get_output_shape_for(self, input_shape):
<del> input_shapes = to_list(input_shape)
<add> input_shapes = _to_list(input_shape)
<ide> if len(input_shapes) != len(self.input_layers):
<ide> raise ValueError('Invalid input_shape argument ' +
<ide> str(input_shape) + ': model has ' +
<ide> def get_output_shape_for(self, input_shape):
<ide> else:
<ide> output_shape = layer.get_output_shape_for(input_shapes)
<ide>
<del> output_shapes = to_list(output_shape)
<add> output_shapes = _to_list(output_shape)
<ide> node_index = layer.inbound_nodes.index(node)
<ide> for j in range(len(output_shapes)):
<ide> shape_key = layer.name + '_%s_%s' % (node_index, j)
<ide> def run_internal_graph(self, inputs, masks=None):
<ide> # call layer
<ide> if len(computed_data) == 1:
<ide> computed_tensor, computed_mask = computed_data[0]
<del> output_tensors = to_list(layer.masked_call(computed_tensor,
<del> mask=computed_mask))
<del> output_masks = to_list(layer.compute_mask(computed_tensor,
<del> computed_mask))
<add> output_tensors = _to_list(layer.masked_call(computed_tensor,
<add> mask=computed_mask))
<add> output_masks = _to_list(layer.compute_mask(computed_tensor,
<add> computed_mask))
<ide> computed_tensors = [computed_tensor]
<ide> computed_masks = [computed_mask]
<ide> else:
<ide> computed_tensors = [x[0] for x in computed_data]
<ide> computed_masks = [x[1] for x in computed_data]
<del> output_tensors = to_list(layer.masked_call(computed_tensors,
<del> mask=computed_masks))
<del> output_masks = to_list(layer.compute_mask(computed_tensors,
<del> computed_masks))
<add> output_tensors = _to_list(layer.masked_call(computed_tensors,
<add> mask=computed_masks))
<add> output_masks = _to_list(layer.compute_mask(computed_tensors,
<add> computed_masks))
<ide>
<ide> # Update model updates and losses:
<ide> layer_inputs = [x[0] for x in computed_data]
<ide> def run_internal_graph(self, inputs, masks=None):
<ide> # Update _keras_shape.
<ide> if all([hasattr(x, '_keras_shape') for x in computed_tensors]):
<ide> if len(computed_tensors) == 1:
<del> shapes = to_list(layer.get_output_shape_for(computed_tensors[0]._keras_shape))
<add> shapes = _to_list(layer.get_output_shape_for(computed_tensors[0]._keras_shape))
<ide> uses_learning_phase = computed_tensors[0]._uses_learning_phase
<ide> else:
<del> shapes = to_list(layer.get_output_shape_for([x._keras_shape for x in computed_tensors]))
<add> shapes = _to_list(layer.get_output_shape_for([x._keras_shape for x in computed_tensors]))
<ide> uses_learning_phase = any([x._uses_learning_phase for x in computed_tensors])
<ide> for x, s in zip(output_tensors, shapes):
<ide> x._keras_shape = s
<ide> def get_source_inputs(tensor, layer=None, node_index=None):
<ide> if x not in source_tensors:
<ide> source_tensors.append(x)
<ide> return source_tensors
<add>
<add>
<add>def _to_list(x):
<add> """This normalizes a list/tensor into a list.
<add>
<add> If a tensor is passed, we return
<add> a list of size 1 containing the tensor.
<add> """
<add> if isinstance(x, list):
<add> return x
<add> return [x]
<add>
<add>
<add>def _object_list_uid(object_list):
<add> object_list = _to_list(object_list)
<add> return ', '.join([str(abs(id(x))) for x in object_list])
<add>
<add>
<add>def _is_all_none(iterable_or_element):
<add> if not isinstance(iterable_or_element, (list, tuple)):
<add> iterable = [iterable_or_element]
<add> else:
<add> iterable = iterable_or_element
<add> for element in iterable:
<add> if element is not None:
<add> return False
<add> return True
<add>
<add>
<add>def _collect_previous_mask(input_tensors):
<add> # Return the output mask(s) of the previous node.
<add> input_tensors = _to_list(input_tensors)
<add> inbound_layers = []
<add> node_indices = []
<add> tensor_indices = []
<add> for x in input_tensors:
<add> if hasattr(x, '_keras_history'):
<add> inbound_layer, node_index, tensor_index = x._keras_history
<add> inbound_layers.append(inbound_layer)
<add> node_indices.append(node_index)
<add> tensor_indices.append(tensor_index)
<add> else:
<add> raise ValueError('Input tensor is not a Keras tensor:', x)
<add> nodes = [layer.inbound_nodes[i] for layer, i in zip(inbound_layers,
<add> node_indices)]
<add> masks = [node.output_masks[i] for node, i in zip(nodes, tensor_indices)]
<add> if len(masks) == 1:
<add> return masks[0]
<add> return masks
<add>
<add>
<add>def _to_snake_case(name):
<add> intermediate = re.sub('(.)([A-Z][a-z0-9]+)', r'\1_\2', name)
<add> insecure = re.sub('([a-z])([A-Z])', r'\1_\2', intermediate).lower()
<add> # If the class is private the name starts with "_" which is not secure
<add> # for creating scopes. We prefix the name with "private" in this case.
<add> if insecure[0] != '_':
<add> return insecure
<add> return 'private' + insecure
<add>
<add>
<add>def _collect_input_shape(input_tensors):
<add> # Return the output shape(s) of a list of Keras tensors.
<add> input_tensors = _to_list(input_tensors)
<add> shapes = []
<add> for x in input_tensors:
<add> if hasattr(x, '_keras_shape'):
<add> shapes.append(x._keras_shape)
<add> else:
<add> raise ValueError('Input tensor is not a Keras tensor:', x)
<add> if len(shapes) == 1:
<add> return shapes[0]
<add> return shapes
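
For intuition, a minimal standalone sketch of two of the private helpers added above (the function bodies are copied from the patch so the snippet runs outside Keras; the asserts are illustrative examples, not from the patch):

import re

def _to_list(x):
    # Normalize a single tensor (or any object) into a list of length 1.
    if isinstance(x, list):
        return x
    return [x]

def _to_snake_case(name):
    # Turn a CamelCase class name into a snake_case scope name.
    intermediate = re.sub('(.)([A-Z][a-z0-9]+)', r'\1_\2', name)
    insecure = re.sub('([a-z])([A-Z])', r'\1_\2', intermediate).lower()
    if insecure[0] != '_':
        return insecure
    return 'private' + insecure

assert _to_list(42) == [42]
assert _to_list([1, 2]) == [1, 2]
assert _to_snake_case('BatchNormalization') == 'batch_normalization'
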
<ide><path>keras/layers/merge.py
<add>from ..engine.topology import Layer
<add>from .. import backend as K
<add>
<add>
<add>class _Merge(Layer):
<add> """TODO
<add> """
<add>
<add> def __init__(self, **kwargs):
<add> super(_Merge, self).__init__(**kwargs)
<add> self.supports_masking = True
<add>
<add> def _merge_function(self, inputs):
<add> raise NotImplementedError
<add>
<add> def build(self, input_shape):
<add> # Used purely for shape validation.
<add> if not isinstance(input_shape, list):
<add> raise ValueError('A merge layer should be called '
<add> 'on a list of inputs.')
<add> if len(input_shape) < 2:
<add> raise ValueError('A merge layer should be called '
<add> 'on a list of at least 2 inputs. '
<add> 'Got ' + str(len(input_shape)) + ' inputs.')
<add> # TODO: handle shapes with None entries.
<add> input_shapes_set = set(input_shape)
<add> if len(input_shapes_set) > 1:
<add> raise ValueError('Only tensors of the same shape can '
<add> 'be merged by layer ' + self.name +
<add> '. Got input shapes: %s' % input_shape)
<add>
<add> def call(self, inputs):
<add> return self._merge_function(inputs)
<add>
<add> def compute_mask(self, inputs, mask=None):
<add> if mask is not None:
<add> if not isinstance(mask, list):
<add> raise ValueError('`mask` should be a list.')
<add> if not isinstance(inputs, list):
<add> raise ValueError('`inputs` should be a list.')
<add> if len(mask) != len(inputs):
<add> raise ValueError('The lists `inputs` and `mask` '
<add> 'should have the same length.')
<add> masks = [K.expand_dims(m, 0) for m in mask if m is not None]
<add> return K.all(K.concatenate(masks, axis=0), axis=0, keepdims=False)
<add>
<add>
<add>class Sum(_Merge):
<add> """TODO
<add> """
<add>
<add> def _merge_function(self, inputs):
<add> output = inputs[0]
<add> for i in range(1, len(inputs)):
<add> output += inputs[i]
<add> return output
<add>
<add>
<add>class Multiply(_Merge):
<add> """TODO
<add> """
<add>
<add> def _merge_function(self, inputs):
<add> output = inputs[0]
<add> for i in range(1, len(inputs)):
<add> output *= inputs[i]
<add> return output
<add>
<add>
<add>class Average(_Merge):
<add> """TODO
<add> """
<add>
<add> def _merge_function(self, inputs):
<add> output = inputs[0]
<add> for i in range(1, len(inputs)):
<add> output *= inputs[i]
<add> return output / len(inputs)
<add>
<add>
<add>class Maximum(_Merge):
<add> """TODO
<add> """
<add>
<add> def _merge_function(self, inputs):
<add> output = inputs[0]
<add> for i in range(1, len(inputs)):
<add> output = K.maximum(output, inputs[i])
<add> return output
<add>
<add>
<add>class Concatenate(_Merge):
<add> """TODO
<add> """
<add>
<add> def __init__(self, axis=-1, **kwargs):
<add> super(Concatenate, self).__init__(**kwargs)
<add> self.axis = axis
<add> self.supports_masking = True
<add>
<add> def build(self, input_shape):
<add> # Used purely for shape validation.
<add> if not isinstance(input_shape, list) or len(input_shape) != 2:
<add> raise ValueError('`Concatenate` layer should be called '
<add> 'on a list of 2 inputs.')
<add> reduced_inputs_shapes = [list(shape) for shape in input_shape]
<add> shape_set = set()
<add> for i in range(len(reduced_inputs_shapes)):
<add> del reduced_inputs_shapes[i][self.axis]
<add> shape_set.add(tuple(reduced_inputs_shapes[i]))
<add> if len(shape_set) > 1:
<add> raise ValueError('`Concatenate` layer requires '
<add> 'inputs with matching shapes '
<add> 'except for the concat axis. '
<add> 'Got inputs shapes: %s' % (input_shape))
<add>
<add> def call(self, inputs):
<add> if not isinstance(inputs, list):
<add> raise ValueError('A `Concatenate` layer should be called '
<add> 'on a list of 2 inputs.')
<add> return K.concatenate(inputs, axis=self.axis)
<add>
<add> def get_output_shape_for(self, input_shape):
<add> if not isinstance(input_shape, list) or len(input_shape) != 2:
<add> raise ValueError('A `Concatenate` layer should be called '
<add> 'on a list of 2 inputs.')
<add> input_shapes = input_shape
<add> output_shape = list(input_shapes[0])
<add> for shape in input_shapes[1:]:
<add> if output_shape[self.axis] is None or shape[self.axis] is None:
<add> output_shape[self.axis] = None
<add> break
<add> output_shape[self.axis] += shape[self.axis]
<add> return tuple(output_shape)
<add>
<add> def compute_mask(self, inputs, mask=None):
<add> if mask is None:
<add> return None
<add> if not isinstance(inputs, list):
<add> raise ValueError('A `Concatenate` layer should be called '
<add> 'on a list of 2 inputs.')
<add> if not isinstance(mask, list):
<add> raise ValueError('`mask` should be a list.')
<add> if len(inputs) != len(mask):
<add> raise ValueError('The lists `inputs` and `mask` '
<add> 'should have the same length.')
<add> # Make a list of masks while making sure
<add> # the dimensionality of each mask
<add> # is the same as the corresponding input.
<add> masks = []
<add> for input_i, mask_i in zip(inputs, mask):
<add> if mask_i is None:
<add> # Input is unmasked. Append all 1s to masks,
<add> # but cast it to uint8 first
<add> masks.append(K.cast(K.ones_like(input_i), 'uint8'))
<add> elif K.ndim(mask_i) < K.ndim(input_i):
<add> # Mask is smaller than the input, expand it
<add> masks.append(K.expand_dims(mask_i))
<add> else:
<add> masks.append(mask_i)
<add> concatenated = K.concatenate(masks, axis=self.axis)
<add> return K.all(concatenated, axis=-1, keepdims=False)
<add>
<add> def get_config(self):
<add> config = {
<add> 'axis': self.axis,
<add> }
<add> base_config = super(Concatenate, self).get_config()
<add> return dict(list(base_config.items()) + list(config.items()))
<add>
<add>
<add>class Dot(_Merge):
<add> """TODO
<add> """
<add>
<add> def __init__(self, axes, normalize=False, **kwargs):
<add> super(Dot, self).__init__(**kwargs)
<add> if not isinstance(axes, int):
<add> if not isinstance(axes, (list, tuple)):
<add> raise TypeError('Invalid type for `axes` - '
<add> 'should be a list or an int.')
<add> if len(axes) != 2:
<add> raise ValueError('Invalid format for `axes` - '
<add> 'should contain two elements.')
<add> if not isinstance(axes[0], int) or not isinstance(axes[1], int):
<add> raise ValueError('Invalid format for `axes` - '
<add> 'list elements should be "int".')
<add> self.axes = axes
<add> self.normalize = normalize
<add> self.supports_masking = True
<add>
<add> def build(self, input_shape):
<add> # Used purely for shape validation.
<add> if not isinstance(input_shape, list) or len(input_shape) != 2:
<add> raise ValueError('A `Dot` layer should be called '
<add> 'on a list of 2 inputs.')
<add> shape1 = input_shape[0]
<add> shape2 = input_shape[1]
<add> if isinstance(self.axes, int):
<add> if self.axes < 0:
<add> axes = [self.axes % len(shape1), self.axes % len(shape2)]
<add> else:
<add> axes = [self.axes] * 2
<add> else:
<add> axes = self.axes
<add> if shape1[axes[0]] != shape2[axes[1]]:
<add> raise ValueError(
<add> 'Dimension incompatibility '
<add> '%s != %s. ' % (shape1[axes[0]], shape2[axes[1]]) +
<add> 'Layer shapes: %s, %s' % (shape1, shape2))
<add>
<add> def call(self, inputs):
<add> x1 = inputs[0]
<add> x2 = inputs[1]
<add> if isinstance(self.axes, int):
<add> if self.axes < 0:
<add> axes = [self.axes % K.ndim(x1), self.axes % K.ndim(x2)]
<add> else:
<add> axes = [self.axes] * 2
<add> else:
<add> axes = self.axes
<add> if self.normalize:
<add> x1 = K.l2_normalize(x1, axis=axes[0])
<add> x2 = K.l2_normalize(x2, axis=axes[1])
<add> output = K.batch_dot(x1, x2, axes)
<add> return output
<add>
<add> def get_output_shape_for(self, input_shape):
<add> if not isinstance(input_shape, list) or len(input_shape) != 2:
<add> raise ValueError('A `Dot` layer should be called '
<add> 'on a list of 2 inputs.')
<add> shape1 = list(input_shape[0])
<add> shape2 = list(input_shape[1])
<add> if isinstance(self.axes, int):
<add> if self.axes < 0:
<add> axes = [self.axes % len(shape1), self.axes % len(shape2)]
<add> else:
<add> axes = [self.axes] * 2
<add> else:
<add> axes = self.axes
<add> shape1.pop(axes[0])
<add> shape2.pop(axes[1])
<add> shape2.pop(0)
<add> output_shape = shape1 + shape2
<add> if len(output_shape) == 1:
<add> output_shape += [1]
<add> return tuple(output_shape)
<add>
<add> def compute_mask(self, inputs, mask=None):
<add> return None
<add>
<add> def get_config(self):
<add> config = {
<add> 'axes': self.axes,
<add> 'normalize': self.normalize,
<add> }
<add> base_config = super(Dot, self).get_config()
<add> return dict(list(base_config.items()) + list(config.items()))
<add>
<add>
<add>def sum(inputs):
<add> """TODO
<add> """
<add> return Sum()(inputs)
<add>
<add>
<add>def multiply(inputs):
<add> """TODO
<add> """
<add> return Multiply()(inputs)
<add>
<add>
<add>def average(inputs):
<add> """TODO
<add> """
<add> return Average()(inputs)
<add>
<add>
<add>def maximum(inputs):
<add> """TODO
<add> """
<add> return Maximum()(inputs)
<add>
<add>
<add>def concatenate(inputs, axis=-1):
<add> """TODO
<add> """
<add> return Concatenate(axis=axis)(inputs)
<add>
<add>
<add>def dot(inputs, axes, normalize=False):
<add> """TODO
<add> """
<add> return Dot(axes=axes, normalize=normalize)(inputs)
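
As a usage sketch (commentary, not part of the patch): assuming this module lands as keras.layers.merge and the functional Model API of the time (the keras.models import and layer shapes here are illustrative assumptions), the new interface would be exercised roughly as follows:

from keras.layers import Input, Dense
from keras.layers.merge import concatenate, dot
from keras.models import Model  # assumed import path

a = Input(shape=(16,))
b = Input(shape=(16,))

# Join the two branches along the feature axis: (None, 16) + (None, 16) -> (None, 32).
merged = concatenate([a, b], axis=-1)

# Per the l2_normalize path in Dot, this yields cosine proximity per sample: (None, 1).
similarity = dot([a, b], axes=1, normalize=True)

out = Dense(1)(merged)
model = Model([a, b], out)
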
| 2
|
Javascript
|
Javascript
|
allow nginclude on css class
|
428f2b563663315df4f235ca19cef4bdcf82e2ab
|
<ide><path>src/ng/directive/ngInclude.js
<ide> /**
<ide> * @ngdoc directive
<ide> * @name angular.module.ng.$compileProvider.directive.ng-include
<del> * @restrict EA
<add> * @restrict ECA
<ide> *
<ide> * @description
<ide> * Fetches, compiles and includes an external HTML fragment.
<ide> var ngIncludeDirective = ['$http', '$templateCache', '$anchorScroll', '$compile',
<ide> function($http, $templateCache, $anchorScroll, $compile) {
<ide> return {
<del> restrict: 'EA',
<add> restrict: 'ECA',
<add> terminal: true,
<ide> compile: function(element, attr) {
<ide> var srcExp = attr.ngInclude || attr.src,
<ide> onloadExp = attr.onload || '',
| 1
|
Java
|
Java
|
change mediatypefactory to return optional
|
fd1db57e058c240d6e953940b9147d233c9d85fe
|
<ide><path>spring-test/src/main/java/org/springframework/mock/web/MockServletContext.java
<ide> public int getEffectiveMinorVersion() {
<ide> @Override
<ide> public String getMimeType(String filePath) {
<ide> String extension = StringUtils.getFilenameExtension(filePath);
<del> MediaType result;
<ide> if (this.mimeTypes.containsKey(extension)) {
<del> result = this.mimeTypes.get(extension);
<add> return this.mimeTypes.get(extension).toString();
<ide> }
<ide> else {
<del> result = MediaTypeFactory.getMediaType(filePath);
<add> return MediaTypeFactory.getMediaType(filePath).orElse(MediaType.APPLICATION_OCTET_STREAM).toString();
<ide> }
<del> return result != null ? result.toString() : null;
<ide> }
<ide>
<ide> /**
<ide><path>spring-web/src/main/java/org/springframework/http/MediaTypeFactory.java
<ide> private static MultiValueMap<String, MediaType> parseMimeTypes() {
<ide> * @param resource the resource to introspect
<ide> * @return the corresponding media type, or {@code null} if none found
<ide> */
<del> public static MediaType getMediaType(Resource resource) {
<add> public static Optional<MediaType> getMediaType(Resource resource) {
<add> if (resource == null) {
<add> return Optional.empty();
<add> }
<ide> String filename = resource.getFilename();
<del> return (filename != null ? getMediaType(filename) : null);
<add> return (filename != null ? getMediaType(filename) : Optional.empty());
<ide> }
<ide>
<ide> /**
<ide> * Determine a media type for the given file name, if possible.
<ide> * @param filename the file name plus extension
<ide> * @return the corresponding media type, or {@code null} if none found
<ide> */
<del> public static MediaType getMediaType(String filename) {
<add> public static Optional<MediaType> getMediaType(String filename) {
<ide> List<MediaType> mediaTypes = getMediaTypes(filename);
<del> return (!mediaTypes.isEmpty() ? mediaTypes.get(0) : null);
<add> return (!mediaTypes.isEmpty() ? Optional.of(mediaTypes.get(0)) : Optional.empty());
<ide> }
<ide>
<ide> /**
<ide><path>spring-web/src/main/java/org/springframework/http/codec/ResourceHttpMessageWriter.java
<ide> private static MediaType getResourceMediaType(MediaType type, Resource resource)
<ide> if (type != null && type.isConcrete() && !type.equals(MediaType.APPLICATION_OCTET_STREAM)) {
<ide> return type;
<ide> }
<del> type = MediaTypeFactory.getMediaType(resource);
<del> return type != null ? type : MediaType.APPLICATION_OCTET_STREAM;
<add> return MediaTypeFactory.getMediaType(resource).orElse(MediaType.APPLICATION_OCTET_STREAM);
<ide> }
<ide>
<ide> private static OptionalLong lengthOf(Resource resource) {
<ide><path>spring-web/src/main/java/org/springframework/http/converter/ResourceHttpMessageConverter.java
<ide> public String getFilename() {
<ide>
<ide> @Override
<ide> protected MediaType getDefaultContentType(Resource resource) {
<del> MediaType mediaType = MediaTypeFactory.getMediaType(resource);
<del> return mediaType != null ? mediaType : MediaType.APPLICATION_OCTET_STREAM;
<add> return MediaTypeFactory.getMediaType(resource).orElse(MediaType.APPLICATION_OCTET_STREAM);
<ide> }
<ide>
<ide> @Override
<ide><path>spring-web/src/main/java/org/springframework/http/converter/ResourceRegionHttpMessageConverter.java
<ide> protected MediaType getDefaultContentType(Object object) {
<ide> resource = regions.iterator().next().getResource();
<ide> }
<ide> }
<del> MediaType result = null;
<del> if (resource != null) {
<del> result = MediaTypeFactory.getMediaType(resource);
<del> }
<del> if (result == null) {
<del> return MediaType.APPLICATION_OCTET_STREAM;
<del> }
<del> return result;
<add> return MediaTypeFactory.getMediaType(resource).orElse(MediaType.APPLICATION_OCTET_STREAM);
<ide> }
<ide>
<ide> @Override
<ide><path>spring-web/src/main/java/org/springframework/web/accept/PathExtensionContentNegotiationStrategy.java
<ide>
<ide> import java.util.Locale;
<ide> import java.util.Map;
<add>import java.util.Optional;
<ide> import javax.servlet.http.HttpServletRequest;
<ide>
<ide> import org.apache.commons.logging.Log;
<ide> protected String getMediaTypeKey(NativeWebRequest webRequest) {
<ide> protected MediaType handleNoMatch(NativeWebRequest webRequest, String extension)
<ide> throws HttpMediaTypeNotAcceptableException {
<ide>
<del> MediaType mediaType = MediaTypeFactory.getMediaType("file." + extension);
<del> if (mediaType != null) {
<del> return mediaType;
<add> Optional<MediaType> mediaType = MediaTypeFactory.getMediaType("file." + extension);
<add> if (mediaType.isPresent()) {
<add> return mediaType.get();
<ide> }
<ide> if (this.ignoreUnknownExtensions) {
<ide> return null;
<ide> public MediaType getMediaTypeForResource(Resource resource) {
<ide> mediaType = lookupMediaType(extension);
<ide> }
<ide> if (mediaType == null) {
<del> mediaType = MediaTypeFactory.getMediaType(filename);
<add> mediaType = MediaTypeFactory.getMediaType(filename).orElse(null);
<ide> }
<ide> return mediaType;
<ide> }
<ide><path>spring-web/src/test/java/org/springframework/http/MediaTypeFactoryTests.java
<ide>
<ide> import org.junit.Test;
<ide>
<del>import static org.junit.Assert.assertEquals;
<add>import org.springframework.core.io.Resource;
<add>
<add>import static org.junit.Assert.*;
<ide>
<ide> /**
<ide> * @author Arjen Poutsma
<ide> public class MediaTypeFactoryTests {
<ide>
<ide> @Test
<ide> public void getMediaType() {
<del> assertEquals(MediaType.APPLICATION_XML, MediaTypeFactory.getMediaType("file.xml"));
<del> assertEquals(MediaType.parseMediaType("application/javascript"), MediaTypeFactory.getMediaType("file.js"));
<del> assertEquals(MediaType.parseMediaType("text/css"), MediaTypeFactory.getMediaType("file.css"));
<add> assertEquals(MediaType.APPLICATION_XML, MediaTypeFactory.getMediaType("file.xml").get());
<add> assertEquals(MediaType.parseMediaType("application/javascript"), MediaTypeFactory.getMediaType("file.js").get());
<add> assertEquals(MediaType.parseMediaType("text/css"), MediaTypeFactory.getMediaType("file.css").get());
<add> assertFalse(MediaTypeFactory.getMediaType("file.foobar").isPresent());
<add> }
<add>
<add> @Test
<add> public void nullParameter() {
<add> assertFalse(MediaTypeFactory.getMediaType((String) null).isPresent());
<add> assertFalse(MediaTypeFactory.getMediaType((Resource) null).isPresent());
<add> assertTrue(MediaTypeFactory.getMediaTypes(null).isEmpty());
<ide> }
<ide>
<ide> }
<ide>\ No newline at end of file
<ide><path>spring-web/src/test/java/org/springframework/mock/web/test/MockServletContext.java
<ide> public int getEffectiveMinorVersion() {
<ide> @Override
<ide> public String getMimeType(String filePath) {
<ide> String extension = StringUtils.getFilenameExtension(filePath);
<del> MediaType result;
<ide> if (this.mimeTypes.containsKey(extension)) {
<del> result = this.mimeTypes.get(extension);
<add> return this.mimeTypes.get(extension).toString();
<ide> }
<ide> else {
<del> result = MediaTypeFactory.getMediaType(filePath);
<add> return MediaTypeFactory.getMediaType(filePath).orElse(MediaType.APPLICATION_OCTET_STREAM).toString();
<ide> }
<del> return result != null ? result.toString() : null;
<ide> }
<ide>
<ide> /**
<ide><path>spring-webflux/src/main/java/org/springframework/web/reactive/accept/PathExtensionContentTypeResolver.java
<ide>
<ide> import java.util.Locale;
<ide> import java.util.Map;
<add>import java.util.Optional;
<ide>
<ide> import org.springframework.core.io.Resource;
<ide> import org.springframework.http.MediaType;
<ide> protected String extractKey(ServerWebExchange exchange) {
<ide>
<ide> @Override
<ide> protected MediaType handleNoMatch(String key) throws NotAcceptableStatusException {
<del> MediaType mediaType = MediaTypeFactory.getMediaType("file." + key);
<del> if (mediaType != null) {
<del> return mediaType;
<add> Optional<MediaType> mediaType = MediaTypeFactory.getMediaType("file." + key);
<add> if (mediaType.isPresent()) {
<add> return mediaType.get();
<ide> }
<ide> if (!this.ignoreUnknownExtensions) {
<ide> throw new NotAcceptableStatusException(getAllMediaTypes());
<ide> public MediaType resolveMediaTypeForResource(Resource resource) {
<ide> mediaType = getMediaType(extension);
<ide> }
<ide> if (mediaType == null) {
<del> mediaType = MediaTypeFactory.getMediaType(filename);
<add> mediaType = MediaTypeFactory.getMediaType(filename).orElse(null);
<ide> }
<ide> return mediaType;
<ide> }
| 9
|