Dataset schema:
content_type: stringclasses (8 values)
main_lang: stringclasses (7 values)
message: stringlengths (1-50)
sha: stringlengths (exactly 40)
patch: stringlengths (52-962k)
file_count: int64 (1-300)
content_type: Javascript
main_lang: Javascript
message: offset the no of donations on prod
sha: 9fb9ce3085585287516def3c519e82f3575619be
patch:
<ide><path>client/src/redux/index.js <ide> export const userSelector = state => { <ide> }; <ide> <ide> export const sessionMetaSelector = state => state[ns].sessionMeta; <del>export const activeDonationsSelector = state => <del> Number(sessionMetaSelector(state).activeDonations) + <del> Number(PAYPAL_SUPPORTERS || 0); <add>export const activeDonationsSelector = state => { <add> const donors = <add> Number(sessionMetaSelector(state).activeDonations) + <add> Number(PAYPAL_SUPPORTERS || 0) - <add> // Note 1: <add> // Offset the no of inactive donations, that are not yet normalized in db <add> // TODO: This data needs to be fetched and updated in db from Stripe <add> 2500; <add> // Note 2: <add> // Due to the offset above, non-prod data needs to be adjusted for -ve values <add> return donors > 0 <add> ? donors <add> : Number(sessionMetaSelector(state).activeDonations); <add>}; <ide> <ide> function spreadThePayloadOnUser(state, payload) { <ide> return {
file_count: 1
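To make the clamping logic in this patch concrete, here is a minimal runnable sketch of the new selector; the `ns` key, the state shape, and the `PAYPAL_SUPPORTERS` value are hypothetical stand-ins, not the project's real constants.

```js
// Hypothetical namespace and constant, for illustration only.
const ns = 'app';
const PAYPAL_SUPPORTERS = 1000;

const sessionMetaSelector = state => state[ns].sessionMeta;

const activeDonationsSelector = state => {
  const donors =
    Number(sessionMetaSelector(state).activeDonations) +
    Number(PAYPAL_SUPPORTERS || 0) -
    2500; // offset for inactive donations not yet normalized in the db
  // Non-prod data can go negative after the offset, so fall back
  // to the raw count in that case.
  return donors > 0
    ? donors
    : Number(sessionMetaSelector(state).activeDonations);
};

// Prod-like data: the offset applies (4000 + 1000 - 2500 = 2500).
console.log(activeDonationsSelector({ app: { sessionMeta: { activeDonations: 4000 } } })); // 2500
// Small non-prod data: the result would be negative, so the raw count is used.
console.log(activeDonationsSelector({ app: { sessionMeta: { activeDonations: 10 } } })); // 10
```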
content_type: Python
main_lang: Python
message: fix tiny errors
sha: efe7c3ea6fd4ab3b9b78ff492e652161a748d0b6
patch:
<ide><path>research/object_detection/utils/spatial_transform_ops.py <ide> def multilevel_native_crop_and_resize(images, boxes, box_levels, <ide> crop_size, scope=None): <ide> #FIXME: fix docstring <ide> """doc string.""" <del> if not box_levels: <add> if box_levels is None: <ide> return native_crop_and_resize(images[0], boxes, crop_size, scope=None) <ide> croped_feature_list = [] <ide> for level, image in enumerate(images): <ide> def multilevel_native_crop_and_resize(images, boxes, box_levels, <ide> croped_feature_list.append(cropped) <ide> return tf.concat(croped_feature_list, axis=0) <ide> <add> <ide> def native_crop_and_resize(image, boxes, crop_size, scope=None): <ide> """Same as `matmul_crop_and_resize` but uses tf.image.crop_and_resize.""" <ide> def get_box_inds(proposals): <ide> def get_box_inds(proposals): <ide> tf.shape(cropped_regions)[1:]], axis=0) <ide> return tf.reshape(cropped_regions, final_shape) <ide> <add> <ide> def multilevel_matmul_crop_and_resize(images, boxes, box_levels, crop_size, <ide> extrapolation_value=0.0, scope=None): <ide> #FIXME: fix docstring <ide> """doc string.""" <ide> with tf.name_scope(scope, 'MatMulCropAndResize'): <ide> if box_levels is None: <ide> box_levels = tf.zeros(tf.shape(boxes)[:2], dtype=tf.int32) <del> return multilevel_roi_align([image], <add> return multilevel_roi_align(images, <ide> boxes, <ide> box_levels, <ide> crop_size,
file_count: 1
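The key fix above is `if box_levels is None:` in place of `if not box_levels:`. A standalone sketch (not the TensorFlow code itself) of why truthiness is the wrong test here:

```python
import numpy as np

def crop(box_levels):
    # Correct test: only the "not provided" case takes the single-level path.
    if box_levels is None:
        return 'single-level path'
    return 'multi-level path'

print(crop(None))     # single-level path
print(crop([0, 1]))   # multi-level path

# With `if not box_levels:`, an empty container would wrongly take the
# single-level path, and an array of levels would raise outright:
try:
    if not np.array([0, 1]):
        pass
except ValueError as err:
    print(err)  # truth value of an array with more than one element is ambiguous
```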
content_type: Javascript
main_lang: Javascript
message: replace recursion with loop
sha: 67077b7183d9aa9f88af8124de22f817f5515681
patch:
<ide><path>lib/ExportsInfo.js <ide> class ExportInfo { <ide> const resolveTarget = (target, alreadyVisited) => { <ide> if (!target) return null; <ide> if (!target.export) return target; <del> const exportsInfo = moduleGraph.getExportsInfo(target.module); <del> const exportInfo = exportsInfo.getExportInfo(target.export[0]); <del> if (!exportInfo) return target; <del> if (alreadyVisited.has(exportInfo)) return null; <del> const newTarget = exportInfo.getTarget(moduleGraph, alreadyVisited); <del> if (newTarget) { <add> let alreadyVisitedOwned = false; <add> for (;;) { <add> const exportsInfo = moduleGraph.getExportsInfo(target.module); <add> const exportInfo = exportsInfo.getExportInfo(target.export[0]); <add> if (!exportInfo) return target; <add> if (alreadyVisited.has(exportInfo)) return null; <add> const newTarget = exportInfo.getTarget(moduleGraph, alreadyVisited); <add> if (!newTarget) return target; <ide> if (newTarget.export || target.export.length === 1) return newTarget; <ide> target = { <ide> module: newTarget.module, <ide> export: target.export.slice(1) <ide> }; <del> const newAlreadyVisited = new Set([...alreadyVisited, exportInfo]); <del> return resolveTarget(target, newAlreadyVisited); <add> if (!alreadyVisitedOwned) { <add> alreadyVisited = new Set(alreadyVisited); <add> alreadyVisitedOwned = true; <add> } <add> alreadyVisited.add(exportInfo); <ide> } <del> return target; <ide> }; <ide> <ide> if (!this._target || this._target.size === 0) return undefined; <ide> const newAlreadyVisited = new Set(alreadyVisited); <ide> newAlreadyVisited.add(this); <del> if (this._target.size === 1) <add> if (this._target.size === 1) { <ide> return resolveTarget( <ide> this._target.values().next().value, <ide> newAlreadyVisited <ide> ); <add> } <ide> const values = this._target.values(); <ide> const target = resolveTarget(values.next().value, newAlreadyVisited); <ide> if (target === null) return undefined;
file_count: 1
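The pattern in this patch, recursion that copies a visited set on every step rewritten as a loop that copies the set once and lazily, reduced to a toy linked-list walk with hypothetical `node`/`next` fields:

```js
// Recursive original: allocates a fresh Set on every step.
function resolveRecursive(node, visited) {
  if (visited.has(node)) return null;
  if (!node.next) return node;
  return resolveRecursive(node.next, new Set([...visited, node]));
}

// Iterative rewrite: copy the set at most once, then mutate the owned copy.
function resolveIterative(node, visited) {
  let owned = false;
  for (;;) {
    if (visited.has(node)) return null;
    if (!node.next) return node;
    if (!owned) {
      visited = new Set(visited); // copy-on-write: caller's set stays untouched
      owned = true;
    }
    visited.add(node);
    node = node.next;
  }
}

const tail = { next: null };
const chain = { next: { next: tail } };
console.log(resolveIterative(chain, new Set()) === tail); // true
```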
content_type: Ruby
main_lang: Ruby
message: require the count_down_latch
sha: 175515ad8bdbe1f87a52c3023d35d434b48e35fc
patch:
<ide><path>activerecord/test/cases/migration_test.rb <del>require "cases/helper" <del>require "cases/migration/helper" <add>require 'cases/helper' <add>require 'cases/migration/helper' <ide> require 'bigdecimal/util' <add>require 'concurrent/atomic/count_down_latch' <ide> <ide> require 'models/person' <ide> require 'models/topic'
file_count: 1
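`concurrent/atomic/count_down_latch` is part of the concurrent-ruby gem; a minimal standalone usage example of the class the new require pulls in:

```ruby
require 'concurrent/atomic/count_down_latch'

latch = Concurrent::CountDownLatch.new(2)

2.times do
  Thread.new do
    # ... do some work ...
    latch.count_down
  end
end

# Blocks until both threads have counted down (or 5 seconds pass);
# returns true if the count reached zero in time.
puts latch.wait(5)
```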
content_type: Text
main_lang: Text
message: add link back to home page from docs
sha: 053729ac44b6a16ce957142fdcacaa5bfd9d8d79
patch:
<ide><path>docs/SUMMARY.md <ide> # Summary <ide> <del>* [Chart.js](README.md) <add>* [Chart.js](https://www.chartjs.org) <add>* [Introduction](README.md) <ide> * [Getting Started](getting-started/README.md) <ide> * [Installation](getting-started/installation.md) <ide> * [Integration](getting-started/integration.md)
file_count: 1
content_type: PHP
main_lang: PHP
message: add tests for support\optional
sha: 4a49ba5f2e86d29f8f07dc65640460822ee98bd0
patch:
<ide><path>tests/Support/SupportOptionalTest.php <add><?php <add> <add>namespace Illuminate\Tests\Support; <add> <add>use PHPUnit\Framework\TestCase; <add>use Illuminate\Support\Optional; <add> <add>class SupportOptionalTest extends TestCase <add>{ <add> public function testGetExistItemOnObject() <add> { <add> $expected = 'test'; <add> <add> $targetObj = new \stdClass; <add> $targetObj->item = $expected; <add> <add> $optional = new Optional($targetObj); <add> <add> $this->assertEquals($expected, $optional->item); <add> } <add> <add> public function testGetNotExistItemOnObject() <add> { <add> $targetObj = new \stdClass; <add> <add> $optional = new Optional($targetObj); <add> <add> $this->assertNull($optional->item); <add> } <add> <add> public function testGetExistItemOnArray() <add> { <add> $expected = 'test'; <add> <add> $targetArr = [ <add> 'item' => $expected, <add> ]; <add> <add> $optional = new Optional($targetArr); <add> <add> $this->assertEquals($expected, $optional['item']); <add> } <add> <add> public function testGetNotExistItemOnArray() <add> { <add> $targetObj = []; <add> <add> $optional = new Optional($targetObj); <add> <add> $this->assertNull($optional['item']); <add> } <add>}
file_count: 1
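The behavior these tests pin down can be seen in a short sketch; the composer autoload path is assumed, and Laravel also exposes the same wrapper via its `optional()` helper:

```php
<?php
// Assumes illuminate/support is installed via composer.
require 'vendor/autoload.php';

use Illuminate\Support\Optional;

$user = new stdClass;
$user->name = 'Ada';

$present = new Optional($user);
$absent  = new Optional(null);

var_dump($present->name); // string(3) "Ada"
var_dump($absent->name);  // NULL, with no "property of null" error

// Equivalent via the helper: optional($user)->name === 'Ada'
```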
content_type: Ruby
main_lang: Ruby
message: handle ranges with excluded end
sha: 233ee77f4511255ff2ff7c0b0ebf1cee13e7fc10
patch:
<ide><path>lib/arel/algebra/attributes/attribute.rb <ide> def matches(regexp) <ide> end <ide> <ide> def in(array) <del> Predicates::In.new(self, array) <add> if array.is_a?(Range) && array.exclude_end? <add> [Predicates::GreaterThanOrEqualTo.new(self, array.begin), Predicates::LessThan.new(self, array.end)] <add> else <add> Predicates::In.new(self, array) <add> end <ide> end <ide> end <ide> include Predications
file_count: 1
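Ruby's three-dot range `18...65` excludes its end, so mapping it through an inclusive `Predicates::In` would wrongly match 65; the patch splits it into a `>=`/`<` pair instead. Illustratively, with a hypothetical `users` relation:

```ruby
# Illustrative only: `users` is a hypothetical Arel relation.
age = users[:age]

age.in(18..65)   # inclusive range: a single Predicates::In
age.in(18...65)  # excluded end, per the patch:
                 #   [Predicates::GreaterThanOrEqualTo.new(age, 18),
                 #    Predicates::LessThan.new(age, 65)]
                 # i.e. age >= 18 AND age < 65, so 65 itself never matches
```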
content_type: Python
main_lang: Python
message: fix spelling of develop
sha: 9a978c61193a9a4c7ef5a96cdaa8b9aef4b38dda
patch:
<ide><path>runtests.py <ide> def main(argv): <ide> "Note that you need to commit your changes first!")) <ide> parser.add_argument("--raise-warnings", default=None, type=str, <ide> choices=('develop', 'release'), <del> help="if 'develop', warnings are treated as errors; " <del> "defaults to 'devlop' in development versions.") <add> help=("if 'develop', warnings are treated as errors; " <add> "defaults to 'develop' in development versions.")) <ide> parser.add_argument("args", metavar="ARGS", default=[], nargs=REMAINDER, <ide> help="Arguments to pass to Nose, Python or shell") <ide> args = parser.parse_args(argv)
file_count: 1
content_type: Mixed
main_lang: Go
message: remove pkg/discovery as it's now unused
sha: e202ac3f3857d422bb65a86f93898a9b404cc1e3
patch:
<ide><path>pkg/discovery/README.md <del>--- <del>page_title: Docker discovery <del>page_description: discovery <del>page_keywords: docker, clustering, discovery <del>--- <del> <del># Discovery <del> <del>Docker comes with multiple Discovery backends. <del> <del>## Backends <del> <del>### Using etcd <del> <del>Point your Docker Engine instances to a common etcd instance. You can specify <del>the address Docker uses to advertise the node using the `--cluster-advertise` <del>flag. <del> <del>```bash <del>$ dockerd -H=<node_ip:2376> --cluster-advertise=<node_ip:2376> --cluster-store etcd://<etcd_ip1>,<etcd_ip2>/<path> <del>``` <del> <del>### Using consul <del> <del>Point your Docker Engine instances to a common Consul instance. You can specify <del>the address Docker uses to advertise the node using the `--cluster-advertise` <del>flag. <del> <del>```bash <del>$ dockerd -H=<node_ip:2376> --cluster-advertise=<node_ip:2376> --cluster-store consul://<consul_ip>/<path> <del>``` <del> <del>### Using zookeeper <del> <del>Point your Docker Engine instances to a common Zookeeper instance. You can specify <del>the address Docker uses to advertise the node using the `--cluster-advertise` <del>flag. <del> <del>```bash <del>$ dockerd -H=<node_ip:2376> --cluster-advertise=<node_ip:2376> --cluster-store zk://<zk_addr1>,<zk_addr2>/<path> <del>``` <ide><path>pkg/discovery/backends.go <del>package discovery // import "github.com/docker/docker/pkg/discovery" <del> <del>import ( <del> "fmt" <del> "net" <del> "strings" <del> "time" <del> <del> "github.com/sirupsen/logrus" <del>) <del> <del>var ( <del> // Backends is a global map of discovery backends indexed by their <del> // associated scheme. <del> backends = make(map[string]Backend) <del>) <del> <del>// Register makes a discovery backend available by the provided scheme. <del>// If Register is called twice with the same scheme an error is returned. 
<del>func Register(scheme string, d Backend) error { <del> if _, exists := backends[scheme]; exists { <del> return fmt.Errorf("scheme already registered %s", scheme) <del> } <del> logrus.WithField("name", scheme).Debugf("Registering discovery service") <del> backends[scheme] = d <del> return nil <del>} <del> <del>func parse(rawurl string) (string, string) { <del> parts := strings.SplitN(rawurl, "://", 2) <del> <del> // nodes:port,node2:port => nodes://node1:port,node2:port <del> if len(parts) == 1 { <del> return "nodes", parts[0] <del> } <del> return parts[0], parts[1] <del>} <del> <del>// ParseAdvertise parses the --cluster-advertise daemon config which accepts <del>// <ip-address>:<port> or <interface-name>:<port> <del>func ParseAdvertise(advertise string) (string, error) { <del> var ( <del> iface *net.Interface <del> addrs []net.Addr <del> err error <del> ) <del> <del> addr, port, err := net.SplitHostPort(advertise) <del> <del> if err != nil { <del> return "", fmt.Errorf("invalid --cluster-advertise configuration: %s: %v", advertise, err) <del> } <del> <del> ip := net.ParseIP(addr) <del> // If it is a valid ip-address, use it as is <del> if ip != nil { <del> return advertise, nil <del> } <del> <del> // If advertise is a valid interface name, get the valid IPv4 address and use it to advertise <del> ifaceName := addr <del> iface, err = net.InterfaceByName(ifaceName) <del> if err != nil { <del> return "", fmt.Errorf("invalid cluster advertise IP address or interface name (%s) : %v", advertise, err) <del> } <del> <del> addrs, err = iface.Addrs() <del> if err != nil { <del> return "", fmt.Errorf("unable to get advertise IP address from interface (%s) : %v", advertise, err) <del> } <del> <del> if len(addrs) == 0 { <del> return "", fmt.Errorf("no available advertise IP address in interface (%s)", advertise) <del> } <del> <del> addr = "" <del> for _, a := range addrs { <del> ip, _, err := net.ParseCIDR(a.String()) <del> if err != nil { <del> return "", fmt.Errorf("error deriving advertise ip-address in interface (%s) : %v", advertise, err) <del> } <del> if ip.To4() == nil || ip.IsLoopback() { <del> continue <del> } <del> addr = ip.String() <del> break <del> } <del> if addr == "" { <del> return "", fmt.Errorf("could not find a valid ip-address in interface %s", advertise) <del> } <del> <del> addr = net.JoinHostPort(addr, port) <del> return addr, nil <del>} <del> <del>// New returns a new Discovery given a URL, heartbeat and ttl settings. <del>// Returns an error if the URL scheme is not supported. <del>func New(rawurl string, heartbeat time.Duration, ttl time.Duration, clusterOpts map[string]string) (Backend, error) { <del> scheme, uri := parse(rawurl) <del> if backend, exists := backends[scheme]; exists { <del> logrus.WithFields(logrus.Fields{"name": scheme, "uri": uri}).Debugf("Initializing discovery service") <del> err := backend.Initialize(uri, heartbeat, ttl, clusterOpts) <del> return backend, err <del> } <del> <del> return nil, ErrNotSupported <del>} <ide><path>pkg/discovery/discovery.go <del>package discovery // import "github.com/docker/docker/pkg/discovery" <del> <del>import ( <del> "errors" <del> "time" <del>) <del> <del>var ( <del> // ErrNotSupported is returned when a discovery service is not supported. <del> ErrNotSupported = errors.New("discovery service not supported") <del> <del> // ErrNotImplemented is returned when discovery feature is not implemented <del> // by discovery backend. 
<del> ErrNotImplemented = errors.New("not implemented in this discovery service") <del>) <del> <del>// Watcher provides watching over a cluster for nodes joining and leaving. <del>type Watcher interface { <del> // Watch the discovery for entry changes. <del> // Returns a channel that will receive changes or an error. <del> // Providing a non-nil stopCh can be used to stop watching. <del> Watch(stopCh <-chan struct{}) (<-chan Entries, <-chan error) <del>} <del> <del>// Backend is implemented by discovery backends which manage cluster entries. <del>type Backend interface { <del> // Watcher must be provided by every backend. <del> Watcher <del> <del> // Initialize the discovery with URIs, a heartbeat, a ttl and optional settings. <del> Initialize(string, time.Duration, time.Duration, map[string]string) error <del> <del> // Register to the discovery. <del> Register(string) error <del>} <ide><path>pkg/discovery/discovery_test.go <del>package discovery // import "github.com/docker/docker/pkg/discovery" <del> <del>import ( <del> "testing" <del> <del> "github.com/docker/docker/internal/test/suite" <del> "gotest.tools/v3/assert" <del>) <del> <del>// Hook up gocheck into the "go test" runner. <del>func Test(t *testing.T) { <del> suite.Run(t, &DiscoverySuite{}) <del>} <del> <del>type DiscoverySuite struct{} <del> <del>func (s *DiscoverySuite) TestNewEntry(c *testing.T) { <del> entry, err := NewEntry("127.0.0.1:2375") <del> assert.Assert(c, err == nil) <del> assert.Equal(c, entry.Equals(&Entry{Host: "127.0.0.1", Port: "2375"}), true) <del> assert.Equal(c, entry.String(), "127.0.0.1:2375") <del> <del> entry, err = NewEntry("[2001:db8:0:f101::2]:2375") <del> assert.Assert(c, err == nil) <del> assert.Equal(c, entry.Equals(&Entry{Host: "2001:db8:0:f101::2", Port: "2375"}), true) <del> assert.Equal(c, entry.String(), "[2001:db8:0:f101::2]:2375") <del> <del> _, err = NewEntry("127.0.0.1") <del> assert.Assert(c, err != nil) <del>} <del> <del>func (s *DiscoverySuite) TestParse(c *testing.T) { <del> scheme, uri := parse("127.0.0.1:2375") <del> assert.Equal(c, scheme, "nodes") <del> assert.Equal(c, uri, "127.0.0.1:2375") <del> <del> scheme, uri = parse("localhost:2375") <del> assert.Equal(c, scheme, "nodes") <del> assert.Equal(c, uri, "localhost:2375") <del> <del> scheme, uri = parse("scheme://127.0.0.1:2375") <del> assert.Equal(c, scheme, "scheme") <del> assert.Equal(c, uri, "127.0.0.1:2375") <del> <del> scheme, uri = parse("scheme://localhost:2375") <del> assert.Equal(c, scheme, "scheme") <del> assert.Equal(c, uri, "localhost:2375") <del> <del> scheme, uri = parse("") <del> assert.Equal(c, scheme, "nodes") <del> assert.Equal(c, uri, "") <del>} <del> <del>func (s *DiscoverySuite) TestCreateEntries(c *testing.T) { <del> entries, err := CreateEntries(nil) <del> assert.DeepEqual(c, entries, Entries{}) <del> assert.Assert(c, err == nil) <del> <del> entries, err = CreateEntries([]string{"127.0.0.1:2375", "127.0.0.2:2375", "[2001:db8:0:f101::2]:2375", ""}) <del> assert.Assert(c, err == nil) <del> expected := Entries{ <del> &Entry{Host: "127.0.0.1", Port: "2375"}, <del> &Entry{Host: "127.0.0.2", Port: "2375"}, <del> &Entry{Host: "2001:db8:0:f101::2", Port: "2375"}, <del> } <del> assert.Equal(c, entries.Equals(expected), true) <del> <del> _, err = CreateEntries([]string{"127.0.0.1", "127.0.0.2"}) <del> assert.Assert(c, err != nil) <del>} <del> <del>func (s *DiscoverySuite) TestContainsEntry(c *testing.T) { <del> entries, err := CreateEntries([]string{"127.0.0.1:2375", "127.0.0.2:2375", ""}) <del> assert.Assert(c, err 
== nil) <del> assert.Equal(c, entries.Contains(&Entry{Host: "127.0.0.1", Port: "2375"}), true) <del> assert.Equal(c, entries.Contains(&Entry{Host: "127.0.0.3", Port: "2375"}), false) <del>} <del> <del>func (s *DiscoverySuite) TestEntriesEquality(c *testing.T) { <del> entries := Entries{ <del> &Entry{Host: "127.0.0.1", Port: "2375"}, <del> &Entry{Host: "127.0.0.2", Port: "2375"}, <del> } <del> <del> // Same <del> assert.Assert(c, entries.Equals(Entries{ <del> &Entry{Host: "127.0.0.1", Port: "2375"}, <del> &Entry{Host: "127.0.0.2", Port: "2375"}, <del> })) <del> <del> // Different size <del> assert.Assert(c, !entries.Equals(Entries{ <del> &Entry{Host: "127.0.0.1", Port: "2375"}, <del> &Entry{Host: "127.0.0.2", Port: "2375"}, <del> &Entry{Host: "127.0.0.3", Port: "2375"}, <del> })) <del> <del> // Different content <del> assert.Assert(c, !entries.Equals(Entries{ <del> &Entry{Host: "127.0.0.1", Port: "2375"}, <del> &Entry{Host: "127.0.0.42", Port: "2375"}, <del> })) <del> <del>} <del> <del>func (s *DiscoverySuite) TestEntriesDiff(c *testing.T) { <del> entry1 := &Entry{Host: "1.1.1.1", Port: "1111"} <del> entry2 := &Entry{Host: "2.2.2.2", Port: "2222"} <del> entry3 := &Entry{Host: "3.3.3.3", Port: "3333"} <del> entries := Entries{entry1, entry2} <del> <del> // No diff <del> added, removed := entries.Diff(Entries{entry2, entry1}) <del> assert.Equal(c, len(added), 0) <del> assert.Equal(c, len(removed), 0) <del> <del> // Add <del> added, removed = entries.Diff(Entries{entry2, entry3, entry1}) <del> assert.Equal(c, len(added), 1) <del> assert.Equal(c, added.Contains(entry3), true) <del> assert.Equal(c, len(removed), 0) <del> <del> // Remove <del> added, removed = entries.Diff(Entries{entry2}) <del> assert.Equal(c, len(added), 0) <del> assert.Equal(c, len(removed), 1) <del> assert.Equal(c, removed.Contains(entry1), true) <del> <del> // Add and remove <del> added, removed = entries.Diff(Entries{entry1, entry3}) <del> assert.Equal(c, len(added), 1) <del> assert.Equal(c, added.Contains(entry3), true) <del> assert.Equal(c, len(removed), 1) <del> assert.Equal(c, removed.Contains(entry2), true) <del>} <ide><path>pkg/discovery/entry.go <del>package discovery // import "github.com/docker/docker/pkg/discovery" <del> <del>import "net" <del> <del>// NewEntry creates a new entry. <del>func NewEntry(url string) (*Entry, error) { <del> host, port, err := net.SplitHostPort(url) <del> if err != nil { <del> return nil, err <del> } <del> return &Entry{host, port}, nil <del>} <del> <del>// An Entry represents a host. <del>type Entry struct { <del> Host string <del> Port string <del>} <del> <del>// Equals returns true if cmp contains the same data. <del>func (e *Entry) Equals(cmp *Entry) bool { <del> return e.Host == cmp.Host && e.Port == cmp.Port <del>} <del> <del>// String returns the string form of an entry. <del>func (e *Entry) String() string { <del> return net.JoinHostPort(e.Host, e.Port) <del>} <del> <del>// Entries is a list of *Entry with some helpers. <del>type Entries []*Entry <del> <del>// Equals returns true if cmp contains the same data. <del>func (e Entries) Equals(cmp Entries) bool { <del> // Check if the file has really changed. <del> if len(e) != len(cmp) { <del> return false <del> } <del> for i := range e { <del> if !e[i].Equals(cmp[i]) { <del> return false <del> } <del> } <del> return true <del>} <del> <del>// Contains returns true if the Entries contain a given Entry. 
<del>func (e Entries) Contains(entry *Entry) bool { <del> for _, curr := range e { <del> if curr.Equals(entry) { <del> return true <del> } <del> } <del> return false <del>} <del> <del>// Diff compares two entries and returns the added and removed entries. <del>func (e Entries) Diff(cmp Entries) (Entries, Entries) { <del> added := Entries{} <del> for _, entry := range cmp { <del> if !e.Contains(entry) { <del> added = append(added, entry) <del> } <del> } <del> <del> removed := Entries{} <del> for _, entry := range e { <del> if !cmp.Contains(entry) { <del> removed = append(removed, entry) <del> } <del> } <del> <del> return added, removed <del>} <del> <del>// CreateEntries returns an array of entries based on the given addresses. <del>func CreateEntries(addrs []string) (Entries, error) { <del> entries := Entries{} <del> if addrs == nil { <del> return entries, nil <del> } <del> <del> for _, addr := range addrs { <del> if len(addr) == 0 { <del> continue <del> } <del> entry, err := NewEntry(addr) <del> if err != nil { <del> return nil, err <del> } <del> entries = append(entries, entry) <del> } <del> return entries, nil <del>} <ide><path>pkg/discovery/file/file.go <del>package file // import "github.com/docker/docker/pkg/discovery/file" <del> <del>import ( <del> "fmt" <del> "os" <del> "strings" <del> "time" <del> <del> "github.com/docker/docker/pkg/discovery" <del>) <del> <del>// Discovery is exported <del>type Discovery struct { <del> heartbeat time.Duration <del> path string <del>} <del> <del>func init() { <del> Init() <del>} <del> <del>// Init is exported <del>func Init() { <del> discovery.Register("file", &Discovery{}) <del>} <del> <del>// Initialize is exported <del>func (s *Discovery) Initialize(path string, heartbeat time.Duration, ttl time.Duration, _ map[string]string) error { <del> s.path = path <del> s.heartbeat = heartbeat <del> return nil <del>} <del> <del>func parseFileContent(content []byte) []string { <del> var result []string <del> for _, line := range strings.Split(strings.TrimSpace(string(content)), "\n") { <del> line = strings.TrimSpace(line) <del> // Ignoring line starts with # <del> if strings.HasPrefix(line, "#") { <del> continue <del> } <del> // Inlined # comment also ignored. <del> if strings.Contains(line, "#") { <del> line = line[0:strings.Index(line, "#")] <del> // Trim additional spaces caused by above stripping. <del> line = strings.TrimSpace(line) <del> } <del> result = append(result, discovery.Generate(line)...) <del> } <del> return result <del>} <del> <del>func (s *Discovery) fetch() (discovery.Entries, error) { <del> fileContent, err := os.ReadFile(s.path) <del> if err != nil { <del> return nil, fmt.Errorf("failed to read '%s': %v", s.path, err) <del> } <del> return discovery.CreateEntries(parseFileContent(fileContent)) <del>} <del> <del>// Watch is exported <del>func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { <del> ch := make(chan discovery.Entries, 1) <del> errCh := make(chan error, 1) <del> ticker := time.NewTicker(s.heartbeat) <del> <del> go func() { <del> defer close(errCh) <del> defer close(ch) <del> <del> // Send the initial entries if available. <del> currentEntries, err := s.fetch() <del> if err != nil { <del> errCh <- err <del> } else { <del> ch <- currentEntries <del> } <del> <del> // Periodically send updates. <del> for { <del> select { <del> case <-ticker.C: <del> newEntries, err := s.fetch() <del> if err != nil { <del> errCh <- err <del> continue <del> } <del> <del> // Check if the file has really changed. 
<del> if !newEntries.Equals(currentEntries) { <del> ch <- newEntries <del> } <del> currentEntries = newEntries <del> case <-stopCh: <del> ticker.Stop() <del> return <del> } <del> } <del> }() <del> <del> return ch, errCh <del>} <del> <del>// Register is exported <del>func (s *Discovery) Register(addr string) error { <del> return discovery.ErrNotImplemented <del>} <ide><path>pkg/discovery/file/file_test.go <del>package file // import "github.com/docker/docker/pkg/discovery/file" <del> <del>import ( <del> "os" <del> "testing" <del> <del> "github.com/docker/docker/internal/test/suite" <del> "github.com/docker/docker/pkg/discovery" <del> "gotest.tools/v3/assert" <del>) <del> <del>// Hook up gocheck into the "go test" runner. <del>func Test(t *testing.T) { <del> suite.Run(t, &DiscoverySuite{}) <del>} <del> <del>type DiscoverySuite struct{} <del> <del>func (s *DiscoverySuite) TestInitialize(c *testing.T) { <del> d := &Discovery{} <del> d.Initialize("/path/to/file", 1000, 0, nil) <del> assert.Equal(c, d.path, "/path/to/file") <del>} <del> <del>func (s *DiscoverySuite) TestNew(c *testing.T) { <del> d, err := discovery.New("file:///path/to/file", 0, 0, nil) <del> assert.Assert(c, err == nil) <del> assert.Equal(c, d.(*Discovery).path, "/path/to/file") <del>} <del> <del>func (s *DiscoverySuite) TestContent(c *testing.T) { <del> data := ` <del>1.1.1.[1:2]:1111 <del>2.2.2.[2:4]:2222 <del>` <del> ips := parseFileContent([]byte(data)) <del> assert.Equal(c, len(ips), 5) <del> assert.Equal(c, ips[0], "1.1.1.1:1111") <del> assert.Equal(c, ips[1], "1.1.1.2:1111") <del> assert.Equal(c, ips[2], "2.2.2.2:2222") <del> assert.Equal(c, ips[3], "2.2.2.3:2222") <del> assert.Equal(c, ips[4], "2.2.2.4:2222") <del>} <del> <del>func (s *DiscoverySuite) TestRegister(c *testing.T) { <del> discovery := &Discovery{path: "/path/to/file"} <del> assert.Assert(c, discovery.Register("0.0.0.0") != nil) <del>} <del> <del>func (s *DiscoverySuite) TestParsingContentsWithComments(c *testing.T) { <del> data := ` <del>### test ### <del>1.1.1.1:1111 # inline comment <del># 2.2.2.2:2222 <del> ### empty line with comment <del> 3.3.3.3:3333 <del>### test ### <del>` <del> ips := parseFileContent([]byte(data)) <del> assert.Equal(c, len(ips), 2) <del> assert.Equal(c, "1.1.1.1:1111", ips[0]) <del> assert.Equal(c, "3.3.3.3:3333", ips[1]) <del>} <del> <del>func (s *DiscoverySuite) TestWatch(c *testing.T) { <del> data := ` <del>1.1.1.1:1111 <del>2.2.2.2:2222 <del>` <del> expected := discovery.Entries{ <del> &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, <del> &discovery.Entry{Host: "2.2.2.2", Port: "2222"}, <del> } <del> <del> // Create a temporary file and remove it. <del> tmp, err := os.CreateTemp(os.TempDir(), "discovery-file-test") <del> assert.Assert(c, err == nil) <del> assert.Assert(c, tmp.Close() == nil) <del> assert.Assert(c, os.Remove(tmp.Name()) == nil) <del> <del> // Set up file discovery. <del> d := &Discovery{} <del> d.Initialize(tmp.Name(), 1000, 0, nil) <del> stopCh := make(chan struct{}) <del> ch, errCh := d.Watch(stopCh) <del> <del> // Make sure it fires errors since the file doesn't exist. <del> assert.Assert(c, <-errCh != nil) <del> // We have to drain the error channel otherwise Watch will get stuck. <del> go func() { <del> for range errCh { <del> } <del> }() <del> <del> // Write the file and make sure we get the expected value back. <del> assert.Assert(c, os.WriteFile(tmp.Name(), []byte(data), 0600) == nil) <del> assert.DeepEqual(c, <-ch, expected) <del> <del> // Add a new entry and look it up. 
<del> expected = append(expected, &discovery.Entry{Host: "3.3.3.3", Port: "3333"}) <del> f, err := os.OpenFile(tmp.Name(), os.O_APPEND|os.O_WRONLY, 0600) <del> assert.Assert(c, err == nil) <del> assert.Assert(c, f != nil) <del> _, err = f.WriteString("\n3.3.3.3:3333\n") <del> assert.Assert(c, err == nil) <del> f.Close() <del> assert.DeepEqual(c, <-ch, expected) <del> <del> // Stop and make sure it closes all channels. <del> close(stopCh) <del> assert.Assert(c, <-ch == nil) <del> assert.Assert(c, <-errCh == nil) <del>} <ide><path>pkg/discovery/generator.go <del>package discovery // import "github.com/docker/docker/pkg/discovery" <del> <del>import ( <del> "fmt" <del> "regexp" <del> "strconv" <del>) <del> <del>// Generate takes care of IP generation <del>func Generate(pattern string) []string { <del> re, _ := regexp.Compile(`\[(.+):(.+)\]`) <del> submatch := re.FindStringSubmatch(pattern) <del> if submatch == nil { <del> return []string{pattern} <del> } <del> <del> from, err := strconv.Atoi(submatch[1]) <del> if err != nil { <del> return []string{pattern} <del> } <del> to, err := strconv.Atoi(submatch[2]) <del> if err != nil { <del> return []string{pattern} <del> } <del> <del> template := re.ReplaceAllString(pattern, "%d") <del> <del> var result []string <del> for val := from; val <= to; val++ { <del> entry := fmt.Sprintf(template, val) <del> result = append(result, entry) <del> } <del> <del> return result <del>} <ide><path>pkg/discovery/generator_test.go <del>package discovery // import "github.com/docker/docker/pkg/discovery" <del>import ( <del> "testing" <del> <del> "gotest.tools/v3/assert" <del>) <del> <del>func (s *DiscoverySuite) TestGeneratorNotGenerate(c *testing.T) { <del> ips := Generate("127.0.0.1") <del> assert.Equal(c, len(ips), 1) <del> assert.Equal(c, ips[0], "127.0.0.1") <del>} <del> <del>func (s *DiscoverySuite) TestGeneratorWithPortNotGenerate(c *testing.T) { <del> ips := Generate("127.0.0.1:8080") <del> assert.Equal(c, len(ips), 1) <del> assert.Equal(c, ips[0], "127.0.0.1:8080") <del>} <del> <del>func (s *DiscoverySuite) TestGeneratorMatchFailedNotGenerate(c *testing.T) { <del> ips := Generate("127.0.0.[1]") <del> assert.Equal(c, len(ips), 1) <del> assert.Equal(c, ips[0], "127.0.0.[1]") <del>} <del> <del>func (s *DiscoverySuite) TestGeneratorWithPort(c *testing.T) { <del> ips := Generate("127.0.0.[1:11]:2375") <del> assert.Equal(c, len(ips), 11) <del> assert.Equal(c, ips[0], "127.0.0.1:2375") <del> assert.Equal(c, ips[1], "127.0.0.2:2375") <del> assert.Equal(c, ips[2], "127.0.0.3:2375") <del> assert.Equal(c, ips[3], "127.0.0.4:2375") <del> assert.Equal(c, ips[4], "127.0.0.5:2375") <del> assert.Equal(c, ips[5], "127.0.0.6:2375") <del> assert.Equal(c, ips[6], "127.0.0.7:2375") <del> assert.Equal(c, ips[7], "127.0.0.8:2375") <del> assert.Equal(c, ips[8], "127.0.0.9:2375") <del> assert.Equal(c, ips[9], "127.0.0.10:2375") <del> assert.Equal(c, ips[10], "127.0.0.11:2375") <del>} <del> <del>func (s *DiscoverySuite) TestGenerateWithMalformedInputAtRangeStart(c *testing.T) { <del> malformedInput := "127.0.0.[x:11]:2375" <del> ips := Generate(malformedInput) <del> assert.Equal(c, len(ips), 1) <del> assert.Equal(c, ips[0], malformedInput) <del>} <del> <del>func (s *DiscoverySuite) TestGenerateWithMalformedInputAtRangeEnd(c *testing.T) { <del> malformedInput := "127.0.0.[1:x]:2375" <del> ips := Generate(malformedInput) <del> assert.Equal(c, len(ips), 1) <del> assert.Equal(c, ips[0], malformedInput) <del>} <ide><path>pkg/discovery/kv/kv.go <del>package kv // import 
"github.com/docker/docker/pkg/discovery/kv" <del> <del>import ( <del> "fmt" <del> "path" <del> "strings" <del> "time" <del> <del> "github.com/docker/docker/pkg/discovery" <del> "github.com/docker/go-connections/tlsconfig" <del> "github.com/docker/libkv" <del> "github.com/docker/libkv/store" <del> "github.com/docker/libkv/store/consul" <del> "github.com/docker/libkv/store/etcd" <del> "github.com/docker/libkv/store/zookeeper" <del> "github.com/sirupsen/logrus" <del>) <del> <del>const ( <del> defaultDiscoveryPath = "docker/nodes" <del>) <del> <del>// Discovery is exported <del>type Discovery struct { <del> backend store.Backend <del> store store.Store <del> heartbeat time.Duration <del> ttl time.Duration <del> prefix string <del> path string <del>} <del> <del>func init() { <del> Init() <del>} <del> <del>// Init is exported <del>func Init() { <del> // Register to libkv <del> zookeeper.Register() <del> consul.Register() <del> etcd.Register() <del> <del> // Register to internal discovery service <del> discovery.Register("zk", &Discovery{backend: store.ZK}) <del> discovery.Register("consul", &Discovery{backend: store.CONSUL}) <del> discovery.Register("etcd", &Discovery{backend: store.ETCD}) <del>} <del> <del>// Initialize is exported <del>func (s *Discovery) Initialize(uris string, heartbeat time.Duration, ttl time.Duration, clusterOpts map[string]string) error { <del> var ( <del> parts = strings.SplitN(uris, "/", 2) <del> addrs = strings.Split(parts[0], ",") <del> err error <del> ) <del> <del> // A custom prefix to the path can be optionally used. <del> if len(parts) == 2 { <del> s.prefix = parts[1] <del> } <del> <del> s.heartbeat = heartbeat <del> s.ttl = ttl <del> <del> // Use a custom path if specified in discovery options <del> dpath := defaultDiscoveryPath <del> if clusterOpts["kv.path"] != "" { <del> dpath = clusterOpts["kv.path"] <del> } <del> <del> s.path = path.Join(s.prefix, dpath) <del> <del> var config *store.Config <del> if clusterOpts["kv.cacertfile"] != "" && clusterOpts["kv.certfile"] != "" && clusterOpts["kv.keyfile"] != "" { <del> logrus.Info("Initializing discovery with TLS") <del> tlsConfig, err := tlsconfig.Client(tlsconfig.Options{ <del> CAFile: clusterOpts["kv.cacertfile"], <del> CertFile: clusterOpts["kv.certfile"], <del> KeyFile: clusterOpts["kv.keyfile"], <del> }) <del> if err != nil { <del> return err <del> } <del> config = &store.Config{ <del> // Set ClientTLS to trigger https (bug in libkv/etcd) <del> ClientTLS: &store.ClientTLSConfig{ <del> CACertFile: clusterOpts["kv.cacertfile"], <del> CertFile: clusterOpts["kv.certfile"], <del> KeyFile: clusterOpts["kv.keyfile"], <del> }, <del> // The actual TLS config that will be used <del> TLS: tlsConfig, <del> } <del> } else { <del> logrus.Info("Initializing discovery without TLS") <del> } <del> <del> // Creates a new store, will ignore options given <del> // if not supported by the chosen store <del> s.store, err = libkv.NewStore(s.backend, addrs, config) <del> return err <del>} <del> <del>// Watch the store until either there's a store error or we receive a stop request. <del>// Returns false if we shouldn't attempt watching the store anymore (stop request received). 
<del>func (s *Discovery) watchOnce(stopCh <-chan struct{}, watchCh <-chan []*store.KVPair, discoveryCh chan discovery.Entries, errCh chan error) bool { <del> for { <del> select { <del> case pairs := <-watchCh: <del> if pairs == nil { <del> return true <del> } <del> <del> logrus.WithField("discovery", s.backend).Debugf("Watch triggered with %d nodes", len(pairs)) <del> <del> // Convert `KVPair` into `discovery.Entry`. <del> addrs := make([]string, len(pairs)) <del> for _, pair := range pairs { <del> addrs = append(addrs, string(pair.Value)) <del> } <del> <del> entries, err := discovery.CreateEntries(addrs) <del> if err != nil { <del> errCh <- err <del> } else { <del> discoveryCh <- entries <del> } <del> case <-stopCh: <del> // We were requested to stop watching. <del> return false <del> } <del> } <del>} <del> <del>// Watch is exported <del>func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { <del> ch := make(chan discovery.Entries) <del> errCh := make(chan error) <del> <del> go func() { <del> defer close(ch) <del> defer close(errCh) <del> <del> // Forever: Create a store watch, watch until we get an error and then try again. <del> // Will only stop if we receive a stopCh request. <del> for { <del> // Create the path to watch if it does not exist yet <del> exists, err := s.store.Exists(s.path) <del> if err != nil { <del> errCh <- err <del> } <del> if !exists { <del> if err := s.store.Put(s.path, []byte(""), &store.WriteOptions{IsDir: true}); err != nil { <del> errCh <- err <del> } <del> } <del> <del> // Set up a watch. <del> watchCh, err := s.store.WatchTree(s.path, stopCh) <del> if err != nil { <del> errCh <- err <del> } else { <del> if !s.watchOnce(stopCh, watchCh, ch, errCh) { <del> return <del> } <del> } <del> <del> // If we get here it means the store watch channel was closed. This <del> // is unexpected so let's retry later. <del> errCh <- fmt.Errorf("Unexpected watch error") <del> time.Sleep(s.heartbeat) <del> } <del> }() <del> return ch, errCh <del>} <del> <del>// Register is exported <del>func (s *Discovery) Register(addr string) error { <del> opts := &store.WriteOptions{TTL: s.ttl} <del> return s.store.Put(path.Join(s.path, addr), []byte(addr), opts) <del>} <del> <del>// Store returns the underlying store used by KV discovery. <del>func (s *Discovery) Store() store.Store { <del> return s.store <del>} <del> <del>// Prefix returns the store prefix <del>func (s *Discovery) Prefix() string { <del> return s.prefix <del>} <ide><path>pkg/discovery/kv/kv_test.go <del>package kv // import "github.com/docker/docker/pkg/discovery/kv" <del> <del>import ( <del> "errors" <del> "os" <del> "path" <del> "testing" <del> "time" <del> <del> "github.com/docker/docker/internal/test/suite" <del> "github.com/docker/docker/pkg/discovery" <del> "github.com/docker/libkv" <del> "github.com/docker/libkv/store" <del> "gotest.tools/v3/assert" <del>) <del> <del>// Hook up gocheck into the "go test" runner. 
<del>func Test(t *testing.T) { <del> suite.Run(t, &DiscoverySuite{}) <del>} <del> <del>type DiscoverySuite struct{} <del> <del>func (ds *DiscoverySuite) TestInitialize(c *testing.T) { <del> storeMock := &FakeStore{ <del> Endpoints: []string{"127.0.0.1"}, <del> } <del> d := &Discovery{backend: store.CONSUL} <del> d.Initialize("127.0.0.1", 0, 0, nil) <del> d.store = storeMock <del> <del> s := d.store.(*FakeStore) <del> assert.Equal(c, len(s.Endpoints), 1) <del> assert.Equal(c, s.Endpoints[0], "127.0.0.1") <del> assert.Equal(c, d.path, defaultDiscoveryPath) <del> <del> storeMock = &FakeStore{ <del> Endpoints: []string{"127.0.0.1:1234"}, <del> } <del> d = &Discovery{backend: store.CONSUL} <del> d.Initialize("127.0.0.1:1234/path", 0, 0, nil) <del> d.store = storeMock <del> <del> s = d.store.(*FakeStore) <del> assert.Equal(c, len(s.Endpoints), 1) <del> assert.Equal(c, s.Endpoints[0], "127.0.0.1:1234") <del> assert.Equal(c, d.path, "path/"+defaultDiscoveryPath) <del> <del> storeMock = &FakeStore{ <del> Endpoints: []string{"127.0.0.1:1234", "127.0.0.2:1234", "127.0.0.3:1234"}, <del> } <del> d = &Discovery{backend: store.CONSUL} <del> d.Initialize("127.0.0.1:1234,127.0.0.2:1234,127.0.0.3:1234/path", 0, 0, nil) <del> d.store = storeMock <del> <del> s = d.store.(*FakeStore) <del> assert.Equal(c, len(s.Endpoints), 3) <del> assert.Equal(c, s.Endpoints[0], "127.0.0.1:1234") <del> assert.Equal(c, s.Endpoints[1], "127.0.0.2:1234") <del> assert.Equal(c, s.Endpoints[2], "127.0.0.3:1234") <del> <del> assert.Equal(c, d.path, "path/"+defaultDiscoveryPath) <del>} <del> <del>// Extremely limited mock store so we can test initialization <del>type Mock struct { <del> // Endpoints passed to InitializeMock <del> Endpoints []string <del> <del> // Options passed to InitializeMock <del> Options *store.Config <del>} <del> <del>func NewMock(endpoints []string, options *store.Config) (store.Store, error) { <del> s := &Mock{} <del> s.Endpoints = endpoints <del> s.Options = options <del> return s, nil <del>} <del>func (s *Mock) Put(key string, value []byte, opts *store.WriteOptions) error { <del> return errors.New("Put not supported") <del>} <del>func (s *Mock) Get(key string) (*store.KVPair, error) { <del> return nil, errors.New("Get not supported") <del>} <del>func (s *Mock) Delete(key string) error { <del> return errors.New("Delete not supported") <del>} <del> <del>// Exists mock <del>func (s *Mock) Exists(key string) (bool, error) { <del> return false, errors.New("Exists not supported") <del>} <del> <del>// Watch mock <del>func (s *Mock) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) { <del> return nil, errors.New("Watch not supported") <del>} <del> <del>// WatchTree mock <del>func (s *Mock) WatchTree(prefix string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) { <del> return nil, errors.New("WatchTree not supported") <del>} <del> <del>// NewLock mock <del>func (s *Mock) NewLock(key string, options *store.LockOptions) (store.Locker, error) { <del> return nil, errors.New("NewLock not supported") <del>} <del> <del>// List mock <del>func (s *Mock) List(prefix string) ([]*store.KVPair, error) { <del> return nil, errors.New("List not supported") <del>} <del> <del>// DeleteTree mock <del>func (s *Mock) DeleteTree(prefix string) error { <del> return errors.New("DeleteTree not supported") <del>} <del> <del>// AtomicPut mock <del>func (s *Mock) AtomicPut(key string, value []byte, previous *store.KVPair, opts *store.WriteOptions) (bool, *store.KVPair, error) { <del> return false, nil, 
errors.New("AtomicPut not supported") <del>} <del> <del>// AtomicDelete mock <del>func (s *Mock) AtomicDelete(key string, previous *store.KVPair) (bool, error) { <del> return false, errors.New("AtomicDelete not supported") <del>} <del> <del>// Close mock <del>func (s *Mock) Close() { <del>} <del> <del>func (ds *DiscoverySuite) TestInitializeWithCerts(c *testing.T) { <del> cert := `-----BEGIN CERTIFICATE----- <del>MIIDCDCCAfKgAwIBAgIICifG7YeiQOEwCwYJKoZIhvcNAQELMBIxEDAOBgNVBAMT <del>B1Rlc3QgQ0EwHhcNMTUxMDAxMjMwMDAwWhcNMjAwOTI5MjMwMDAwWjASMRAwDgYD <del>VQQDEwdUZXN0IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1wRC <del>O+flnLTK5ImjTurNRHwSejuqGbc4CAvpB0hS+z0QlSs4+zE9h80aC4hz+6caRpds <del>+J908Q+RvAittMHbpc7VjbZP72G6fiXk7yPPl6C10HhRSoSi3nY+B7F2E8cuz14q <del>V2e+ejhWhSrBb/keyXpcyjoW1BOAAJ2TIclRRkICSCZrpXUyXxAvzXfpFXo1RhSb <del>UywN11pfiCQzDUN7sPww9UzFHuAHZHoyfTr27XnJYVUerVYrCPq8vqfn//01qz55 <del>Xs0hvzGdlTFXhuabFtQnKFH5SNwo/fcznhB7rePOwHojxOpXTBepUCIJLbtNnWFT <del>V44t9gh5IqIWtoBReQIDAQABo2YwZDAOBgNVHQ8BAf8EBAMCAAYwEgYDVR0TAQH/ <del>BAgwBgEB/wIBAjAdBgNVHQ4EFgQUZKUI8IIjIww7X/6hvwggQK4bD24wHwYDVR0j <del>BBgwFoAUZKUI8IIjIww7X/6hvwggQK4bD24wCwYJKoZIhvcNAQELA4IBAQDES2cz <del>7sCQfDCxCIWH7X8kpi/JWExzUyQEJ0rBzN1m3/x8ySRxtXyGekimBqQwQdFqlwMI <del>xzAQKkh3ue8tNSzRbwqMSyH14N1KrSxYS9e9szJHfUasoTpQGPmDmGIoRJuq1h6M <del>ej5x1SCJ7GWCR6xEXKUIE9OftXm9TdFzWa7Ja3OHz/mXteii8VXDuZ5ACq6EE5bY <del>8sP4gcICfJ5fTrpTlk9FIqEWWQrCGa5wk95PGEj+GJpNogjXQ97wVoo/Y3p1brEn <del>t5zjN9PAq4H1fuCMdNNA+p1DHNwd+ELTxcMAnb2ajwHvV6lKPXutrTFc4umJToBX <del>FpTxDmJHEV4bzUzh <del>-----END CERTIFICATE----- <del>` <del> key := `-----BEGIN RSA PRIVATE KEY----- <del>MIIEpQIBAAKCAQEA1wRCO+flnLTK5ImjTurNRHwSejuqGbc4CAvpB0hS+z0QlSs4 <del>+zE9h80aC4hz+6caRpds+J908Q+RvAittMHbpc7VjbZP72G6fiXk7yPPl6C10HhR <del>SoSi3nY+B7F2E8cuz14qV2e+ejhWhSrBb/keyXpcyjoW1BOAAJ2TIclRRkICSCZr <del>pXUyXxAvzXfpFXo1RhSbUywN11pfiCQzDUN7sPww9UzFHuAHZHoyfTr27XnJYVUe <del>rVYrCPq8vqfn//01qz55Xs0hvzGdlTFXhuabFtQnKFH5SNwo/fcznhB7rePOwHoj <del>xOpXTBepUCIJLbtNnWFTV44t9gh5IqIWtoBReQIDAQABAoIBAHSWipORGp/uKFXj <del>i/mut776x8ofsAxhnLBARQr93ID+i49W8H7EJGkOfaDjTICYC1dbpGrri61qk8sx <del>qX7p3v/5NzKwOIfEpirgwVIqSNYe/ncbxnhxkx6tXtUtFKmEx40JskvSpSYAhmmO <del>1XSx0E/PWaEN/nLgX/f1eWJIlxlQkk3QeqL+FGbCXI48DEtlJ9+MzMu4pAwZTpj5 <del>5qtXo5JJ0jRGfJVPAOznRsYqv864AhMdMIWguzk6EGnbaCWwPcfcn+h9a5LMdony <del>MDHfBS7bb5tkF3+AfnVY3IBMVx7YlsD9eAyajlgiKu4zLbwTRHjXgShy+4Oussz0 <del>ugNGnkECgYEA/hi+McrZC8C4gg6XqK8+9joD8tnyDZDz88BQB7CZqABUSwvjDqlP <del>L8hcwo/lzvjBNYGkqaFPUICGWKjeCtd8pPS2DCVXxDQX4aHF1vUur0uYNncJiV3N <del>XQz4Iemsa6wnKf6M67b5vMXICw7dw0HZCdIHD1hnhdtDz0uVpeevLZ8CgYEA2KCT <del>Y43lorjrbCgMqtlefkr3GJA9dey+hTzCiWEOOqn9RqGoEGUday0sKhiLofOgmN2B <del>LEukpKIey8s+Q/cb6lReajDVPDsMweX8i7hz3Wa4Ugp4Xa5BpHqu8qIAE2JUZ7bU <del>t88aQAYE58pUF+/Lq1QzAQdrjjzQBx6SrBxieecCgYEAvukoPZEC8mmiN1VvbTX+ <del>QFHmlZha3QaDxChB+QUe7bMRojEUL/fVnzkTOLuVFqSfxevaI/km9n0ac5KtAchV <del>xjp2bTnBb5EUQFqjopYktWA+xO07JRJtMfSEmjZPbbay1kKC7rdTfBm961EIHaRj <del>xZUf6M+rOE8964oGrdgdLlECgYEA046GQmx6fh7/82FtdZDRQp9tj3SWQUtSiQZc <del>qhO59Lq8mjUXz+MgBuJXxkiwXRpzlbaFB0Bca1fUoYw8o915SrDYf/Zu2OKGQ/qa <del>V81sgiVmDuEgycR7YOlbX6OsVUHrUlpwhY3hgfMe6UtkMvhBvHF/WhroBEIJm1pV <del>PXZ/CbMCgYEApNWVktFBjOaYfY6SNn4iSts1jgsQbbpglg3kT7PLKjCAhI6lNsbk <del>dyT7ut01PL6RaW4SeQWtrJIVQaM6vF3pprMKqlc5XihOGAmVqH7rQx9rtQB5TicL <del>BFrwkQE4HQtQBV60hYQUzzlSk44VFDz+jxIEtacRHaomDRh2FtOTz+I= <del>-----END RSA PRIVATE KEY----- <del>` <del> certFile, err := os.CreateTemp("", "cert") <del> assert.Assert(c, err == nil) <del> defer os.Remove(certFile.Name()) <del> 
certFile.Write([]byte(cert)) <del> certFile.Close() <del> keyFile, err := os.CreateTemp("", "key") <del> assert.Assert(c, err == nil) <del> defer os.Remove(keyFile.Name()) <del> keyFile.Write([]byte(key)) <del> keyFile.Close() <del> <del> libkv.AddStore("mock", NewMock) <del> d := &Discovery{backend: "mock"} <del> err = d.Initialize("127.0.0.3:1234", 0, 0, map[string]string{ <del> "kv.cacertfile": certFile.Name(), <del> "kv.certfile": certFile.Name(), <del> "kv.keyfile": keyFile.Name(), <del> }) <del> assert.Assert(c, err == nil) <del> s := d.store.(*Mock) <del> assert.Assert(c, s.Options.TLS != nil) <del> assert.Assert(c, s.Options.TLS.RootCAs != nil) <del> assert.Equal(c, len(s.Options.TLS.Certificates), 1) <del>} <del> <del>func (ds *DiscoverySuite) TestWatch(c *testing.T) { <del> mockCh := make(chan []*store.KVPair) <del> <del> storeMock := &FakeStore{ <del> Endpoints: []string{"127.0.0.1:1234"}, <del> mockKVChan: mockCh, <del> } <del> <del> d := &Discovery{backend: store.CONSUL} <del> d.Initialize("127.0.0.1:1234/path", 0, 0, nil) <del> d.store = storeMock <del> <del> expected := discovery.Entries{ <del> &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, <del> &discovery.Entry{Host: "2.2.2.2", Port: "2222"}, <del> } <del> kvs := []*store.KVPair{ <del> {Key: path.Join("path", defaultDiscoveryPath, "1.1.1.1"), Value: []byte("1.1.1.1:1111")}, <del> {Key: path.Join("path", defaultDiscoveryPath, "2.2.2.2"), Value: []byte("2.2.2.2:2222")}, <del> } <del> <del> stopCh := make(chan struct{}) <del> ch, errCh := d.Watch(stopCh) <del> <del> // It should fire an error since the first WatchTree call failed. <del> assert.ErrorContains(c, <-errCh, "test error") <del> // We have to drain the error channel otherwise Watch will get stuck. <del> go func() { <del> for range errCh { <del> } <del> }() <del> <del> // Push the entries into the store channel and make sure discovery emits. <del> mockCh <- kvs <del> assert.DeepEqual(c, <-ch, expected) <del> <del> // Add a new entry. <del> expected = append(expected, &discovery.Entry{Host: "3.3.3.3", Port: "3333"}) <del> kvs = append(kvs, &store.KVPair{Key: path.Join("path", defaultDiscoveryPath, "3.3.3.3"), Value: []byte("3.3.3.3:3333")}) <del> mockCh <- kvs <del> assert.DeepEqual(c, <-ch, expected) <del> <del> close(mockCh) <del> // Give it enough time to call WatchTree. <del> time.Sleep(3 * time.Second) <del> <del> // Stop and make sure it closes all channels. <del> close(stopCh) <del> assert.Assert(c, <-ch == nil) <del> assert.Assert(c, <-errCh == nil) <del>} <del> <del>// FakeStore implements store.Store methods. It mocks all store <del>// function in a simple, naive way. <del>type FakeStore struct { <del> Endpoints []string <del> Options *store.Config <del> mockKVChan <-chan []*store.KVPair <del> <del> watchTreeCallCount int <del>} <del> <del>func (s *FakeStore) Put(key string, value []byte, options *store.WriteOptions) error { <del> return nil <del>} <del> <del>func (s *FakeStore) Get(key string) (*store.KVPair, error) { <del> return nil, nil <del>} <del> <del>func (s *FakeStore) Delete(key string) error { <del> return nil <del>} <del> <del>func (s *FakeStore) Exists(key string) (bool, error) { <del> return true, nil <del>} <del> <del>func (s *FakeStore) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) { <del> return nil, nil <del>} <del> <del>// WatchTree will fail the first time, and return the mockKVchan afterwards. <del>// This is the behavior we need for testing.. If we need 'moar', should update this. 
<del>func (s *FakeStore) WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) { <del> if s.watchTreeCallCount == 0 { <del> s.watchTreeCallCount = 1 <del> return nil, errors.New("test error") <del> } <del> // First calls error <del> return s.mockKVChan, nil <del>} <del> <del>func (s *FakeStore) NewLock(key string, options *store.LockOptions) (store.Locker, error) { <del> return nil, nil <del>} <del> <del>func (s *FakeStore) List(directory string) ([]*store.KVPair, error) { <del> return []*store.KVPair{}, nil <del>} <del> <del>func (s *FakeStore) DeleteTree(directory string) error { <del> return nil <del>} <del> <del>func (s *FakeStore) AtomicPut(key string, value []byte, previous *store.KVPair, options *store.WriteOptions) (bool, *store.KVPair, error) { <del> return true, nil, nil <del>} <del> <del>func (s *FakeStore) AtomicDelete(key string, previous *store.KVPair) (bool, error) { <del> return true, nil <del>} <del> <del>func (s *FakeStore) Close() { <del>} <ide><path>pkg/discovery/memory/memory.go <del>package memory // import "github.com/docker/docker/pkg/discovery/memory" <del> <del>import ( <del> "sync" <del> "time" <del> <del> "github.com/docker/docker/pkg/discovery" <del>) <del> <del>// Discovery implements a discovery backend that keeps <del>// data in memory. <del>type Discovery struct { <del> heartbeat time.Duration <del> values []string <del> mu sync.Mutex <del>} <del> <del>func init() { <del> Init() <del>} <del> <del>// Init registers the memory backend on demand. <del>func Init() { <del> discovery.Register("memory", &Discovery{}) <del>} <del> <del>// Initialize sets the heartbeat for the memory backend. <del>func (s *Discovery) Initialize(_ string, heartbeat time.Duration, _ time.Duration, _ map[string]string) error { <del> s.heartbeat = heartbeat <del> s.values = make([]string, 0) <del> return nil <del>} <del> <del>// Watch sends periodic discovery updates to a channel. <del>func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { <del> ch := make(chan discovery.Entries, 1) <del> errCh := make(chan error, 1) <del> ticker := time.NewTicker(s.heartbeat) <del> <del> go func() { <del> defer close(errCh) <del> defer close(ch) <del> <del> // Send the initial entries if available. <del> var currentEntries discovery.Entries <del> var err error <del> <del> s.mu.Lock() <del> if len(s.values) > 0 { <del> currentEntries, err = discovery.CreateEntries(s.values) <del> } <del> s.mu.Unlock() <del> <del> if err != nil { <del> errCh <- err <del> } else if currentEntries != nil { <del> ch <- currentEntries <del> } <del> <del> // Periodically send updates. <del> for { <del> select { <del> case <-ticker.C: <del> s.mu.Lock() <del> newEntries, err := discovery.CreateEntries(s.values) <del> s.mu.Unlock() <del> if err != nil { <del> errCh <- err <del> continue <del> } <del> <del> // Check if the file has really changed. <del> if !newEntries.Equals(currentEntries) { <del> ch <- newEntries <del> } <del> currentEntries = newEntries <del> case <-stopCh: <del> ticker.Stop() <del> return <del> } <del> } <del> }() <del> <del> return ch, errCh <del>} <del> <del>// Register adds a new address to the discovery. 
<del>func (s *Discovery) Register(addr string) error { <del> s.mu.Lock() <del> s.values = append(s.values, addr) <del> s.mu.Unlock() <del> return nil <del>} <ide><path>pkg/discovery/memory/memory_test.go <del>package memory // import "github.com/docker/docker/pkg/discovery/memory" <del> <del>import ( <del> "testing" <del> <del> "github.com/docker/docker/internal/test/suite" <del> "github.com/docker/docker/pkg/discovery" <del> "gotest.tools/v3/assert" <del>) <del> <del>// Hook up gocheck into the "go test" runner. <del>func Test(t *testing.T) { <del> suite.Run(t, &discoverySuite{}) <del>} <del> <del>type discoverySuite struct{} <del> <del>func (s *discoverySuite) TestWatch(c *testing.T) { <del> d := &Discovery{} <del> d.Initialize("foo", 1000, 0, nil) <del> stopCh := make(chan struct{}) <del> ch, errCh := d.Watch(stopCh) <del> <del> // We have to drain the error channel otherwise Watch will get stuck. <del> go func() { <del> for range errCh { <del> } <del> }() <del> <del> expected := discovery.Entries{ <del> &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, <del> } <del> <del> assert.Assert(c, d.Register("1.1.1.1:1111") == nil) <del> assert.DeepEqual(c, <-ch, expected) <del> <del> expected = discovery.Entries{ <del> &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, <del> &discovery.Entry{Host: "2.2.2.2", Port: "2222"}, <del> } <del> <del> assert.Assert(c, d.Register("2.2.2.2:2222") == nil) <del> assert.DeepEqual(c, <-ch, expected) <del> <del> // Stop and make sure it closes all channels. <del> close(stopCh) <del> assert.Assert(c, <-ch == nil) <del> assert.Assert(c, <-errCh == nil) <del>} <ide><path>pkg/discovery/nodes/nodes.go <del>package nodes // import "github.com/docker/docker/pkg/discovery/nodes" <del> <del>import ( <del> "fmt" <del> "strings" <del> "time" <del> <del> "github.com/docker/docker/pkg/discovery" <del>) <del> <del>// Discovery is exported <del>type Discovery struct { <del> entries discovery.Entries <del>} <del> <del>func init() { <del> Init() <del>} <del> <del>// Init is exported <del>func Init() { <del> discovery.Register("nodes", &Discovery{}) <del>} <del> <del>// Initialize is exported <del>func (s *Discovery) Initialize(uris string, _ time.Duration, _ time.Duration, _ map[string]string) error { <del> for _, input := range strings.Split(uris, ",") { <del> for _, ip := range discovery.Generate(input) { <del> entry, err := discovery.NewEntry(ip) <del> if err != nil { <del> return fmt.Errorf("%s, please check you are using the correct discovery (missing token:// ?)", err.Error()) <del> } <del> s.entries = append(s.entries, entry) <del> } <del> } <del> <del> return nil <del>} <del> <del>// Watch is exported <del>func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { <del> ch := make(chan discovery.Entries, 1) <del> go func() { <del> defer close(ch) <del> ch <- s.entries <del> <-stopCh <del> }() <del> return ch, nil <del>} <del> <del>// Register is exported <del>func (s *Discovery) Register(addr string) error { <del> return discovery.ErrNotImplemented <del>} <ide><path>pkg/discovery/nodes/nodes_test.go <del>package nodes // import "github.com/docker/docker/pkg/discovery/nodes" <del> <del>import ( <del> "testing" <del> <del> "github.com/docker/docker/internal/test/suite" <del> "github.com/docker/docker/pkg/discovery" <del> "gotest.tools/v3/assert" <del>) <del> <del>// Hook up gocheck into the "go test" runner. 
<del>func Test(t *testing.T) { <del> suite.Run(t, &DiscoverySuite{}) <del>} <del> <del>type DiscoverySuite struct{} <del> <del>func (s *DiscoverySuite) TestInitialize(c *testing.T) { <del> d := &Discovery{} <del> d.Initialize("1.1.1.1:1111,2.2.2.2:2222", 0, 0, nil) <del> assert.Equal(c, len(d.entries), 2) <del> assert.Equal(c, d.entries[0].String(), "1.1.1.1:1111") <del> assert.Equal(c, d.entries[1].String(), "2.2.2.2:2222") <del>} <del> <del>func (s *DiscoverySuite) TestInitializeWithPattern(c *testing.T) { <del> d := &Discovery{} <del> d.Initialize("1.1.1.[1:2]:1111,2.2.2.[2:4]:2222", 0, 0, nil) <del> assert.Equal(c, len(d.entries), 5) <del> assert.Equal(c, d.entries[0].String(), "1.1.1.1:1111") <del> assert.Equal(c, d.entries[1].String(), "1.1.1.2:1111") <del> assert.Equal(c, d.entries[2].String(), "2.2.2.2:2222") <del> assert.Equal(c, d.entries[3].String(), "2.2.2.3:2222") <del> assert.Equal(c, d.entries[4].String(), "2.2.2.4:2222") <del>} <del> <del>func (s *DiscoverySuite) TestWatch(c *testing.T) { <del> d := &Discovery{} <del> d.Initialize("1.1.1.1:1111,2.2.2.2:2222", 0, 0, nil) <del> expected := discovery.Entries{ <del> &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, <del> &discovery.Entry{Host: "2.2.2.2", Port: "2222"}, <del> } <del> ch, _ := d.Watch(nil) <del> assert.Equal(c, expected.Equals(<-ch), true) <del>} <del> <del>func (s *DiscoverySuite) TestRegister(c *testing.T) { <del> d := &Discovery{} <del> assert.Assert(c, d.Register("0.0.0.0") != nil) <del>}
file_count: 15
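Of the removed code, the small `parse` helper shows how `--cluster-store` URLs were split into scheme and URI; a standalone copy of that now-deleted behavior, for reference:

```go
package main

import (
	"fmt"
	"strings"
)

// parse mirrors the removed pkg/discovery helper: it splits a discovery
// URL into scheme and URI, defaulting to the "nodes" scheme when no
// "scheme://" prefix is present.
func parse(rawurl string) (string, string) {
	parts := strings.SplitN(rawurl, "://", 2)
	if len(parts) == 1 {
		return "nodes", parts[0]
	}
	return parts[0], parts[1]
}

func main() {
	fmt.Println(parse("127.0.0.1:2375"))        // nodes 127.0.0.1:2375
	fmt.Println(parse("etcd://127.0.0.1:2379")) // etcd 127.0.0.1:2379
}
```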
content_type: Python
main_lang: Python
message: fix identity init
sha: e361a26463bfda37c42acd3df1e4efb6d97215fe
patch:
<ide><path>keras/backend/tensorflow_backend.py <ide> def eye(size, dtype=None, name=None): <ide> else: <ide> n, m = size, size <ide> with tf_ops.init_scope(): <del> return variable(tf.eye(n, m, dtype=dtype), dtype, name) <add> v = tf.eye(n, m, dtype=dtype, name=name) <add> if py_all(v.shape.as_list()): <add> return variable(v, dtype=dtype, name=name) <add> return v <ide> <ide> <ide> @symbolic <ide><path>keras/initializers.py <ide> class Identity(Initializer): <ide> """Initializer that generates the identity matrix. <ide> <ide> Only use for 2D matrices. <del> If the desired matrix is not square, it pads with zeros on the <del> additional rows/columns <add> If the desired matrix is not square, it gets padded <add> with zeros for the additional rows/columns. <ide> <ide> # Arguments <ide> gain: Multiplicative factor to apply to the identity matrix. <ide> class Identity(Initializer): <ide> def __init__(self, gain=1.): <ide> self.gain = gain <ide> <add> @K.eager <ide> def __call__(self, shape, dtype=None): <ide> if len(shape) != 2: <ide> raise ValueError( <del> 'Identity matrix initializer can only be used for 2D matrices.') <del> <add> 'Identity matrix initializer ' <add> 'can only be used for 2D matrices.') <ide> return self.gain * K.eye((shape[0], shape[1]), dtype=dtype) <ide> <ide> def get_config(self):
2
Java
Java
fix issues in rsocketmessagehandler initialization
88016d47d01c74157f638dc1217ee6aa2428c65f
<ide><path>spring-messaging/src/main/java/org/springframework/messaging/handler/annotation/reactive/MessageMappingMessageHandler.java <ide> import org.springframework.context.EmbeddedValueResolverAware; <ide> import org.springframework.core.KotlinDetector; <ide> import org.springframework.core.annotation.AnnotatedElementUtils; <add>import org.springframework.core.codec.ByteArrayDecoder; <add>import org.springframework.core.codec.ByteBufferDecoder; <add>import org.springframework.core.codec.DataBufferDecoder; <ide> import org.springframework.core.codec.Decoder; <add>import org.springframework.core.codec.StringDecoder; <ide> import org.springframework.core.convert.ConversionService; <ide> import org.springframework.format.support.DefaultFormattingConversionService; <ide> import org.springframework.lang.Nullable; <ide> public class MessageMappingMessageHandler extends AbstractMethodMessageHandler<C <ide> <ide> public MessageMappingMessageHandler() { <ide> setHandlerPredicate(type -> AnnotatedElementUtils.hasAnnotation(type, Controller.class)); <add> this.decoders.add(StringDecoder.allMimeTypes()); <add> this.decoders.add(new ByteBufferDecoder()); <add> this.decoders.add(new ByteArrayDecoder()); <add> this.decoders.add(new DataBufferDecoder()); <ide> } <ide> <ide> <ide> /** <ide> * Configure the decoders to use for incoming payloads. <ide> */ <ide> public void setDecoders(List<? extends Decoder<?>> decoders) { <add> this.decoders.clear(); <ide> this.decoders.addAll(decoders); <ide> } <ide> <ide> public void setEmbeddedValueResolver(StringValueResolver resolver) { <ide> } <ide> <ide> <add> @Override <add> public void afterPropertiesSet() { <add> <add> // Initialize RouteMatcher before parent initializes handler mappings <add> if (this.routeMatcher == null) { <add> AntPathMatcher pathMatcher = new AntPathMatcher(); <add> pathMatcher.setPathSeparator("."); <add> this.routeMatcher = new SimpleRouteMatcher(pathMatcher); <add> } <add> <add> super.afterPropertiesSet(); <add> } <add> <ide> @Override <ide> protected List<? extends HandlerMethodArgumentResolver> initArgumentResolvers() { <ide> List<HandlerMethodArgumentResolver> resolvers = new ArrayList<>(); <ide> protected List<? 
extends HandlerMethodArgumentResolver> initArgumentResolvers() <ide> resolvers.add(new PayloadMethodArgumentResolver( <ide> getDecoders(), this.validator, getReactiveAdapterRegistry(), true)); <ide> <del> if (this.routeMatcher == null) { <del> AntPathMatcher pathMatcher = new AntPathMatcher(); <del> pathMatcher.setPathSeparator("."); <del> this.routeMatcher = new SimpleRouteMatcher(pathMatcher); <del> } <del> <ide> return resolvers; <ide> } <ide> <ide><path>spring-messaging/src/main/java/org/springframework/messaging/rsocket/DefaultRSocketStrategies.java <ide> import org.springframework.core.io.buffer.NettyDataBufferFactory; <ide> import org.springframework.lang.Nullable; <ide> import org.springframework.util.AntPathMatcher; <del>import org.springframework.util.Assert; <ide> import org.springframework.util.MimeTypeUtils; <ide> import org.springframework.util.RouteMatcher; <ide> import org.springframework.util.SimpleRouteMatcher; <ide> static class DefaultRSocketStrategiesBuilder implements RSocketStrategies.Builde <ide> @Nullable <ide> private MetadataExtractor metadataExtractor; <ide> <add> @Nullable <ide> private ReactiveAdapterRegistry adapterRegistry = ReactiveAdapterRegistry.getSharedInstance(); <ide> <ide> @Nullable <ide> static class DefaultRSocketStrategiesBuilder implements RSocketStrategies.Builde <ide> DefaultRSocketStrategiesBuilder(RSocketStrategies other) { <ide> this.encoders.addAll(other.encoders()); <ide> this.decoders.addAll(other.decoders()); <add> this.routeMatcher = other.routeMatcher(); <add> this.metadataExtractor = other.metadataExtractor(); <ide> this.adapterRegistry = other.reactiveAdapterRegistry(); <ide> this.bufferFactory = other.dataBufferFactory(); <ide> } <ide> public Builder decoders(Consumer<List<Decoder<?>>> consumer) { <ide> } <ide> <ide> @Override <del> public Builder routeMatcher(RouteMatcher routeMatcher) { <add> public Builder routeMatcher(@Nullable RouteMatcher routeMatcher) { <ide> this.routeMatcher = routeMatcher; <ide> return this; <ide> } <ide> <ide> @Override <del> public Builder metadataExtractor(MetadataExtractor metadataExtractor) { <add> public Builder metadataExtractor(@Nullable MetadataExtractor metadataExtractor) { <ide> this.metadataExtractor = metadataExtractor; <ide> return this; <ide> } <ide> <ide> @Override <del> public Builder reactiveAdapterStrategy(ReactiveAdapterRegistry registry) { <del> Assert.notNull(registry, "ReactiveAdapterRegistry is required"); <add> public Builder reactiveAdapterStrategy(@Nullable ReactiveAdapterRegistry registry) { <ide> this.adapterRegistry = registry; <ide> return this; <ide> } <ide> <ide> @Override <del> public Builder dataBufferFactory(DataBufferFactory bufferFactory) { <add> public Builder dataBufferFactory(@Nullable DataBufferFactory bufferFactory) { <ide> this.bufferFactory = bufferFactory; <ide> return this; <ide> } <ide> public RSocketStrategies build() { <ide> this.routeMatcher != null ? this.routeMatcher : initRouteMatcher(), <ide> this.metadataExtractor != null ? this.metadataExtractor : initMetadataExtractor(), <ide> this.bufferFactory != null ? this.bufferFactory : initBufferFactory(), <del> this.adapterRegistry); <add> this.adapterRegistry != null ? 
this.adapterRegistry : initReactiveAdapterRegistry()); <ide> } <ide> <ide> private RouteMatcher initRouteMatcher() { <ide> private MetadataExtractor initMetadataExtractor() { <ide> private DataBufferFactory initBufferFactory() { <ide> return new NettyDataBufferFactory(PooledByteBufAllocator.DEFAULT); <ide> } <add> <add> private ReactiveAdapterRegistry initReactiveAdapterRegistry() { <add> return ReactiveAdapterRegistry.getSharedInstance(); <add> } <ide> } <ide> <ide> } <ide><path>spring-messaging/src/main/java/org/springframework/messaging/rsocket/RSocketStrategies.java <ide> interface Builder { <ide> * efficiency consider using the {@code PathPatternRouteMatcher} from <ide> * {@code spring-web} instead. <ide> */ <del> Builder routeMatcher(RouteMatcher routeMatcher); <add> Builder routeMatcher(@Nullable RouteMatcher routeMatcher); <ide> <ide> /** <ide> * Configure a {@link MetadataExtractor} to extract the route along with <ide> interface Builder { <ide> * route from {@code "message/x.rsocket.routing.v0"} or <ide> * {@code "text/plain"} metadata entries. <ide> */ <del> Builder metadataExtractor(MetadataExtractor metadataExtractor); <add> Builder metadataExtractor(@Nullable MetadataExtractor metadataExtractor); <ide> <ide> /** <ide> * Configure the registry for reactive type support. This can be used to <ide> * to adapt to, and/or determine the semantics of a given <ide> * {@link org.reactivestreams.Publisher Publisher}. <ide> * <p>By default this {@link ReactiveAdapterRegistry#getSharedInstance()}. <ide> */ <del> Builder reactiveAdapterStrategy(ReactiveAdapterRegistry registry); <add> Builder reactiveAdapterStrategy(@Nullable ReactiveAdapterRegistry registry); <ide> <ide> /** <ide> * Configure the DataBufferFactory to use for allocating buffers when <ide> interface Builder { <ide> * <p>If using {@link DefaultDataBufferFactory} instead, there is no <ide> * need for related config changes in RSocket. <ide> */ <del> Builder dataBufferFactory(DataBufferFactory bufferFactory); <add> Builder dataBufferFactory(@Nullable DataBufferFactory bufferFactory); <ide> <ide> /** <ide> * Build the {@code RSocketStrategies} instance. <ide><path>spring-messaging/src/main/java/org/springframework/messaging/rsocket/annotation/support/RSocketMessageHandler.java <ide> import org.springframework.beans.BeanUtils; <ide> import org.springframework.core.ReactiveAdapterRegistry; <ide> import org.springframework.core.annotation.AnnotatedElementUtils; <add>import org.springframework.core.codec.ByteArrayEncoder; <add>import org.springframework.core.codec.ByteBufferEncoder; <add>import org.springframework.core.codec.CharSequenceEncoder; <add>import org.springframework.core.codec.DataBufferEncoder; <ide> import org.springframework.core.codec.Decoder; <ide> import org.springframework.core.codec.Encoder; <ide> import org.springframework.lang.Nullable; <ide> public class RSocketMessageHandler extends MessageMappingMessageHandler { <ide> private MimeType defaultMetadataMimeType = MetadataExtractor.COMPOSITE_METADATA; <ide> <ide> <add> public RSocketMessageHandler() { <add> this.encoders.add(CharSequenceEncoder.allMimeTypes()); <add> this.encoders.add(new ByteBufferEncoder()); <add> this.encoders.add(new ByteArrayEncoder()); <add> this.encoders.add(new DataBufferEncoder()); <add> } <add> <add> <ide> /** <ide> * {@inheritDoc} <ide> * <p>If {@link #setRSocketStrategies(RSocketStrategies) rsocketStrategies} <ide> public void setDecoders(List<? extends Decoder<?>> decoders) { <ide> * other properties. 
<ide> */ <ide> public void setEncoders(List<? extends Encoder<?>> encoders) { <add> this.encoders.clear(); <ide> this.encoders.addAll(encoders); <ide> } <ide> <ide> public List<? extends Encoder<?>> getEncoders() { <ide> * </ul> <ide> * <p>By default if this is not set, it is initialized from the above. <ide> */ <del> public void setRSocketStrategies(@Nullable RSocketStrategies rsocketStrategies) { <del> this.rsocketStrategies = rsocketStrategies; <del> if (rsocketStrategies != null) { <del> setDecoders(rsocketStrategies.decoders()); <del> setEncoders(rsocketStrategies.encoders()); <del> setReactiveAdapterRegistry(rsocketStrategies.reactiveAdapterRegistry()); <del> } <add> public void setRSocketStrategies(RSocketStrategies rsocketStrategies) { <add> setDecoders(rsocketStrategies.decoders()); <add> setEncoders(rsocketStrategies.encoders()); <add> setRouteMatcher(rsocketStrategies.routeMatcher()); <add> setMetadataExtractor(rsocketStrategies.metadataExtractor()); <add> setReactiveAdapterRegistry(rsocketStrategies.reactiveAdapterRegistry()); <ide> } <ide> <ide> /** <del> * Return the configured {@link RSocketStrategies}. This may be {@code null} <del> * before {@link #afterPropertiesSet()} is called. <add> * Return an {@link RSocketStrategies} instance initialized from the <add> * corresponding properties listed under {@link #setRSocketStrategies}. <ide> */ <del> @Nullable <ide> public RSocketStrategies getRSocketStrategies() { <del> return this.rsocketStrategies; <add> return this.rsocketStrategies != null ? this.rsocketStrategies : initRSocketStrategies(); <add> } <add> <add> private RSocketStrategies initRSocketStrategies() { <add> return RSocketStrategies.builder() <add> .decoders(List::clear) <add> .encoders(List::clear) <add> .decoders(decoders -> decoders.addAll(getDecoders())) <add> .encoders(encoders -> encoders.addAll(getEncoders())) <add> .routeMatcher(getRouteMatcher()) <add> .metadataExtractor(getMetadataExtractor()) <add> .reactiveAdapterStrategy(getReactiveAdapterRegistry()) <add> .build(); <ide> } <ide> <ide> /** <ide> public MimeType getDefaultMetadataMimeType() { <ide> @Override <ide> public void afterPropertiesSet() { <ide> <add> // Add argument resolver before parent initializes argument resolution <ide> getArgumentResolverConfigurer().addCustomResolver(new RSocketRequesterMethodArgumentResolver()); <add> <ide> super.afterPropertiesSet(); <ide> <ide> if (getMetadataExtractor() == null) { <ide> public void afterPropertiesSet() { <ide> setMetadataExtractor(extractor); <ide> } <ide> <del> if (this.rsocketStrategies == null) { <del> this.rsocketStrategies = RSocketStrategies.builder() <del> .decoder(getDecoders().toArray(new Decoder<?>[0])) <del> .encoder(getEncoders().toArray(new Encoder<?>[0])) <del> .routeMatcher(getRouteMatcher()) <del> .metadataExtractor(getMetadataExtractor()) <del> .reactiveAdapterStrategy(getReactiveAdapterRegistry()) <del> .build(); <del> } <add> this.rsocketStrategies = initRSocketStrategies(); <ide> } <ide> <ide> @Override <ide><path>spring-messaging/src/test/java/org/springframework/messaging/rsocket/DefaultRSocketStrategiesTests.java <add>/* <add> * Copyright 2002-2019 the original author or authors. <add> * <add> * Licensed under the Apache License, Version 2.0 (the "License"); <add> * you may not use this file except in compliance with the License. 
<add> * You may obtain a copy of the License at <add> * <add> * https://www.apache.org/licenses/LICENSE-2.0 <add> * <add> * Unless required by applicable law or agreed to in writing, software <add> * distributed under the License is distributed on an "AS IS" BASIS, <add> * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. <add> * See the License for the specific language governing permissions and <add> * limitations under the License. <add> */ <add>package org.springframework.messaging.rsocket; <add> <add>import org.junit.Test; <add> <add>import org.springframework.core.ReactiveAdapterRegistry; <add>import org.springframework.core.codec.ByteArrayDecoder; <add>import org.springframework.core.codec.ByteArrayEncoder; <add>import org.springframework.core.codec.ByteBufferDecoder; <add>import org.springframework.core.codec.ByteBufferEncoder; <add>import org.springframework.core.codec.CharSequenceEncoder; <add>import org.springframework.core.codec.DataBufferDecoder; <add>import org.springframework.core.codec.DataBufferEncoder; <add>import org.springframework.core.codec.StringDecoder; <add>import org.springframework.util.AntPathMatcher; <add>import org.springframework.util.SimpleRouteMatcher; <add> <add>import static org.assertj.core.api.Assertions.assertThat; <add> <add>/** <add> * Unit tests for {@link RSocketStrategies}. <add> * @author Rossen Stoyanchev <add> * @since 5.2 <add> */ <add>public class DefaultRSocketStrategiesTests { <add> <add> @Test <add> public void defaultSettings() { <add> RSocketStrategies strategies = RSocketStrategies.create(); <add> <add> assertThat(strategies.encoders()).hasSize(4).hasOnlyElementsOfTypes( <add> CharSequenceEncoder.class, <add> ByteArrayEncoder.class, <add> ByteBufferEncoder.class, <add> DataBufferEncoder.class); <add> <add> assertThat(strategies.decoders()).hasSize(4).hasOnlyElementsOfTypes( <add> StringDecoder.class, <add> ByteArrayDecoder.class, <add> ByteBufferDecoder.class, <add> DataBufferDecoder.class); <add> <add> assertThat(strategies.routeMatcher()).isNotNull(); <add> assertThat(strategies.metadataExtractor()).isNotNull(); <add> assertThat(strategies.reactiveAdapterRegistry()).isNotNull(); <add> } <add> <add> @Test <add> public void explicitValues() { <add> <add> SimpleRouteMatcher matcher = new SimpleRouteMatcher(new AntPathMatcher()); <add> DefaultMetadataExtractor extractor = new DefaultMetadataExtractor(); <add> ReactiveAdapterRegistry registry = new ReactiveAdapterRegistry(); <add> <add> RSocketStrategies strategies = RSocketStrategies.builder() <add> .encoders(encoders -> { <add> encoders.clear(); <add> encoders.add(new ByteArrayEncoder()); <add> }) <add> .decoders(decoders -> { <add> decoders.clear(); <add> decoders.add(new ByteArrayDecoder()); <add> }) <add> .routeMatcher(matcher) <add> .metadataExtractor(extractor) <add> .reactiveAdapterStrategy(registry) <add> .build(); <add> <add> assertThat(strategies.encoders()).hasSize(1); <add> assertThat(strategies.decoders()).hasSize(1); <add> assertThat(strategies.routeMatcher()).isSameAs(matcher); <add> assertThat(strategies.metadataExtractor()).isSameAs(extractor); <add> assertThat(strategies.reactiveAdapterRegistry()).isSameAs(registry); <add> } <add> <add> @Test <add> public void copyConstructor() { <add> RSocketStrategies strategies1 = RSocketStrategies.create(); <add> RSocketStrategies strategies2 = strategies1.mutate().build(); <add> <add> assertThat(strategies1.encoders()).hasSameElementsAs(strategies2.encoders()); <add> 
assertThat(strategies1.decoders()).hasSameElementsAs(strategies2.decoders()); <add> assertThat(strategies1.routeMatcher()).isSameAs(strategies2.routeMatcher()); <add> assertThat(strategies1.metadataExtractor()).isSameAs(strategies2.metadataExtractor()); <add> assertThat(strategies1.reactiveAdapterRegistry()).isSameAs(strategies2.reactiveAdapterRegistry()); <add> } <add> <add>} <ide><path>spring-messaging/src/test/java/org/springframework/messaging/rsocket/annotation/support/RSocketMessageHandlerTests.java <ide> package org.springframework.messaging.rsocket.annotation.support; <ide> <ide> import java.util.Collections; <add>import java.util.List; <ide> import java.util.Map; <ide> <ide> import io.rsocket.frame.FrameType; <ide> import org.junit.Test; <ide> <add>import org.springframework.core.ReactiveAdapterRegistry; <add>import org.springframework.core.codec.ByteArrayDecoder; <add>import org.springframework.core.codec.ByteArrayEncoder; <add>import org.springframework.core.codec.ByteBufferDecoder; <add>import org.springframework.core.codec.ByteBufferEncoder; <ide> import org.springframework.core.codec.CharSequenceEncoder; <add>import org.springframework.core.codec.DataBufferDecoder; <add>import org.springframework.core.codec.DataBufferEncoder; <ide> import org.springframework.core.codec.StringDecoder; <ide> import org.springframework.messaging.Message; <ide> import org.springframework.messaging.handler.CompositeMessageCondition; <ide> import org.springframework.messaging.handler.DestinationPatternsMessageCondition; <ide> import org.springframework.messaging.handler.HandlerMethod; <ide> import org.springframework.messaging.handler.annotation.MessageMapping; <add>import org.springframework.messaging.rsocket.DefaultMetadataExtractor; <add>import org.springframework.messaging.rsocket.RSocketStrategies; <ide> import org.springframework.messaging.rsocket.annotation.ConnectMapping; <ide> import org.springframework.messaging.support.MessageBuilder; <ide> import org.springframework.messaging.support.MessageHeaderAccessor; <ide> */ <ide> public class RSocketMessageHandlerTests { <ide> <add> @Test <add> public void rsocketStrategiesInitializedFromOtherProperties() { <add> RSocketMessageHandler handler = new RSocketMessageHandler(); <add> handler.setDecoders(Collections.singletonList(new ByteArrayDecoder())); <add> handler.setEncoders(Collections.singletonList(new ByteArrayEncoder())); <add> handler.setRouteMatcher(new SimpleRouteMatcher(new AntPathMatcher())); <add> handler.setMetadataExtractor(new DefaultMetadataExtractor()); <add> handler.setReactiveAdapterRegistry(new ReactiveAdapterRegistry()); <add> handler.afterPropertiesSet(); <add> <add> RSocketStrategies strategies = handler.getRSocketStrategies(); <add> assertThat(strategies).isNotNull(); <add> assertThat(strategies.encoders()).isEqualTo(handler.getEncoders()); <add> assertThat(strategies.decoders()).isEqualTo(handler.getDecoders()); <add> assertThat(strategies.routeMatcher()).isSameAs(handler.getRouteMatcher()); <add> assertThat(strategies.metadataExtractor()).isSameAs(handler.getMetadataExtractor()); <add> assertThat(strategies.reactiveAdapterRegistry()).isSameAs(handler.getReactiveAdapterRegistry()); <add> } <add> <add> @Test <add> public void rsocketStrategiesInitializedFromDefaults() { <add> <add> RSocketMessageHandler handler = new RSocketMessageHandler(); <add> handler.afterPropertiesSet(); <add> <add> RSocketStrategies strategies = handler.getRSocketStrategies(); <add> assertThat(strategies).isNotNull(); <add> <add> 
assertThat(strategies.encoders()).hasSize(4).hasOnlyElementsOfTypes( <add> CharSequenceEncoder.class, <add> ByteArrayEncoder.class, <add> ByteBufferEncoder.class, <add> DataBufferEncoder.class); <add> <add> assertThat(strategies.decoders()).hasSize(4).hasOnlyElementsOfTypes( <add> StringDecoder.class, <add> ByteArrayDecoder.class, <add> ByteBufferDecoder.class, <add> DataBufferDecoder.class); <add> <add> assertThat(strategies.routeMatcher()).isSameAs(handler.getRouteMatcher()).isNotNull(); <add> assertThat(strategies.metadataExtractor()).isSameAs(handler.getMetadataExtractor()).isNotNull(); <add> assertThat(strategies.reactiveAdapterRegistry()).isSameAs(handler.getReactiveAdapterRegistry()).isNotNull(); <add> } <add> <add> @Test <add> public void rsocketStrategiesSetsOtherProperties() { <add> <add> RSocketStrategies strategies = RSocketStrategies.builder() <add> .encoders(List::clear) <add> .decoders(List::clear) <add> .encoders(encoders -> encoders.add(new ByteArrayEncoder())) <add> .decoders(decoders -> decoders.add(new ByteArrayDecoder())) <add> .routeMatcher(new SimpleRouteMatcher(new AntPathMatcher())) <add> .metadataExtractor(new DefaultMetadataExtractor()) <add> .reactiveAdapterStrategy(new ReactiveAdapterRegistry()) <add> .build(); <add> <add> RSocketMessageHandler handler = new RSocketMessageHandler(); <add> handler.setRSocketStrategies(strategies); <add> handler.afterPropertiesSet(); <add> <add> assertThat(handler.getEncoders()).isEqualTo(strategies.encoders()); <add> assertThat(handler.getDecoders()).isEqualTo(strategies.decoders()); <add> assertThat(handler.getRouteMatcher()).isSameAs(strategies.routeMatcher()); <add> assertThat(handler.getMetadataExtractor()).isSameAs(strategies.metadataExtractor()); <add> assertThat(handler.getReactiveAdapterRegistry()).isSameAs(strategies.reactiveAdapterRegistry()); <add> } <add> <add> @Test <add> public void rsocketStrategiesReflectsFurtherChangesToOtherProperties() { <add> <add> RSocketMessageHandler handler = new RSocketMessageHandler(); <add> <add> // RSocketStrategies sets other properties first <add> handler.setRSocketStrategies(RSocketStrategies.builder() <add> .encoders(List::clear) <add> .decoders(List::clear) <add> .encoders(encoders -> encoders.add(new ByteArrayEncoder())) <add> .decoders(decoders -> decoders.add(new ByteArrayDecoder())) <add> .routeMatcher(new SimpleRouteMatcher(new AntPathMatcher())) <add> .metadataExtractor(new DefaultMetadataExtractor()) <add> .reactiveAdapterStrategy(new ReactiveAdapterRegistry()) <add> .build()); <add> <add> // Followed by further changes to other properties <add> handler.setDecoders(Collections.singletonList(StringDecoder.allMimeTypes())); <add> handler.setEncoders(Collections.singletonList(CharSequenceEncoder.allMimeTypes())); <add> handler.setRouteMatcher(new SimpleRouteMatcher(new AntPathMatcher())); <add> handler.setMetadataExtractor(new DefaultMetadataExtractor()); <add> handler.setReactiveAdapterRegistry(new ReactiveAdapterRegistry()); <add> handler.afterPropertiesSet(); <add> <add> // RSocketStrategies should reflect current state <add> RSocketStrategies strategies = handler.getRSocketStrategies(); <add> assertThat(strategies.encoders()).isEqualTo(handler.getEncoders()); <add> assertThat(strategies.decoders()).isEqualTo(handler.getDecoders()); <add> assertThat(strategies.routeMatcher()).isSameAs(handler.getRouteMatcher()); <add> assertThat(strategies.metadataExtractor()).isSameAs(handler.getMetadataExtractor()); <add> 
assertThat(strategies.reactiveAdapterRegistry()).isSameAs(handler.getReactiveAdapterRegistry()); <add> } <add> <ide> @Test <ide> public void mappings() { <ide> testMapping(new SimpleController(), "path");
6
Javascript
Javascript
add event.isdefaultprevented() as in jquery
f37f0ea16edd4b487ef8812ff69a78d9a287fa60
<ide><path>src/jqLite.js <ide> forEach({ <ide> if (!event.target) { <ide> event.target = event.srcElement || document; <ide> } <add> <add> if (isUndefined(event.defaultPrevented)) { <add> var prevent = event.preventDefault; <add> event.preventDefault = function() { <add> event.defaultPrevented = true; <add> prevent.call(event); <add> }; <add> event.defaultPrevented = false; <add> } <add> <add> event.isDefaultPrevented = function() { <add> return event.defaultPrevented; <add> }; <add> <ide> forEach(eventHandler.fns, function(fn){ <ide> fn.call(element, event); <ide> }); <ide><path>test/jqLiteSpec.js <ide> describe('jqLite', function(){ <ide> <ide> browserTrigger(a, 'click'); <ide> }); <add> <add> it('should have event.isDefaultPrevented method', function() { <add> jqLite(a).bind('click', function(e) { <add> expect(function() { <add> expect(e.isDefaultPrevented()).toBe(false); <add> e.preventDefault(); <add> expect(e.isDefaultPrevented()).toBe(true); <add> }).not.toThrow(); <add> }); <add> <add> browserTrigger(a, 'click'); <add> }); <ide> }); <ide> <ide>
2
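The jqLite patch above backfills event.isDefaultPrevented() for browsers whose events lack it. The shim's core, lifted onto a plain event-like object so it runs standalone (the bare object here is a stand-in for a real DOM event):

var event = { preventDefault: function() {} };
if (event.defaultPrevented === undefined) {
  var prevent = event.preventDefault;
  event.preventDefault = function() {
    event.defaultPrevented = true;  // record that a handler suppressed the default
    prevent.call(event);            // still delegate to the original method
  };
  event.defaultPrevented = false;
}
event.isDefaultPrevented = function() { return event.defaultPrevented; };

console.log(event.isDefaultPrevented()); // false
event.preventDefault();
console.log(event.isDefaultPrevented()); // true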
Javascript
Javascript
switch assertion for simulated ember.create
fa627f4421e8b665fc32dca8350a2649103a94eb
<ide><path>packages/ember-routing/tests/helpers/control_test.js <ide> if (Ember.ENV.EXPERIMENTAL_CONTROL_HELPER) { <ide> var childController = view.get('childViews').objectAt(0).get('controller'); <ide> <ide> ok(view.$().text().match(/^.*Tom Dale.*$/), "The view rendered"); <del> deepEqual(childController.get('model'), { name: "Tom Dale" }); <add> <add> if (Ember.create.isSimulated) { <add> equal(childController.get('model').name, "Tom Dale"); <add> } else { <add> deepEqual(childController.get('model'), { name: "Tom Dale" }); <add> } <ide> <ide> Ember.run(function() { <ide> controller.set('model', { name: "Yehuda Katz" }); <ide><path>packages/ember-routing/tests/helpers/render_test.js <ide> test("{{render}} helper should render given template with a supplied model", fun <ide> set(controller, 'post', { title: "Rails is unagi" }); <ide> <ide> equal(view.$().text(), 'HIRails is unagi'); <del> deepEqual(postController.get('model'), { title: "Rails is unagi" }); <add> if (Ember.create.isSimulated) { <add> equal(postController.get('model').title, "Rails is unagi"); <add> } else { <add> deepEqual(postController.get('model'), { title: "Rails is unagi" }); <add> } <ide> }); <ide> <ide> test("{{render}} helper should render with given controller", function() { <ide> test("{{render}} helper should render templates with models multiple times", fun <ide> set(controller, 'post1', { title: "I am new" }); <ide> <ide> equal(view.$().text(), 'HI I am new Then me'); <del> deepEqual(postController1.get('model'), { title: "I am new" }); <add> if (Ember.create.isSimulated) { <add> equal(postController1.get('model').title, "I am new"); <add> } else { <add> deepEqual(postController1.get('model'), { title: "I am new" }); <add> } <ide> }); <ide> <ide> test("{{render}} helper should render templates both with and without models", function() { <ide> test("{{render}} helper should render templates both with and without models", f <ide> set(controller, 'post', { title: "Rails is unagi" }); <ide> <ide> equal(view.$().text(), 'HI Title: Title:Rails is unagi'); <del> deepEqual(postController2.get('model'), { title: "Rails is unagi" }); <add> if (Ember.create.isSimulated) { <add> equal(postController2.get('model').title, "Rails is unagi"); <add> } else { <add> deepEqual(postController2.get('model'), { title: "Rails is unagi" }); <add> } <ide> }); <ide> <ide> test("{{render}} helper should link child controllers to the parent controller", function() {
2
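The branching introduced above exists because a simulated Ember.create (the Object.create shim used on platforms without a native one) can attach bookkeeping properties to created objects, which makes deepEqual over the whole model fail even when the data is correct. A sketch of the same guard as a reusable helper, with QUnit-style names assumed:

function assertModel(assert, model, expected, isSimulated) {
  if (isSimulated) {
    // the shim pollutes the object with extra keys, so deep equality
    // is unreliable; compare only the field under test
    assert.equal(model.title, expected.title);
  } else {
    assert.deepEqual(model, expected);
  }
}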
PHP
PHP
guess policy method from class name
e0e3220ac9436ba94795d850ca07ca7fd766f86b
<ide><path>src/Illuminate/Foundation/Auth/Access/AuthorizesRequests.php <ide> public function authorizeForUser($user, $ability, $arguments = []) <ide> */ <ide> protected function parseAbilityAndArguments($ability, $arguments) <ide> { <del> if (is_string($ability)) { <add> if (is_string($ability) && (! class_exists($ability))) { <ide> return [$ability, $arguments]; <ide> } <ide> <ide><path>tests/Foundation/FoundationAuthorizesRequestsTraitTest.php <ide> public function test_basic_gate_check() <ide> <ide> $gate = $this->getBasicGate(); <ide> <del> $gate->define('foo', function () { <add> $gate->define('baz', function () { <ide> $_SERVER['_test.authorizes.trait'] = true; <ide> <ide> return true; <ide> }); <ide> <del> $response = (new FoundationTestAuthorizeTraitClass)->authorize('foo'); <add> $response = (new FoundationTestAuthorizeTraitClass)->authorize('baz'); <ide> <ide> $this->assertInstanceOf(Response::class, $response); <ide> $this->assertTrue($_SERVER['_test.authorizes.trait']); <ide> public function test_exception_is_thrown_if_gate_check_fails() <ide> { <ide> $gate = $this->getBasicGate(); <ide> <del> $gate->define('foo', function () { <add> $gate->define('baz', function () { <ide> return false; <ide> }); <ide> <del> (new FoundationTestAuthorizeTraitClass)->authorize('foo'); <add> (new FoundationTestAuthorizeTraitClass)->authorize('baz'); <ide> } <ide> <ide> public function test_policies_may_be_called() <ide> public function test_policies_may_be_called() <ide> $this->assertTrue($_SERVER['_test.authorizes.trait.policy']); <ide> } <ide> <del> public function test_policy_method_may_be_guessed() <add> public function test_policy_method_may_be_guessed_passing_model_instance() <ide> { <ide> unset($_SERVER['_test.authorizes.trait.policy']); <ide> <ide> $gate = $this->getBasicGate(); <ide> <ide> $gate->policy(FoundationAuthorizesRequestTestClass::class, FoundationAuthorizesRequestTestPolicy::class); <ide> <del> $response = (new FoundationTestAuthorizeTraitClass)->authorize([new FoundationAuthorizesRequestTestClass]); <add> $response = (new FoundationTestAuthorizeTraitClass)->authorize(new FoundationAuthorizesRequestTestClass); <add> <add> $this->assertInstanceOf(Response::class, $response); <add> $this->assertTrue($_SERVER['_test.authorizes.trait.policy']); <add> } <add> <add> public function test_policy_method_may_be_guessed_passing_class_name() <add> { <add> unset($_SERVER['_test.authorizes.trait.policy']); <add> <add> $gate = $this->getBasicGate(); <add> <add> $gate->policy(FoundationAuthorizesRequestTestClass::class, FoundationAuthorizesRequestTestPolicy::class); <add> <add> $response = (new FoundationTestAuthorizeTraitClass)->authorize(FoundationAuthorizesRequestTestClass::class); <ide> <ide> $this->assertInstanceOf(Response::class, $response); <ide> $this->assertTrue($_SERVER['_test.authorizes.trait.policy']); <ide> public function update() <ide> return true; <ide> } <ide> <del> public function test_policy_method_may_be_guessed() <add> public function test_policy_method_may_be_guessed_passing_model_instance() <add> { <add> $_SERVER['_test.authorizes.trait.policy'] = true; <add> <add> return true; <add> } <add> <add> public function test_policy_method_may_be_guessed_passing_class_name() <ide> { <ide> $_SERVER['_test.authorizes.trait.policy'] = true; <ide>
2
Javascript
Javascript
throw errors not strings
ee6014a3aa90232ed263fe9c9e0860c777b37a30
<ide><path>src/ng/location.js <ide> function convertToHashbangUrl(url, basePath, hashPrefix) { <ide> path = match.path.substr(pathPrefix.length); <ide> <ide> if (match.path.indexOf(pathPrefix) !== 0) { <del> throw 'Invalid url "' + url + '", missing path prefix "' + pathPrefix + '" !'; <add> throw Error('Invalid url "' + url + '", missing path prefix "' + pathPrefix + '" !'); <ide> } <ide> <ide> return composeProtocolHostPort(match.protocol, match.host, match.port) + basePath + <ide> function LocationUrl(url, pathPrefix) { <ide> var match = matchUrl(url, this); <ide> <ide> if (match.path.indexOf(pathPrefix) !== 0) { <del> throw 'Invalid url "' + url + '", missing path prefix "' + pathPrefix + '" !'; <add> throw Error('Invalid url "' + url + '", missing path prefix "' + pathPrefix + '" !'); <ide> } <ide> <ide> this.$$path = decodeURIComponent(match.path.substr(pathPrefix.length)); <ide> function LocationHashbangUrl(url, hashPrefix) { <ide> this.$$parse = function(url) { <ide> var match = matchUrl(url, this); <ide> <add> <ide> if (match.hash && match.hash.indexOf(hashPrefix) !== 0) { <del> throw 'Invalid url "' + url + '", missing hash prefix "' + hashPrefix + '" !'; <add> throw Error('Invalid url "' + url + '", missing hash prefix "' + hashPrefix + '" !'); <ide> } <ide> <ide> basePath = match.path + (match.search ? '?' + match.search : '');
1
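Swapping thrown strings for Error objects matters because only Errors carry a stack trace and can be matched with instanceof; a self-contained sketch of the difference (the url value is a placeholder):

var url = 'http://example.com/bad';
try {
  throw Error('Invalid url "' + url + '", missing path prefix!');
} catch (e) {
  console.log(e instanceof Error); // true, which a thrown string never is
  console.log(e.stack);            // points back at the throw site
}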
Mixed
Ruby
fix exception when logging sql w/ nil binary value
12ff63b227e7ef01c7e57302c9999151dca157f1
<ide><path>activerecord/CHANGELOG.md <add>* Log nil binary column values correctly. <add> <add> When an object with a binary column is updated with a nil value <add> in that column, the SQL logger would throw an exception when trying <add> to log that nil value. This only occurs when updating a record <add> that already has a non-nil value in that column since an initial nil <add> value isn't included in the SQL anyway (at least, when dirty checking <add> is enabled.) The column's new value will now be logged as `<NULL binary data>` <add> to parallel the existing `<N bytes of binary data>` for non-nil values. <add> <add> *James Coleman* <add> <ide> * Stringify all variable keys of mysql connection configuration. <ide> <ide> When the `sql_mode` variable for mysql adapters is set in the configuration <ide><path>activerecord/lib/active_record/log_subscriber.rb <ide> def render_bind(column, value) <ide> if column.binary? <ide> # This specifically deals with the PG adapter that casts bytea columns into a Hash. <ide> value = value[:value] if value.is_a?(Hash) <del> value = "<#{value.bytesize} bytes of binary data>" <add> value = value.nil? ? "<NULL binary data>" : "<#{value.bytesize} bytes of binary data>" <ide> end <ide> <ide> [column.name, value] <ide><path>activerecord/test/cases/log_subscriber_test.rb <ide> def test_binary_data_is_not_logged <ide> wait <ide> assert_match(/<16 bytes of binary data>/, @logger.logged(:debug).join) <ide> end <add> <add> def test_nil_binary_data_is_logged <add> binary = Binary.create(data: "") <add> binary.update_attributes(data: nil) <add> wait <add> assert_match(/<NULL binary data>/, @logger.logged(:debug).join) <add> end <ide> end <ide> end
3
Javascript
Javascript
add api doc for ember.alias
a1fa3dbabe633dc8ee46a832c89ee781089c6c28
<ide><path>packages/ember-metal/lib/mixin.js <ide> Alias = function(methodName) { <ide> }; <ide> Alias.prototype = new Ember.Descriptor(); <ide> <add>/** <add> Makes a property or method available via an additional name. <add> <add> App.PaintSample = Ember.Object.extend({ <add> color: 'red', <add> colour: Ember.alias('color'), <add> name: function(){ <add> return "Zed"; <add> }, <add> moniker: Ember.alias("name") <add> }); <add> var paintSample = App.PaintSample.create() <add> paintSample.get('colour'); //=> 'red' <add> paintSample.moniker(); //=> 'Zed' <add> <add> @param {String} methodName name of the method or property to alias <add> @returns {Ember.Descriptor} <add>*/ <ide> Ember.alias = function(methodName) { <ide> return new Alias(methodName); <ide> };
1
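The doc block above already demonstrates the intended usage; as a rough illustration of the aliasing idea itself (not Ember's actual implementation), the same effect can be sketched with a plain property descriptor:

function alias(obj, existingName, aliasName) {
  Object.defineProperty(obj, aliasName, {
    get: function() { return obj[existingName]; },
    set: function(value) { obj[existingName] = value; }
  });
}

var paintSample = { color: 'red' };
alias(paintSample, 'color', 'colour');
console.log(paintSample.colour); // 'red'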
Ruby
Ruby
eliminate a place where argv is mutated
c2dcd91bd1ff2b88836089043bbfffacbbc5c6f3
<ide><path>Library/Homebrew/build.rb <ide> def post_superenv_hacks <ide> end <ide> end <ide> <del> def pre_superenv_hacks <del> # Allow a formula to opt-in to the std environment. <del> if (formula.env.std? || deps.any? { |d| d.name == "scons" }) && ARGV.env != "super" <del> ARGV.unshift "--env=std" <del> end <del> end <del> <ide> def effective_build_options_for(dependent) <ide> args = dependent.build.used_options <ide> args |= Tab.for_formula(dependent).used_options <ide> def install <ide> fixopt(dep) unless dep.opt_prefix.directory? <ide> end <ide> <del> pre_superenv_hacks <ide> ENV.activate_extensions! <ide> <ide> if superenv? <ide><path>Library/Homebrew/formula_installer.rb <ide> def sanitized_ARGV_options <ide> args << "--verbose" if verbose? <ide> args << "--debug" if debug? <ide> args << "--cc=#{ARGV.cc}" if ARGV.cc <del> args << "--env=#{ARGV.env}" if ARGV.env <add> <add> if ARGV.env <add> args << "--env=#{ARGV.env}" <add> elsif formula.env.std? || formula.recursive_dependencies.any? { |d| d.name == "scons" } <add> args << "--env=std" <add> end <ide> <ide> if formula.head? <ide> args << "--HEAD"
2
Javascript
Javascript
remove extraneous space
88650aaa70768755524e7ad1a7aa8074ac9b1983
<ide><path>test/parallel/test-zlib-truncated.js <ide> <ide> require('../common'); <ide> const assert = require('assert'); <del>const zlib = require ('zlib'); <add>const zlib = require('zlib'); <ide> <ide> const inputString = 'ΩΩLorem ipsum dolor sit amet, consectetur adipiscing eli' + <ide> 't. Morbi faucibus, purus at gravida dictum, libero arcu ' +
1
Text
Text
add beginning visual basic
64385d2c6e9d777afd44fa451f40faac18ab5795
<ide><path>guide/english/book-recommendations/index.md
<ide> title: Books to Read for Programmers
<ide> - [Amazon Smile](https://smile.amazon.com/Soft-Skills-software-developers-manual/dp/1617292397?pldnSite=1)
<ide> - ISBN-13: 9781617292392
<ide>
<add>*The Innovator's Dilemma*
<add>- [Amazon](https://www.amazon.com/Innovators-Dilemma-Technologies-Management-Innovation-ebook/dp/B00E257S86)
<add>- ASIN: B00E257S86
<add>
<add>## Visual Basic
<add>
<add>*Beginning Visual Basic: A Step by Step Computer Programming Tutorial by Philip Conrod*
<add>- ISBN-13: 978-1-937161-75-0
<ide>
<ide> This list was compiled from multiple suggestion threads on Reddit and Stackoverflow.
<ide>
<ide> Please feel free to add more that you have found useful!
<add>
1
Javascript
Javascript
fix spaces -> tabs
77185993df7c7e1171c8b3bac9b551ea616d54a8
<ide><path>src/Chart.Doughnut.js <ide> addData : function(segment, atIndex, silent){ <ide> var index = atIndex !== undefined ? atIndex : this.segments.length; <ide> if ( typeof(segment.color) === "undefined" ) { <del> segment.color = Chart.defaults.global.segmentColorDefault[index]; <del> segment.highlight = Chart.defaults.global.segmentHighlightColorDefaults[index]; <add> segment.color = Chart.defaults.global.segmentColorDefault[index]; <add> segment.highlight = Chart.defaults.global.segmentHighlightColorDefaults[index]; <ide> } <ide> this.segments.splice(index, 0, new this.SegmentArc({ <ide> value : segment.value,
1
Text
Text
reflect c03bba4 in changelogs
5f35c60f848dda275e34bc055f58031c08d7b416
<ide><path>activesupport/CHANGELOG.md <add>* Add support for tracing constant autoloads. Just throw <add> <add> ActiveSupport::Dependencies.logger = Rails.logger <add> ActiveSupport::Dependencies.verbose = true <add> <add> in an initializer. <add> <add> *Xavier Noria* <add> <ide> * Maintain `html_safe?` on html_safe strings when sliced. <ide> <ide> string = "<div>test</div>".html_safe <ide><path>guides/CHANGELOG.md <add>* New section _Troubleshooting_ in the _Autoloading and Reloading Constants_ guide. <add> <add> *Xavier Noria* <add> <ide> * Rails 6 requires Ruby 2.4.1 or newer. <ide> <ide> *Jeremy Daer*
2
Ruby
Ruby
take failed screenshot before resetting driver
3da239a2cd43d04c5f972fa8f8e57e64a139194c
<ide><path>actionpack/lib/action_dispatch/system_testing/test_helpers/setup_and_teardown.rb <ide> def before_setup <ide> end <ide> <ide> def after_teardown <del> super <ide> take_failed_screenshot <add> super <ide> Capybara.reset_sessions! <ide> end <ide> end
1
Javascript
Javascript
fix exit code when linting from ci
1264cecde771a391c3ae192b41c321371cf95fa5
<ide><path>tools/jslint.js <ide> if (cluster.isMaster) { <ide> sendWork(worker); <ide> }); <ide> <del> process.on('exit', function() { <add> process.on('exit', function(code) { <ide> if (showProgress) { <ide> curPath = 'Done'; <ide> printProgress(); <ide> outFn('\r\n'); <ide> } <del> process.exit(failures ? 1 : 0); <add> if (code === 0) <add> process.exit(failures ? 1 : 0); <ide> }); <ide> <ide> for (i = 0; i < numCPUs; ++i) <del> cluster.fork().on('message', onWorkerMessage); <add> cluster.fork().on('message', onWorkerMessage).on('exit', onWorkerExit); <ide> <ide> function onWorkerMessage(results) { <ide> if (typeof results !== 'number') { <ide> // The worker sent us results that are not all successes <del> if (!workerConfig.sendAll) <add> if (workerConfig.sendAll) { <add> failures += results.errorCount; <add> results = results.results; <add> } else { <ide> failures += results.length; <add> } <ide> outFn(formatter(results) + '\r\n'); <ide> printProgress(); <ide> } else { <ide> if (cluster.isMaster) { <ide> sendWork(this); <ide> } <ide> <add> function onWorkerExit(code, signal) { <add> if (code !== 0 || signal) <add> process.exit(2); <add> } <add> <ide> function sendWork(worker) { <ide> if (!files || !files.length) { <ide> // We either just started or we have no more files to lint for the current <ide> if (cluster.isMaster) { <ide> } <ide> } <ide> } <del> process.send(results); <add> process.send({ results: results, errorCount: report.errorCount }); <ide> } else if (report.errorCount === 0) { <ide> // No errors, return number of successful lint operations <ide> process.send(files.length);
1
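The fix above hinges on the master process failing when any lint worker dies abnormally; the load-bearing pattern, reduced to a runnable sketch:

var cluster = require('cluster');

if (cluster.isMaster) {
  cluster.fork().on('exit', function(code, signal) {
    // a worker that crashed or was signalled must fail the whole run,
    // otherwise CI would report success despite the broken worker
    if (code !== 0 || signal)
      process.exit(2);
  });
} else {
  process.exit(1); // simulate a failing worker
}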
Go
Go
fix symlink handling in builder add/copy commands
47da59f7ec4ee0f49d47a9b32abb137bb30b2c48
<ide><path>builder/builder.go <ide> type Context interface { <ide> Close() error <ide> // Stat returns an entry corresponding to path if any. <ide> // It is recommended to return an error if path was not found. <del> Stat(path string) (FileInfo, error) <add> // If path is a symlink it also returns the path to the target file. <add> Stat(path string) (string, FileInfo, error) <ide> // Open opens path from the context and returns a readable stream of it. <ide> Open(path string) (io.ReadCloser, error) <ide> // Walk walks the tree of the context with the function passed to it. <ide> type PathFileInfo struct { <ide> os.FileInfo <ide> // FilePath holds the absolute path to the file. <ide> FilePath string <add> // Name holds the basename for the file. <add> FileName string <ide> } <ide> <ide> // Path returns the absolute path to the file. <ide> func (fi PathFileInfo) Path() string { <ide> return fi.FilePath <ide> } <ide> <add>// Name returns the basename of the file. <add>func (fi PathFileInfo) Name() string { <add> if fi.FileName != "" { <add> return fi.FileName <add> } <add> return fi.FileInfo.Name() <add>} <add> <ide> // Hashed defines an extra method intended for implementations of os.FileInfo. <ide> type Hashed interface { <ide> // Hash returns the hash of a file. <ide><path>builder/dockerfile/internals.go <ide> func (b *Builder) calcCopyInfo(cmdName, origPath string, allowLocalDecompression <ide> <ide> // Must be a dir or a file <ide> <del> fi, err := b.context.Stat(origPath) <add> statPath, fi, err := b.context.Stat(origPath) <ide> if err != nil { <ide> return nil, err <ide> } <ide> func (b *Builder) calcCopyInfo(cmdName, origPath string, allowLocalDecompression <ide> hfi.SetHash("file:" + hfi.Hash()) <ide> return copyInfos, nil <ide> } <del> <ide> // Must be a dir <del> <ide> var subfiles []string <del> b.context.Walk(origPath, func(path string, info builder.FileInfo, err error) error { <add> err = b.context.Walk(statPath, func(path string, info builder.FileInfo, err error) error { <ide> if err != nil { <ide> return err <ide> } <ide> // we already checked handleHash above <ide> subfiles = append(subfiles, info.(builder.Hashed).Hash()) <ide> return nil <ide> }) <add> if err != nil { <add> return nil, err <add> } <ide> <ide> sort.Strings(subfiles) <ide> hasher := sha256.New() <ide> func (b *Builder) readDockerfile() error { <ide> // back to 'Dockerfile' and use that in the error message. 
<ide> if b.DockerfileName == "" { <ide> b.DockerfileName = api.DefaultDockerfileName <del> if _, err := b.context.Stat(b.DockerfileName); os.IsNotExist(err) { <add> if _, _, err := b.context.Stat(b.DockerfileName); os.IsNotExist(err) { <ide> lowercase := strings.ToLower(b.DockerfileName) <del> if _, err := b.context.Stat(lowercase); err == nil { <add> if _, _, err := b.context.Stat(lowercase); err == nil { <ide> b.DockerfileName = lowercase <ide> } <ide> } <ide><path>builder/tarsum.go <ide> import ( <ide> "io" <ide> "os" <ide> "path/filepath" <del> "strings" <ide> <ide> "github.com/docker/docker/pkg/archive" <ide> "github.com/docker/docker/pkg/chrootarchive" <ide> func (c *tarSumContext) Open(path string) (io.ReadCloser, error) { <ide> return r, nil <ide> } <ide> <del>func (c *tarSumContext) Stat(path string) (fi FileInfo, err error) { <add>func (c *tarSumContext) Stat(path string) (string, FileInfo, error) { <ide> cleanpath, fullpath, err := c.normalize(path) <ide> if err != nil { <del> return nil, err <add> return "", nil, err <ide> } <ide> <ide> st, err := os.Lstat(fullpath) <ide> if err != nil { <del> return nil, convertPathError(err, cleanpath) <add> return "", nil, convertPathError(err, cleanpath) <add> } <add> <add> rel, err := filepath.Rel(c.root, fullpath) <add> if err != nil { <add> return "", nil, convertPathError(err, cleanpath) <ide> } <ide> <del> fi = PathFileInfo{st, fullpath} <del> // we set sum to path by default for the case where GetFile returns nil. <del> // The usual case is if cleanpath is empty. <add> // We set sum to path by default for the case where GetFile returns nil. <add> // The usual case is if relative path is empty. <ide> sum := path <del> if tsInfo := c.sums.GetFile(cleanpath); tsInfo != nil { <add> // Use the checksum of the followed path(not the possible symlink) because <add> // this is the file that is actually copied. <add> if tsInfo := c.sums.GetFile(rel); tsInfo != nil { <ide> sum = tsInfo.Sum() <ide> } <del> fi = &HashedFileInfo{fi, sum} <del> return fi, nil <add> fi := &HashedFileInfo{PathFileInfo{st, fullpath, filepath.Base(cleanpath)}, sum} <add> return rel, fi, nil <ide> } <ide> <ide> // MakeTarSumContext returns a build Context from a tar stream. <ide> func (c *tarSumContext) normalize(path string) (cleanpath, fullpath string, err <ide> if err != nil { <ide> return "", "", fmt.Errorf("Forbidden path outside the build context: %s (%s)", path, fullpath) <ide> } <del> _, err = os.Stat(fullpath) <add> _, err = os.Lstat(fullpath) <ide> if err != nil { <ide> return "", "", convertPathError(err, path) <ide> } <ide> return <ide> } <ide> <ide> func (c *tarSumContext) Walk(root string, walkFn WalkFunc) error { <del> for _, tsInfo := range c.sums { <del> path := tsInfo.Name() <del> path, fullpath, err := c.normalize(path) <add> root = filepath.Join(c.root, filepath.Join(string(filepath.Separator), root)) <add> return filepath.Walk(root, func(fullpath string, info os.FileInfo, err error) error { <add> rel, err := filepath.Rel(c.root, fullpath) <ide> if err != nil { <ide> return err <ide> } <del> <del> // Any file in the context that starts with the given path will be <del> // picked up and its hashcode used. However, we'll exclude the <del> // root dir itself. We do this for a coupel of reasons: <del> // 1 - ADD/COPY will not copy the dir itself, just its children <del> // so there's no reason to include it in the hash calc <del> // 2 - the metadata on the dir will change when any child file <del> // changes. 
This will lead to a miss in the cache check if that <del> // child file is in the .dockerignore list. <del> if rel, err := filepath.Rel(root, path); err != nil { <del> return err <del> } else if rel == "." || strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { <del> continue <add> if rel == "." { <add> return nil <ide> } <ide> <del> info, err := os.Lstat(fullpath) <del> if err != nil { <del> return convertPathError(err, path) <add> sum := rel <add> if tsInfo := c.sums.GetFile(rel); tsInfo != nil { <add> sum = tsInfo.Sum() <ide> } <del> // TODO check context breakout? <del> fi := &HashedFileInfo{PathFileInfo{info, fullpath}, tsInfo.Sum()} <del> if err := walkFn(path, fi, nil); err != nil { <add> fi := &HashedFileInfo{PathFileInfo{FileInfo: info, FilePath: fullpath}, sum} <add> if err := walkFn(rel, fi, nil); err != nil { <ide> return err <ide> } <del> } <del> return nil <add> return nil <add> }) <ide> } <ide> <ide> func (c *tarSumContext) Remove(path string) error { <ide><path>daemon/daemonbuilder/builder.go <ide> func (d Docker) Copy(c *daemon.Container, destPath string, src builder.FileInfo, <ide> <ide> // only needed for fixPermissions, but might as well put it before CopyFileWithTar <ide> if destExists && destStat.IsDir() { <del> destPath = filepath.Join(destPath, filepath.Base(srcPath)) <add> destPath = filepath.Join(destPath, src.Name()) <ide> } <ide> <ide> if err := idtools.MkdirAllNewAs(filepath.Dir(destPath), 0755, rootUID, rootGID); err != nil { <ide><path>integration-cli/docker_cli_build_test.go <ide> import ( <ide> <ide> "github.com/docker/docker/builder/dockerfile/command" <ide> "github.com/docker/docker/pkg/archive" <add> "github.com/docker/docker/pkg/integration/checker" <ide> "github.com/docker/docker/pkg/stringutils" <ide> "github.com/go-check/check" <ide> ) <ide> func (s *DockerSuite) TestBuildMultipleTags(c *check.C) { <ide> c.Assert(err, check.IsNil) <ide> c.Assert(id1, check.Equals, id2) <ide> } <add> <add>// #17290 <add>func (s *DockerSuite) TestBuildCacheBrokenSymlink(c *check.C) { <add> testRequires(c, DaemonIsLinux) <add> name := "testbuildbrokensymlink" <add> ctx, err := fakeContext(` <add> FROM busybox <add> COPY . 
./`, <add> map[string]string{ <add> "foo": "bar", <add> }) <add> c.Assert(err, checker.IsNil) <add> defer ctx.Close() <add> <add> err = os.Symlink(filepath.Join(ctx.Dir, "nosuchfile"), filepath.Join(ctx.Dir, "asymlink")) <add> c.Assert(err, checker.IsNil) <add> <add> // warm up cache <add> _, err = buildImageFromContext(name, ctx, true) <add> c.Assert(err, checker.IsNil) <add> <add> // add new file to context, should invalidate cache <add> err = ioutil.WriteFile(filepath.Join(ctx.Dir, "newfile"), []byte("foo"), 0644) <add> c.Assert(err, checker.IsNil) <add> <add> _, out, err := buildImageFromContextWithOut(name, ctx, true) <add> c.Assert(err, checker.IsNil) <add> <add> c.Assert(out, checker.Not(checker.Contains), "Using cache") <add> <add>} <add> <add>func (s *DockerSuite) TestBuildFollowSymlinkToFile(c *check.C) { <add> testRequires(c, DaemonIsLinux) <add> name := "testbuildbrokensymlink" <add> ctx, err := fakeContext(` <add> FROM busybox <add> COPY asymlink target`, <add> map[string]string{ <add> "foo": "bar", <add> }) <add> c.Assert(err, checker.IsNil) <add> defer ctx.Close() <add> <add> err = os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink")) <add> c.Assert(err, checker.IsNil) <add> <add> id, err := buildImageFromContext(name, ctx, true) <add> c.Assert(err, checker.IsNil) <add> <add> out, _ := dockerCmd(c, "run", "--rm", id, "cat", "target") <add> c.Assert(out, checker.Matches, "bar") <add> <add> // change target file should invalidate cache <add> err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo"), []byte("baz"), 0644) <add> c.Assert(err, checker.IsNil) <add> <add> id, out, err = buildImageFromContextWithOut(name, ctx, true) <add> c.Assert(err, checker.IsNil) <add> c.Assert(out, checker.Not(checker.Contains), "Using cache") <add> <add> out, _ = dockerCmd(c, "run", "--rm", id, "cat", "target") <add> c.Assert(out, checker.Matches, "baz") <add>} <add> <add>func (s *DockerSuite) TestBuildFollowSymlinkToDir(c *check.C) { <add> testRequires(c, DaemonIsLinux) <add> name := "testbuildbrokensymlink" <add> ctx, err := fakeContext(` <add> FROM busybox <add> COPY asymlink /`, <add> map[string]string{ <add> "foo/abc": "bar", <add> "foo/def": "baz", <add> }) <add> c.Assert(err, checker.IsNil) <add> defer ctx.Close() <add> <add> err = os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink")) <add> c.Assert(err, checker.IsNil) <add> <add> id, err := buildImageFromContext(name, ctx, true) <add> c.Assert(err, checker.IsNil) <add> <add> out, _ := dockerCmd(c, "run", "--rm", id, "cat", "abc", "def") <add> c.Assert(out, checker.Matches, "barbaz") <add> <add> // change target file should invalidate cache <add> err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo/def"), []byte("bax"), 0644) <add> c.Assert(err, checker.IsNil) <add> <add> id, out, err = buildImageFromContextWithOut(name, ctx, true) <add> c.Assert(err, checker.IsNil) <add> c.Assert(out, checker.Not(checker.Contains), "Using cache") <add> <add> out, _ = dockerCmd(c, "run", "--rm", id, "cat", "abc", "def") <add> c.Assert(out, checker.Matches, "barbax") <add> <add>} <add> <add>// TestBuildSymlinkBasename tests that target file gets basename from symlink, <add>// not from the target file. 
<add>func (s *DockerSuite) TestBuildSymlinkBasename(c *check.C) { <add> testRequires(c, DaemonIsLinux) <add> name := "testbuildbrokensymlink" <add> ctx, err := fakeContext(` <add> FROM busybox <add> COPY asymlink /`, <add> map[string]string{ <add> "foo": "bar", <add> }) <add> c.Assert(err, checker.IsNil) <add> defer ctx.Close() <add> <add> err = os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink")) <add> c.Assert(err, checker.IsNil) <add> <add> id, err := buildImageFromContext(name, ctx, true) <add> c.Assert(err, checker.IsNil) <add> <add> out, _ := dockerCmd(c, "run", "--rm", id, "cat", "asymlink") <add> c.Assert(out, checker.Matches, "bar") <add> <add>} <ide><path>integration-cli/docker_utils.go <ide> func buildImage(name, dockerfile string, useCache bool, buildFlags ...string) (s <ide> } <ide> <ide> func buildImageFromContext(name string, ctx *FakeContext, useCache bool, buildFlags ...string) (string, error) { <add> id, _, err := buildImageFromContextWithOut(name, ctx, useCache, buildFlags...) <add> if err != nil { <add> return "", err <add> } <add> return id, nil <add>} <add> <add>func buildImageFromContextWithOut(name string, ctx *FakeContext, useCache bool, buildFlags ...string) (string, string, error) { <ide> args := []string{"build", "-t", name} <ide> if !useCache { <ide> args = append(args, "--no-cache") <ide> func buildImageFromContext(name string, ctx *FakeContext, useCache bool, buildFl <ide> buildCmd.Dir = ctx.Dir <ide> out, exitCode, err := runCommandWithOutput(buildCmd) <ide> if err != nil || exitCode != 0 { <del> return "", fmt.Errorf("failed to build the image: %s", out) <add> return "", "", fmt.Errorf("failed to build the image: %s", out) <ide> } <del> return getIDByName(name) <add> id, err := getIDByName(name) <add> if err != nil { <add> return "", "", err <add> } <add> return id, out, nil <ide> } <ide> <ide> func buildImageFromPath(name, path string, useCache bool, buildFlags ...string) (string, error) {
6
Python
Python
set version to v2.1.0.dev0
66496ac8e1933143decaee299a2e91551db928c0
<ide><path>spacy/about.py <ide> # https://github.com/pypa/warehouse/blob/master/warehouse/__about__.py <ide> <ide> __title__ = 'spacy' <del>__version__ = '2.0.8' <add>__version__ = '2.1.0.dev0' <ide> __summary__ = 'Industrial-strength Natural Language Processing (NLP) with Python and Cython' <ide> __uri__ = 'https://spacy.io' <ide> __author__ = 'Explosion AI' <ide> __email__ = 'contact@explosion.ai' <ide> __license__ = 'MIT' <del>__release__ = True <add>__release__ = False <ide> <ide> __docs_models__ = 'https://spacy.io/usage/models' <ide> __download_url__ = 'https://github.com/explosion/spacy-models/releases/download'
1
Javascript
Javascript
fix publishing of canary builds
ca94affbe1c3bc45583c04167a20e9328580c215
<ide><path>config/s3ProjectConfig.js <ide> fileMap = function(revision,tag,date) { <ide> "ember-runtime.js": fileObject("ember-runtime", ".js", "text/javascript", revision, tag, date), <ide> "ember.min.js": fileObject("ember.min", ".js", "text/javascript", revision, tag, date), <ide> "ember.prod.js": fileObject("ember.prod", ".js", "text/javascript", revision, tag, date), <del> "../docs/build/data.json": fileObject("ember-docs", ".json", "application/json", revision, tag, date) <add>// "../docs/build/data.json": fileObject("ember-docs", ".json", "application/json", revision, tag, date) <ide> }; <ide> }; <ide>
1
Javascript
Javascript
remove noerrorsplugin from examples
be55659360722862b4efd153e992423f631a3de5
<ide><path>examples/async/webpack.config.js <ide> module.exports = { <ide> }, <ide> plugins: [ <ide> new webpack.optimize.OccurenceOrderPlugin(), <del> new webpack.HotModuleReplacementPlugin(), <del> new webpack.NoErrorsPlugin() <add> new webpack.HotModuleReplacementPlugin() <ide> ], <ide> module: { <ide> loaders: [ <ide><path>examples/counter/webpack.config.js <ide> module.exports = { <ide> }, <ide> plugins: [ <ide> new webpack.optimize.OccurenceOrderPlugin(), <del> new webpack.HotModuleReplacementPlugin(), <del> new webpack.NoErrorsPlugin() <add> new webpack.HotModuleReplacementPlugin() <ide> ], <ide> module: { <ide> loaders: [ <ide><path>examples/real-world/webpack.config.js <ide> module.exports = { <ide> }, <ide> plugins: [ <ide> new webpack.optimize.OccurenceOrderPlugin(), <del> new webpack.HotModuleReplacementPlugin(), <del> new webpack.NoErrorsPlugin() <add> new webpack.HotModuleReplacementPlugin() <ide> ], <ide> module: { <ide> loaders: [ <ide><path>examples/shopping-cart/webpack.config.js <ide> module.exports = { <ide> }, <ide> plugins: [ <ide> new webpack.optimize.OccurenceOrderPlugin(), <del> new webpack.HotModuleReplacementPlugin(), <del> new webpack.NoErrorsPlugin() <add> new webpack.HotModuleReplacementPlugin() <ide> ], <ide> module: { <ide> loaders: [ <ide><path>examples/todomvc/webpack.config.js <ide> module.exports = { <ide> }, <ide> plugins: [ <ide> new webpack.optimize.OccurenceOrderPlugin(), <del> new webpack.HotModuleReplacementPlugin(), <del> new webpack.NoErrorsPlugin() <add> new webpack.HotModuleReplacementPlugin() <ide> ], <ide> module: { <ide> loaders: [ <ide><path>examples/todos-with-undo/webpack.config.js <ide> module.exports = { <ide> }, <ide> plugins: [ <ide> new webpack.optimize.OccurenceOrderPlugin(), <del> new webpack.HotModuleReplacementPlugin(), <del> new webpack.NoErrorsPlugin() <add> new webpack.HotModuleReplacementPlugin() <ide> ], <ide> module: { <ide> loaders: [{ <ide><path>examples/todos/webpack.config.js <ide> module.exports = { <ide> }, <ide> plugins: [ <ide> new webpack.optimize.OccurenceOrderPlugin(), <del> new webpack.HotModuleReplacementPlugin(), <del> new webpack.NoErrorsPlugin() <add> new webpack.HotModuleReplacementPlugin() <ide> ], <ide> module: { <ide> loaders: [ <ide><path>examples/tree-view/webpack.config.js <ide> module.exports = { <ide> }, <ide> plugins: [ <ide> new webpack.optimize.OccurenceOrderPlugin(), <del> new webpack.HotModuleReplacementPlugin(), <del> new webpack.NoErrorsPlugin() <add> new webpack.HotModuleReplacementPlugin() <ide> ], <ide> module: { <ide> loaders: [ <ide><path>examples/universal/webpack.config.js <ide> module.exports = { <ide> }, <ide> plugins: [ <ide> new webpack.optimize.OccurenceOrderPlugin(), <del> new webpack.HotModuleReplacementPlugin(), <del> new webpack.NoErrorsPlugin() <add> new webpack.HotModuleReplacementPlugin() <ide> ], <ide> module: { <ide> loaders: [
9
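Since the same two-line deletion repeats across all nine example configs, here is a hedged sketch of the shared shape they converge on; the entry, output and loader settings are placeholders rather than values taken from the patch.

var webpack = require('webpack');

module.exports = {
  entry: ['webpack-hot-middleware/client', './index'],   // placeholder entry
  output: { path: __dirname + '/dist', filename: 'bundle.js' },
  plugins: [
    new webpack.optimize.OccurenceOrderPlugin(),
    new webpack.HotModuleReplacementPlugin()
    // NoErrorsPlugin removed: compile errors are now emitted to the HMR
    // client instead of the failing assets being silently withheld.
  ],
  module: {
    loaders: [{ test: /\.js$/, loaders: ['babel'], exclude: /node_modules/ }]
  }
};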
Javascript
Javascript
sanitize doctype declarations correctly
e66c23fe55f8571a014b0686c8dbca128e7a8240
<ide><path>src/ngSanitize/sanitize.js <ide> var START_TAG_REGEXP = /^<\s*([\w:-]+)((?:\s+[\w:-]+(?:\s*=\s*(?:(?:"[^"]*")|(?: <ide> BEGIN_TAG_REGEXP = /^</, <ide> BEGING_END_TAGE_REGEXP = /^<\s*\//, <ide> COMMENT_REGEXP = /<!--(.*?)-->/g, <add> DOCTYPE_REGEXP = /<!DOCTYPE([^>]*?)>/i, <ide> CDATA_REGEXP = /<!\[CDATA\[(.*?)]]>/g, <ide> URI_REGEXP = /^((ftp|https?):\/\/|mailto:|tel:|#)/i, <ide> NON_ALPHANUMERIC_REGEXP = /([^\#-~| |!])/g; // Match everything outside of normal chars and " (quote character) <ide> function htmlParser( html, handler ) { <ide> html = html.substring( index + 3 ); <ide> chars = false; <ide> } <add> // DOCTYPE <add> } else if ( DOCTYPE_REGEXP.test(html) ) { <add> match = html.match( DOCTYPE_REGEXP ); <ide> <add> if ( match ) { <add> html = html.replace( match[0] , ''); <add> chars = false; <add> } <ide> // end tag <ide> } else if ( BEGING_END_TAGE_REGEXP.test(html) ) { <ide> match = html.match( END_TAG_REGEXP ); <ide><path>test/ngSanitize/sanitizeSpec.js <ide> describe('HTML', function() { <ide> attrs: attrs, <ide> unary: unary <ide> }; <del> // Since different browsers handle newlines differenttly we trim <add> // Since different browsers handle newlines differently we trim <ide> // so that it is easier to write tests. <ide> angular.forEach(attrs, function(value, key) { <ide> attrs[key] = value.replace(/^\s*/, '').replace(/\s*$/, '') <ide> describe('HTML', function() { <ide> expectHTML('a<SCRIPT>evil< / scrIpt >c.').toEqual('ac.'); <ide> }); <ide> <add> it('should remove DOCTYPE header', function() { <add> expectHTML('<!DOCTYPE html>').toEqual(''); <add> expectHTML('<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"\n"http://www.w3.org/TR/html4/strict.dtd">').toEqual(''); <add> expectHTML('a<!DOCTYPE html>c.').toEqual('ac.'); <add> expectHTML('a<!DocTyPe html>c.').toEqual('ac.'); <add> }); <add> <ide> it('should remove nested script', function() { <ide> expectHTML('a< SCRIPT >A< SCRIPT >evil< / scrIpt >B< / scrIpt >c.').toEqual('ac.'); <ide> }); <ide> describe('HTML', function() { <ide> }); <ide> }); <ide> <add> <ide> }); <ide> });
2
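To make the new branch concrete, a standalone sketch of the stripping behaviour: the regex is copied from the patch, while stripDoctype is a hypothetical helper rather than the real htmlParser state machine.

var DOCTYPE_REGEXP = /<!DOCTYPE([^>]*?)>/i;

function stripDoctype(html) {
  var match = html.match(DOCTYPE_REGEXP);
  return match ? html.replace(match[0], '') : html;
}

console.log(stripDoctype('a<!DocTyPe html>c.'));                                  // "ac."
console.log(stripDoctype('<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN">'));  // ""

The case-insensitive flag is what makes the 'a<!DocTyPe html>c.' test case pass.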
Ruby
Ruby
fix call sites
c401c43850e79a4d994e22dd8d82a69612bb947f
<ide><path>actionpack/lib/action_controller/railties/helpers.rb <ide> def inherited(klass) <ide> super <ide> return unless klass.respond_to?(:helpers_path=) <ide> <del> if namespace = klass.parents.detect { |m| m.respond_to?(:railtie_helpers_paths) } <add> if namespace = klass.module_parents.detect { |m| m.respond_to?(:railtie_helpers_paths) } <ide> paths = namespace.railtie_helpers_paths <ide> else <ide> paths = ActionController::Helpers.helpers_path <ide><path>actionpack/lib/action_dispatch/routing/route_set.rb <ide> def include_helpers(klass, include_path_helpers) <ide> end <ide> <ide> def include_helpers_now(klass, include_path_helpers) <del> namespace = klass.parents.detect { |m| m.respond_to?(:railtie_include_helpers) } <add> namespace = klass.module_parents.detect { |m| m.respond_to?(:railtie_include_helpers) } <ide> <ide> if namespace && namespace.railtie_namespace.routes != self <ide> namespace.railtie_include_helpers(klass, include_path_helpers) <ide><path>activemodel/lib/active_model/naming.rb <ide> def self.extended(base) #:nodoc: <ide> # Person.model_name.plural # => "people" <ide> def model_name <ide> @_model_name ||= begin <del> namespace = parents.detect do |n| <add> namespace = module_parents.detect do |n| <ide> n.respond_to?(:use_relative_model_naming?) && n.use_relative_model_naming? <ide> end <ide> ActiveModel::Name.new(self, namespace) <ide><path>activerecord/lib/active_record/associations/builder/collection_association.rb <ide> def self.define_extensions(model, name) <ide> if block_given? <ide> extension_module_name = "#{model.name.demodulize}#{name.to_s.camelize}AssociationExtension" <ide> extension = Module.new(&Proc.new) <del> model.parent.const_set(extension_module_name, extension) <add> model.module_parent.const_set(extension_module_name, extension) <ide> end <ide> end <ide> <ide><path>activerecord/lib/active_record/model_schema.rb <ide> def reset_table_name #:nodoc: <ide> end <ide> <ide> def full_table_name_prefix #:nodoc: <del> (parents.detect { |p| p.respond_to?(:table_name_prefix) } || self).table_name_prefix <add> (module_parents.detect { |p| p.respond_to?(:table_name_prefix) } || self).table_name_prefix <ide> end <ide> <ide> def full_table_name_suffix #:nodoc: <del> (parents.detect { |p| p.respond_to?(:table_name_suffix) } || self).table_name_suffix <add> (module_parents.detect { |p| p.respond_to?(:table_name_suffix) } || self).table_name_suffix <ide> end <ide> <ide> # The array of names of environments where destructive actions should be prohibited. By default, <ide> def undecorated_table_name(class_name = base_class.name) <ide> def compute_table_name <ide> if base_class? <ide> # Nested classes are prefixed with singular parent table name. <del> if parent < Base && !parent.abstract_class? <del> contained = parent.table_name <del> contained = contained.singularize if parent.pluralize_table_names <add> if module_parent < Base && !module_parent.abstract_class? <add> contained = module_parent.table_name <add> contained = contained.singularize if module_parent.pluralize_table_names <ide> contained += "_" <ide> end <ide> <ide><path>activesupport/lib/active_support/dependencies.rb <ide> def load_missing_constant(from_mod, const_name) <ide> end <ide> elsif mod = autoload_module!(from_mod, const_name, qualified_name, path_suffix) <ide> return mod <del> elsif (parent = from_mod.parent) && parent != from_mod && <del> ! from_mod.parents.any? { |p| p.const_defined?(const_name, false) } <add> elsif (parent = from_mod.module_parent) && parent != from_mod && <add> ! from_mod.module_parents.any? { |p| p.const_defined?(const_name, false) }
<ide> # If our parents do not have a constant named +const_name+ then we are free <ide> # to attempt to load upwards. If they do have such a constant, then this <ide> # const_missing must be due to from_mod::const_name, which should not <ide><path>railties/lib/rails/railtie.rb <ide> def config <ide> end <ide> <ide> def railtie_namespace #:nodoc: <del> @railtie_namespace ||= self.class.parents.detect { |n| n.respond_to?(:railtie_namespace) } <add> @railtie_namespace ||= self.class.module_parents.detect { |n| n.respond_to?(:railtie_namespace) } <ide> end <ide> <ide> protected
7
Javascript
Javascript
ensure empty socket on error
1a3ca8223e08d82051655d7d7e2ea31b439f1bf1
<ide><path>lib/_http_client.js <ide> function socketCloseListener() { <ide> <ide> function socketErrorListener(err) { <ide> var socket = this; <del> var parser = socket.parser; <ide> var req = socket._httpMessage; <ide> debug('SOCKET ERROR:', err.message, err.stack); <ide> <ide> function socketErrorListener(err) { <ide> req.socket._hadError = true; <ide> } <ide> <add> // Handle any pending data <add> socket.read(); <add> <add> var parser = socket.parser; <ide> if (parser) { <ide> parser.finish(); <ide> freeParser(parser, req, socket); <ide> } <add> <add> // Ensure that no further data will come out of the socket <add> socket.removeListener('data', socketOnData); <add> socket.removeListener('end', socketOnEnd); <ide> socket.destroy(); <ide> } <ide> <ide><path>test/parallel/test-http-client-read-in-error.js <add>var net = require('net'); <add>var http = require('http'); <add>var util = require('util'); <add> <add>function Agent() { <add> http.Agent.call(this); <add>} <add>util.inherits(Agent, http.Agent); <add> <add>Agent.prototype.createConnection = function() { <add> var self = this; <add> var socket = new net.Socket(); <add> <add> socket.on('error', function() { <add> socket.push('HTTP/1.1 200\r\n\r\n'); <add> }); <add> <add> socket.on('newListener', function onNewListener(name) { <add> if (name !== 'error') <add> return; <add> socket.removeListener('newListener', onNewListener); <add> <add> // Let other listeners to be set up too <add> process.nextTick(function() { <add> self.breakSocket(socket); <add> }); <add> }); <add> <add> return socket; <add>}; <add> <add>Agent.prototype.breakSocket = function breakSocket(socket) { <add> socket.emit('error', new Error('Intentional error')); <add>}; <add> <add>var agent = new Agent(); <add> <add>http.request({ <add> agent: agent <add>}).once('error', function() { <add> console.log('ignore'); <add>});
2
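The fix boils down to one defensive pattern; below is a minimal runnable sketch of it, detached from the _http_client.js internals (onData and onEnd stand in for the real socketOnData and socketOnEnd handlers).

'use strict';
var net = require('net');

var socket = new net.Socket();

function onData(chunk) { /* the HTTP parser would consume chunk here */ }
function onEnd() { /* the HTTP parser would finish here */ }

socket.on('data', onData);
socket.on('end', onEnd);
socket.on('error', function(err) {
  socket.read();                          // drain anything still buffered
  socket.removeListener('data', onData);  // no stale bytes may reach a
  socket.removeListener('end', onEnd);    // parser that was already freed
  socket.destroy();
});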
Javascript
Javascript
pull reusable logic out of `bundler`
66d51bb5d1f7df07694698094b2dbe6a6642a05c
<ide><path>packager/src/Bundler/index.js <ide> const defaults = require('../../defaults'); <ide> const os = require('os'); <ide> const invariant = require('fbjs/lib/invariant'); <ide> <add>const {generateAssetTransformResult, isAssetTypeAnImage} = require('./util'); <add> <ide> const { <ide> sep: pathSeparator, <ide> join: joinPath, <ide> const { <ide> log, <ide> } = require('../Logger'); <ide> <del>const assetPropertyBlacklist = new Set([ <del> 'files', <del> 'fileSystemLocation', <del> 'path', <del>]); <del> <ide> export type PostProcessModulesOptions = {| <ide> dev: boolean, <ide> minify: boolean, <ide> class Bundler { <ide> assetUrlPath = assetUrlPath.replace(/\\/g, '/'); <ide> } <ide> <del> const isImage = Bundler.isAssetTypeAnImage(extname(module.path).slice(1)); <add> const isImage = isAssetTypeAnImage(extname(module.path).slice(1)); <ide> <ide> return this._assetServer.getAssetData(relPath, platform).then(assetData => { <ide> return Promise.all([isImage ? sizeOf(assetData.files[0]) : null, assetData]); <ide> class Bundler { <ide> <ide> return this._applyAssetPlugins(assetPlugins, asset); <ide> }).then(asset => { <del> const {code, dependencies, dependencyOffsets} = Bundler.generateAssetTransformResult(asset); <add> const {code, dependencies, dependencyOffsets} = generateAssetTransformResult(asset); <ide> return { <ide> asset, <ide> code, <ide> class Bundler { <ide> }); <ide> } <ide> <del> // Test extension against all types supported by image-size module. <del> // If it's not one of these, we won't treat it as an image. <del> static isAssetTypeAnImage(type: string): boolean { <del> return [ <del> 'png', 'jpg', 'jpeg', 'bmp', 'gif', 'webp', 'psd', 'svg', 'tiff', <del> ].indexOf(type) !== -1; <del> } <del> <del> static generateAssetTransformResult(assetDescriptor: AssetDescriptor): {| <del> code: string, <del> dependencies: Array<string>, <del> dependencyOffsets: Array<number>, <del> |} { <del> const properDescriptor = filterObject(assetDescriptor, assetPropertyBlacklist); <del> const json = JSON.stringify(properDescriptor); <del> const assetRegistryPath = 'react-native/Libraries/Image/AssetRegistry'; <del> const code = <del> `module.exports = require(${JSON.stringify(assetRegistryPath)}).registerAsset(${json});`; <del> const dependencies = [assetRegistryPath]; <del> const dependencyOffsets = [code.indexOf(assetRegistryPath) - 1]; <del> return {code, dependencies, dependencyOffsets}; <del> } <del> <ide> _applyAssetPlugins( <ide> assetPlugins: Array<string>, <ide> asset: ExtendedAssetDescriptor, <ide> function getMainModule({dependencies, numPrependedDependencies = 0}) { <ide> return dependencies[numPrependedDependencies]; <ide> } <ide> <del>function filterObject(object, blacklist) { <del> const copied = Object.assign({}, object); <del> for (const key of blacklist) { <del> delete copied[key]; <del> } <del> return copied; <del>} <del> <ide> module.exports = Bundler; <ide><path>packager/src/Bundler/util.js <add>/** <add> * Copyright (c) 2015-present, Facebook, Inc. <add> * All rights reserved. <add> * <add> * This source code is licensed under the BSD-style license found in the <add> * LICENSE file in the root directory of this source tree. An additional grant <add> * of patent rights can be found in the PATENTS file in the same directory. 
<add> * <add> * @flow <add> */ <add> <add>'use strict'; <add> <add>import type {AssetDescriptor} from '.'; <add> <add>const assetPropertyBlacklist = new Set([ <add> 'files', <add> 'fileSystemLocation', <add> 'path', <add>]); <add> <add>function generateAssetTransformResult(assetDescriptor: AssetDescriptor): {| <add> code: string, <add> dependencies: Array<string>, <add> dependencyOffsets: Array<number>, <add>|} { <add> const properDescriptor = filterObject(assetDescriptor, assetPropertyBlacklist); <add> const json = JSON.stringify(properDescriptor); <add> const assetRegistryPath = 'react-native/Libraries/Image/AssetRegistry'; <add> const code = <add> `module.exports = require(${JSON.stringify(assetRegistryPath)}).registerAsset(${json});`; <add> const dependencies = [assetRegistryPath]; <add> const dependencyOffsets = [code.indexOf(assetRegistryPath) - 1]; <add> return {code, dependencies, dependencyOffsets}; <add>} <add> <add>// Test extension against all types supported by image-size module. <add>// If it's not one of these, we won't treat it as an image. <add>function isAssetTypeAnImage(type: string): boolean { <add> return [ <add> 'png', 'jpg', 'jpeg', 'bmp', 'gif', 'webp', 'psd', 'svg', 'tiff', <add> ].indexOf(type) !== -1; <add>} <add> <add>function filterObject(object, blacklist) { <add> const copied = Object.assign({}, object); <add> for (const key of blacklist) { <add> delete copied[key]; <add> } <add> return copied; <add>} <add> <add>exports.generateAssetTransformResult = generateAssetTransformResult; <add>exports.isAssetTypeAnImage = isAssetTypeAnImage;
2
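A hedged usage sketch for the extracted helpers; the require path mirrors the new module location, and the descriptor fields are illustrative rather than the full AssetDescriptor type.

const {generateAssetTransformResult, isAssetTypeAnImage} = require('./util');

console.log(isAssetTypeAnImage('png'));  // true
console.log(isAssetTypeAnImage('mp4'));  // false

// 'files', 'fileSystemLocation' and 'path' are on the blacklist and get
// stripped before the descriptor is serialized into the generated module.
const {code, dependencies, dependencyOffsets} = generateAssetTransformResult({
  name: 'logo',
  type: 'png',
  files: ['/abs/logo.png'],
  fileSystemLocation: '/abs',
  path: '/abs/logo.png',
});
console.log(code);               // module.exports = require("react-native/...").registerAsset({...});
console.log(dependencies);       // ['react-native/Libraries/Image/AssetRegistry']
console.log(dependencyOffsets);  // offset of the dependency string inside `code`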
PHP
PHP
fix lint error
7ff987b05919bbac4c0acd59a8e638e395455022
<ide><path>Cake/Controller/Controller.php <ide> class Controller extends Object implements EventListener { <ide> <ide> use MergeVariablesTrait; <del> use RequestActionTrait; <ide> use RepositoryAwareTrait; <add> use RequestActionTrait; <ide> use ViewVarsTrait; <ide> <ide> /**
1
Java
Java
use class canonical name for partial_wake_lock tag
88dbb4558cd10f129f2c31e3b0b872924aba5416
<ide><path>ReactAndroid/src/main/java/com/facebook/react/HeadlessJsTaskService.java <ide> public static void acquireWakeLockNow(Context context) { <ide> Assertions.assertNotNull((PowerManager) context.getSystemService(POWER_SERVICE)); <ide> sWakeLock = powerManager.newWakeLock( <ide> PowerManager.PARTIAL_WAKE_LOCK, <del> HeadlessJsTaskService.class.getSimpleName()); <add> HeadlessJsTaskService.class.getCanonicalName()); <ide> sWakeLock.setReferenceCounted(false); <ide> sWakeLock.acquire(); <ide> }
1
Javascript
Javascript
update an obsolete path
65d6249979fc0dc0376a170ecf252db095ea656a
<ide><path>benchmark/http/_chunky_http_client.js <ide> // test HTTP throughput in fragmented header case <ide> var common = require('../common.js'); <ide> var net = require('net'); <del>var test = require('../../test/common.js'); <add>var test = require('../../test/common'); <ide> <ide> var bench = common.createBenchmark(main, { <ide> len: [1, 4, 8, 16, 32, 64, 128],
1
Python
Python
add tf bert files
d68a8fe462ad836dc2127b21b456f1d8d65bec3b
<ide><path>pytorch_transformers/file_utils.py <ide> def url_to_filename(url, etag=None): <ide> Convert `url` into a hashed filename in a repeatable way. <ide> If `etag` is specified, append its hash to the url's, delimited <ide> by a period. <add> If the url ends with .h5 (Keras HDF5 weights) ands '.h5' to the name <add> so that TF 2.0 can identify it as a HDF5 file <add> (see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1380) <ide> """ <ide> url_bytes = url.encode('utf-8') <ide> url_hash = sha256(url_bytes) <ide> def url_to_filename(url, etag=None): <ide> etag_hash = sha256(etag_bytes) <ide> filename += '.' + etag_hash.hexdigest() <ide> <add> if url.endswith('.h5'): <add> filename += '.h5' <add> <ide> return filename <ide> <ide> <ide><path>pytorch_transformers/modeling_tf_bert.py <add># coding=utf-8 <add># Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. <add># Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. <add># <add># Licensed under the Apache License, Version 2.0 (the "License"); <add># you may not use this file except in compliance with the License. <add># You may obtain a copy of the License at <add># <add># http://www.apache.org/licenses/LICENSE-2.0 <add># <add># Unless required by applicable law or agreed to in writing, software <add># distributed under the License is distributed on an "AS IS" BASIS, <add># WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. <add># See the License for the specific language governing permissions and <add># limitations under the License. <add>""" TF 2.0 BERT model. """ <add> <add>from __future__ import absolute_import, division, print_function, unicode_literals <add> <add>import json <add>import logging <add>import math <add>import os <add>import sys <add>from io import open <add> <add>import numpy as np <add>import tensorflow as tf <add> <add>from .configuration_bert import BertConfig <add>from .modeling_tf_utils import TFPreTrainedModel <add>from .file_utils import add_start_docstrings <add> <add>logger = logging.getLogger(__name__) <add> <add> <add>TF_BERT_PRETRAINED_MODEL_ARCHIVE_MAP = { <add> 'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-tf_model.h5", <add> 'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-tf_model.h5", <add> 'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-tf_model.h5", <add> 'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-tf_model.h5", <add> 'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-tf_model.h5", <add> 'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-tf_model.h5", <add> 'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-tf_model.h5", <add> 'bert-base-german-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-tf_model.h5", <add> 'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-tf_model.h5", <add> 'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-tf_model.h5", <add> 'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-tf_model.h5",
<add> 'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-tf_model.h5", <add> 'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-tf_model.h5", <add>} <add> <add> <add>def load_pt_weights_in_bert(tf_model, config, pytorch_checkpoint_path): <add> """ Load pytorch checkpoints in a TF 2.0 model and save it using HDF5 format <add> We use HDF5 to easily do transfer learning <add> (see https://github.com/tensorflow/tensorflow/blob/ee16fcac960ae660e0e4496658a366e2f745e1f0/tensorflow/python/keras/engine/network.py#L1352-L1357). <add> """ <add> try: <add> import re <add> import torch <add> import numpy <add> from tensorflow.python.keras import backend as K <add> except ImportError: <add> logger.error("Loading a PyTorch model in TensorFlow, requires PyTorch to be installed. Please see " <add> "https://pytorch.org/ for installation instructions.") <add> raise <add> <add> pt_path = os.path.abspath(pytorch_checkpoint_path) <add> logger.info("Loading PyTorch weights from {}".format(pt_path)) <add> # Load pytorch model <add> state_dict = torch.load(pt_path, map_location='cpu') <add> <add> inputs_list = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]] <add> tf_inputs = tf.constant(inputs_list) <add> tfo = tf_model(tf_inputs, training=False) # build the network <add> <add> symbolic_weights = tf_model.trainable_weights + tf_model.non_trainable_weights <add> weight_value_tuples = [] <add> for symbolic_weight in symbolic_weights: <add> name = symbolic_weight.name <add> name = name.replace('cls_mlm', 'cls') # We had to split this layer in two in the TF model to be <add> name = name.replace('cls_nsp', 'cls') # able to do transfer learning (Keras only allow to remove full layers) <add> name = name.replace(':0', '') <add> name = name.replace('layer_', 'layer/') <add> name = name.split('/') <add> name = name[1:] <add> <add> transpose = bool(name[-1] == 'kernel') <add> if name[-1] == 'kernel' or name[-1] == 'embeddings': <add> name[-1] = 'weight' <add> <add> name = '.'.join(name) <add> assert name in state_dict <add> array = state_dict[name].numpy() <add> <add> if transpose: <add> array = numpy.transpose(array) <add> <add> try: <add> assert list(symbolic_weight.shape) == list(array.shape) <add> except AssertionError as e: <add> e.args += (symbolic_weight.shape, array.shape) <add> raise e <add> <add> logger.info("Initialize TF weight {}".format(symbolic_weight.name)) <add> <add> weight_value_tuples.append((symbolic_weight, array)) <add> <add> K.batch_set_value(weight_value_tuples) <add> <add> tfo = tf_model(tf_inputs, training=False) # Make sure restore ops are run <add> return tf_model <add> <add> <add>def gelu(x): <add> """Gaussian Error Linear Unit. <add> This is a smoother version of the RELU. <add> Original paper: https://arxiv.org/abs/1606.08415 <add> Args: <add> x: float Tensor to perform activation. <add> Returns: <add> `x` with the GELU activation applied.
<add> """ <add> cdf = 0.5 * (1.0 + tf.tanh( <add> (np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))))) <add> return x * cdf <add> <add> <add>def swish(x): <add> return x * tf.sigmoid(x) <add> <add> <add>ACT2FN = {"gelu": tf.keras.layers.Activation(gelu), <add> "relu": tf.keras.activations.relu, <add> "swish": tf.keras.layers.Activation(swish)} <add> <add> <add>class TFBertEmbeddings(tf.keras.layers.Layer): <add> """Construct the embeddings from word, position and token_type embeddings. <add> """ <add> def __init__(self, config, **kwargs): <add> super(TFBertEmbeddings, self).__init__(**kwargs) <add> self.word_embeddings = tf.keras.layers.Embedding(config.vocab_size, config.hidden_size, name='word_embeddings') <add> self.position_embeddings = tf.keras.layers.Embedding(config.max_position_embeddings, config.hidden_size, name='position_embeddings') <add> self.token_type_embeddings = tf.keras.layers.Embedding(config.type_vocab_size, config.hidden_size, name='token_type_embeddings') <add> <add> # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load <add> # any TensorFlow checkpoint file <add> self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm') <add> self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) <add> <add> def call(self, inputs, training=False): <add> input_ids, position_ids, token_type_ids = inputs <add> <add> seq_length = tf.shape(input_ids)[1] <add> if position_ids is None: <add> position_ids = tf.range(seq_length, dtype=tf.int32)[tf.newaxis, :] <add> if token_type_ids is None: <add> token_type_ids = tf.fill(tf.shape(input_ids), 0) <add> <add> words_embeddings = self.word_embeddings(input_ids) <add> position_embeddings = self.position_embeddings(position_ids) <add> token_type_embeddings = self.token_type_embeddings(token_type_ids) <add> <add> embeddings = words_embeddings + position_embeddings + token_type_embeddings <add> embeddings = self.LayerNorm(embeddings) <add> if training: <add> embeddings = self.dropout(embeddings) <add> return embeddings <add> <add> <add>class TFBertSelfAttention(tf.keras.layers.Layer): <add> def __init__(self, config, **kwargs): <add> super(TFBertSelfAttention, self).__init__(**kwargs) <add> if config.hidden_size % config.num_attention_heads != 0: <add> raise ValueError( <add> "The hidden size (%d) is not a multiple of the number of attention " <add> "heads (%d)" % (config.hidden_size, config.num_attention_heads)) <add> self.output_attentions = config.output_attentions <add> <add> self.num_attention_heads = config.num_attention_heads <add> assert config.hidden_size % config.num_attention_heads == 0 <add> self.attention_head_size = int(config.hidden_size / config.num_attention_heads) <add> self.all_head_size = self.num_attention_heads * self.attention_head_size <add> <add> self.query = tf.keras.layers.Dense(self.all_head_size, name='query') <add> self.key = tf.keras.layers.Dense(self.all_head_size, name='key') <add> self.value = tf.keras.layers.Dense(self.all_head_size, name='value') <add> <add> self.dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob) <add> <add> def transpose_for_scores(self, x, batch_size): <add> x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size)) <add> return tf.transpose(x, perm=[0, 2, 1, 3]) <add> <add> def call(self, inputs, training=False): <add> hidden_states, attention_mask, head_mask = inputs <add> <add> batch_size = tf.shape(hidden_states)[0] <add> mixed_query_layer = 
self.query(hidden_states) <add> mixed_key_layer = self.key(hidden_states) <add> mixed_value_layer = self.value(hidden_states) <add> <add> query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) <add> key_layer = self.transpose_for_scores(mixed_key_layer, batch_size) <add> value_layer = self.transpose_for_scores(mixed_value_layer, batch_size) <add> <add> # Take the dot product between "query" and "key" to get the raw attention scores. <add> attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) # (batch size, num_heads, seq_len_q, seq_len_k) <add> dk = tf.cast(tf.shape(key_layer)[-1], tf.float32) # scale attention_scores <add> attention_scores = attention_scores / tf.math.sqrt(dk) <add> # Apply the attention mask is (precomputed for all layers in TFBertModel call() function) <add> attention_scores = attention_scores + attention_mask <add> <add> # Normalize the attention scores to probabilities. <add> attention_probs = tf.nn.softmax(attention_scores, axis=-1) <add> <add> if training: <add> # This is actually dropping out entire tokens to attend to, which might <add> # seem a bit unusual, but is taken from the original Transformer paper. <add> attention_probs = self.dropout(attention_probs) <add> <add> # Mask heads if we want to <add> if head_mask is not None: <add> attention_probs = attention_probs * head_mask <add> <add> context_layer = tf.matmul(attention_probs, value_layer) <add> <add> context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3]) <add> context_layer = tf.reshape(context_layer, <add> (batch_size, -1, self.all_head_size)) # (batch_size, seq_len_q, all_head_size) <add> <add> outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,) <add> return outputs <add> <add> <add>class TFBertSelfOutput(tf.keras.layers.Layer): <add> def __init__(self, config, **kwargs): <add> super(TFBertSelfOutput, self).__init__(**kwargs) <add> self.dense = tf.keras.layers.Dense(config.hidden_size, name='dense') <add> self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm') <add> self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) <add> <add> def call(self, inputs, training=False): <add> hidden_states, input_tensor = inputs <add> <add> hidden_states = self.dense(hidden_states) <add> if training: <add> hidden_states = self.dropout(hidden_states) <add> hidden_states = self.LayerNorm(hidden_states + input_tensor) <add> return hidden_states <add> <add> <add>class TFBertAttention(tf.keras.layers.Layer): <add> def __init__(self, config, **kwargs): <add> super(TFBertAttention, self).__init__(**kwargs) <add> self.self_attention = TFBertSelfAttention(config, name='self') <add> self.dense_output = TFBertSelfOutput(config, name='output') <add> <add> def prune_heads(self, heads): <add> raise NotImplementedError <add> <add> def call(self, inputs, training=False): <add> input_tensor, attention_mask, head_mask = inputs <add> <add> self_outputs = self.self_attention([input_tensor, attention_mask, head_mask], training=training) <add> attention_output = self.dense_output([self_outputs[0], input_tensor], training=training) <add> outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them <add> return outputs <add> <add> <add>class TFBertIntermediate(tf.keras.layers.Layer): <add> def __init__(self, config, **kwargs): <add> super(TFBertIntermediate, self).__init__(**kwargs) <add> self.dense = tf.keras.layers.Dense(config.intermediate_size, name='dense') <add> if 
isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)): <add> self.intermediate_act_fn = ACT2FN[config.hidden_act] <add> else: <add> self.intermediate_act_fn = config.hidden_act <add> <add> def call(self, hidden_states): <add> hidden_states = self.dense(hidden_states) <add> hidden_states = self.intermediate_act_fn(hidden_states) <add> return hidden_states <add> <add> <add>class TFBertOutput(tf.keras.layers.Layer): <add> def __init__(self, config, **kwargs): <add> super(TFBertOutput, self).__init__(**kwargs) <add> self.dense = tf.keras.layers.Dense(config.hidden_size, name='dense') <add> self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm') <add> self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) <add> <add> def call(self, inputs, training=False): <add> hidden_states, input_tensor = inputs <add> <add> hidden_states = self.dense(hidden_states) <add> if training: <add> hidden_states = self.dropout(hidden_states) <add> hidden_states = self.LayerNorm(hidden_states + input_tensor) <add> return hidden_states <add> <add> <add>class TFBertLayer(tf.keras.layers.Layer): <add> def __init__(self, config, **kwargs): <add> super(TFBertLayer, self).__init__(**kwargs) <add> self.attention = TFBertAttention(config, name='attention') <add> self.intermediate = TFBertIntermediate(config, name='intermediate') <add> self.bert_output = TFBertOutput(config, name='output') <add> <add> def call(self, inputs, training=False): <add> hidden_states, attention_mask, head_mask = inputs <add> <add> attention_outputs = self.attention([hidden_states, attention_mask, head_mask], training=training) <add> attention_output = attention_outputs[0] <add> intermediate_output = self.intermediate(attention_output) <add> layer_output = self.bert_output([intermediate_output, attention_output], training=training) <add> outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them <add> return outputs <add> <add> <add>class TFBertEncoder(tf.keras.layers.Layer): <add> def __init__(self, config, **kwargs): <add> super(TFBertEncoder, self).__init__(**kwargs) <add> self.output_attentions = config.output_attentions <add> self.output_hidden_states = config.output_hidden_states <add> self.layer = [TFBertLayer(config, name='layer_{}'.format(i)) for i in range(config.num_hidden_layers)] <add> <add> def call(self, inputs, training=False): <add> hidden_states, attention_mask, head_mask = inputs <add> <add> all_hidden_states = () <add> all_attentions = () <add> for i, layer_module in enumerate(self.layer): <add> if self.output_hidden_states: <add> all_hidden_states = all_hidden_states + (hidden_states,) <add> <add> layer_outputs = layer_module([hidden_states, attention_mask, head_mask[i]], training=training) <add> hidden_states = layer_outputs[0] <add> <add> if self.output_attentions: <add> all_attentions = all_attentions + (layer_outputs[1],) <add> <add> # Add last layer <add> if self.output_hidden_states: <add> all_hidden_states = all_hidden_states + (hidden_states,) <add> <add> outputs = (hidden_states,) <add> if self.output_hidden_states: <add> outputs = outputs + (all_hidden_states,) <add> if self.output_attentions: <add> outputs = outputs + (all_attentions,) <add> return outputs # outputs, (hidden states), (attentions) <add> <add> <add>class TFBertPooler(tf.keras.layers.Layer): <add> def __init__(self, config, **kwargs): <add> super(TFBertPooler, self).__init__(**kwargs) <add> self.dense = 
tf.keras.layers.Dense(config.hidden_size, activation='tanh', name='dense') <add> <add> def call(self, hidden_states): <add> # We "pool" the model by simply taking the hidden state corresponding <add> # to the first token. <add> first_token_tensor = hidden_states[:, 0] <add> pooled_output = self.dense(first_token_tensor) <add> return pooled_output <add> <add> <add>class TFBertPredictionHeadTransform(tf.keras.layers.Layer): <add> def __init__(self, config, **kwargs): <add> super(TFBertPredictionHeadTransform, self).__init__(**kwargs) <add> self.dense = tf.keras.layers.Dense(config.hidden_size, name='dense') <add> if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)): <add> self.transform_act_fn = ACT2FN[config.hidden_act] <add> else: <add> self.transform_act_fn = config.hidden_act <add> self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='LayerNorm') <add> <add> def call(self, hidden_states): <add> hidden_states = self.dense(hidden_states) <add> hidden_states = self.transform_act_fn(hidden_states) <add> hidden_states = self.LayerNorm(hidden_states) <add> return hidden_states <add> <add> <add>class TFBertLMPredictionHead(tf.keras.layers.Layer): <add> def __init__(self, config, **kwargs): <add> super(TFBertLMPredictionHead, self).__init__(**kwargs) <add> self.vocab_size = config.vocab_size <add> self.transform = TFBertPredictionHeadTransform(config, name='transform') <add> <add> # The output weights are the same as the input embeddings, but there is <add> # an output-only bias for each token. <add> self.decoder = tf.keras.layers.Dense(config.vocab_size, use_bias=False, name='decoder') <add> <add> def build(self, input_shape): <add> self.bias = self.add_weight(shape=(self.vocab_size,), <add> initializer='zeros', <add> trainable=True, <add> name='bias') <add> <add> def call(self, hidden_states): <add> hidden_states = self.transform(hidden_states) <add> hidden_states = self.decoder(hidden_states) + self.bias <add> return hidden_states <add> <add> <add>class TFBertMLMHead(tf.keras.layers.Layer): <add> def __init__(self, config, **kwargs): <add> super(TFBertMLMHead, self).__init__(**kwargs) <add> self.predictions = TFBertLMPredictionHead(config, name='predictions') <add> <add> def call(self, sequence_output): <add> prediction_scores = self.predictions(sequence_output) <add> return prediction_scores <add> <add> <add>class TFBertNSPHead(tf.keras.layers.Layer): <add> def __init__(self, config, **kwargs): <add> super(TFBertNSPHead, self).__init__(**kwargs) <add> self.seq_relationship = tf.keras.layers.Dense(2, name='seq_relationship') <add> <add> def call(self, pooled_output): <add> seq_relationship_score = self.seq_relationship(pooled_output) <add> return seq_relationship_score <add> <add> <add>class TFBertMainLayer(tf.keras.layers.Layer): <add> def __init__(self, config, **kwargs): <add> super(TFBertMainLayer, self).__init__(**kwargs) <add> self.num_hidden_layers = config.num_hidden_layers <add> <add> self.embeddings = TFBertEmbeddings(config, name='embeddings') <add> self.encoder = TFBertEncoder(config, name='encoder') <add> self.pooler = TFBertPooler(config, name='pooler') <add> <add> # self.apply(self.init_weights) # TODO check weights initialization <add> <add> def _resize_token_embeddings(self, new_num_tokens): <add> raise NotImplementedError <add> <add> def _prune_heads(self, heads_to_prune): <add> """ Prunes heads of the model. 
<add> heads_to_prune: dict of {layer_num: list of heads to prune in this layer} <add> See base class PreTrainedModel <add> """ <add> raise NotImplementedError <add> <add> def call(self, inputs, training=False): <add> if not isinstance(inputs, (dict, tuple, list)): <add> input_ids = inputs <add> attention_mask, head_mask, position_ids, token_type_ids = None, None, None, None <add> elif isinstance(inputs, (tuple, list)): <add> input_ids = inputs[0] <add> attention_mask = inputs[1] if len(inputs) > 1 else None <add> token_type_ids = inputs[2] if len(inputs) > 2 else None <add> position_ids = inputs[3] if len(inputs) > 3 else None <add> head_mask = inputs[4] if len(inputs) > 4 else None <add> assert len(inputs) <= 5, "Too many inputs." <add> else: <add> input_ids = inputs.pop('input_ids') <add> attention_mask = inputs.pop('attention_mask', None) <add> token_type_ids = inputs.pop('token_type_ids', None) <add> position_ids = inputs.pop('position_ids', None) <add> head_mask = inputs.pop('head_mask', None) <add> assert len(inputs) == 0, "Unexpected inputs detected: {}. Check inputs dict key names.".format(list(inputs.keys())) <add> <add> if attention_mask is None: <add> attention_mask = tf.fill(tf.shape(input_ids), 1) <add> if token_type_ids is None: <add> token_type_ids = tf.fill(tf.shape(input_ids), 0) <add> <add> # We create a 3D attention mask from a 2D tensor mask. <add> # Sizes are [batch_size, 1, 1, to_seq_length] <add> # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] <add> # this attention mask is more simple than the triangular masking of causal attention <add> # used in OpenAI GPT, we just need to prepare the broadcast dimension here. <add> extended_attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :] <add> <add> # Since attention_mask is 1.0 for positions we want to attend and 0.0 for <add> # masked positions, this operation will create a tensor which is 0.0 for <add> # positions we want to attend and -10000.0 for masked positions. <add> # Since we are adding it to the raw scores before the softmax, this is <add> # effectively the same as removing these entirely. 
<add> <add> extended_attention_mask = tf.cast(extended_attention_mask, tf.float32) <add> extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 <add> <add> # Prepare head mask if needed <add> # 1.0 in head_mask indicate we keep the head <add> # attention_probs has shape bsz x n_heads x N x N <add> # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] <add> # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] <add> if not head_mask is None: <add> raise NotImplementedError <add> else: <add> head_mask = [None] * self.num_hidden_layers <add> # head_mask = tf.constant([0] * self.num_hidden_layers) <add> <add> embedding_output = self.embeddings([input_ids, position_ids, token_type_ids], training=training) <add> encoder_outputs = self.encoder([embedding_output, extended_attention_mask, head_mask], training=training) <add> <add> sequence_output = encoder_outputs[0] <add> pooled_output = self.pooler(sequence_output) <add> <add> outputs = (sequence_output, pooled_output,) + encoder_outputs[1:] # add hidden_states and attentions if they are here <add> return outputs # sequence_output, pooled_output, (hidden_states), (attentions) <add> <add>class TFBertPreTrainedModel(TFPreTrainedModel): <add> """ An abstract class to handle weights initialization and <add> a simple interface for dowloading and loading pretrained models. <add> """ <add> config_class = BertConfig <add> pretrained_model_archive_map = TF_BERT_PRETRAINED_MODEL_ARCHIVE_MAP <add> load_pt_weights = load_pt_weights_in_bert <add> base_model_prefix = "bert" <add> <add> def __init__(self, *inputs, **kwargs): <add> super(TFBertPreTrainedModel, self).__init__(*inputs, **kwargs) <add> <add> def init_weights(self, module): <add> """ Initialize the weights. <add> """ <add> raise NotImplementedError <add> <add> <add>BERT_START_DOCSTRING = r""" The BERT model was proposed in <add> `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ <add> by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova. It's a bidirectional transformer <add> pre-trained using a combination of masked language modeling objective and next sentence prediction <add> on a large corpus comprising the Toronto Book Corpus and Wikipedia. <add> <add> This model is a tf.keras.Model `tf.keras.Model`_ sub-class. Use it as a regular TF 2.0 Keras Model and <add> refer to the TF 2.0 documentation for all matter related to general usage and behavior. <add> <add> .. _`BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`: <add> https://arxiv.org/abs/1810.04805 <add> <add> .. _`tf.keras.Model`: <add> https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/Model <add> <add> Important note on the model inputs: <add> The inputs of the TF 2.0 models are slightly different from the PyTorch ones since <add> TF 2.0 Keras doesn't accept named arguments with defaults values for input Tensor. <add> More precisely, input Tensors are gathered in the first arguments of the model call function: `model(inputs)`. 
<add> There are three possibilities to gather and feed the inputs to the model: <add> <add> - a single Tensor with input_ids only and nothing else: `model(inputs_ids) <add> - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: <add> `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` <add> - a dictionary with one or several input Tensors associaed to the input names given in the docstring: <add> `model({'input_ids': input_ids, 'token_type_ids': token_type_ids})` <add> <add> Parameters: <add> config (:class:`~pytorch_transformers.BertConfig`): Model configuration class with all the parameters of the model. <add> Initializing with a config file does not load the weights associated with the model, only the configuration. <add> Check out the :meth:`~pytorch_transformers.PreTrainedModel.from_pretrained` method to load the model weights. <add>""" <add> <add>BERT_INPUTS_DOCSTRING = r""" <add> Inputs: <add> **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: <add> Indices of input sequence tokens in the vocabulary. <add> To match pre-training, BERT input sequence should be formatted with [CLS] and [SEP] tokens as follows: <add> <add> (a) For sequence pairs: <add> <add> ``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]`` <add> <add> ``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1`` <add> <add> (b) For single sequences: <add> <add> ``tokens: [CLS] the dog is hairy . [SEP]`` <add> <add> ``token_type_ids: 0 0 0 0 0 0 0`` <add> <add> Bert is a model with absolute position embeddings so it's usually advised to pad the inputs on <add> the right rather than the left. <add> <add> Indices can be obtained using :class:`pytorch_transformers.BertTokenizer`. <add> See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and <add> :func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details. <add> **attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``: <add> Mask to avoid performing attention on padding token indices. <add> Mask values selected in ``[0, 1]``: <add> ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. <add> **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: <add> Segment token indices to indicate first and second portions of the inputs. <add> Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1`` <add> corresponds to a `sentence B` token <add> (see `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details). <add> **position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: <add> Indices of positions of each input sequence tokens in the position embeddings. <add> Selected in the range ``[0, config.max_position_embeddings - 1]``. <add> **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``: <add> Mask to nullify selected heads of the self-attention modules. <add> Mask values selected in ``[0, 1]``: <add> ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. 
<add>""" <add> <add>@add_start_docstrings("The bare Bert Model transformer outputing raw hidden-states without any specific head on top.", <add> BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING) <add>class TFBertModel(TFBertPreTrainedModel): <add> r""" <add> Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: <add> **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)`` <add> Sequence of hidden-states at the output of the last layer of the model. <add> **pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)`` <add> Last layer hidden-state of the first token of the sequence (classification token) <add> further processed by a Linear layer and a Tanh activation function. The Linear <add> layer weights are trained from the next sentence prediction (classification) <add> objective during Bert pretraining. This output is usually *not* a good summary <add> of the semantic content of the input, you're often better with averaging or pooling <add> the sequence of hidden-states for the whole input sequence. <add> **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) <add> list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) <add> of shape ``(batch_size, sequence_length, hidden_size)``: <add> Hidden-states of the model at the output of each layer plus the initial embedding outputs. <add> **attentions**: (`optional`, returned when ``config.output_attentions=True``) <add> list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: <add> Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. <add> <add> Examples:: <add> <add> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') <add> model = TFBertModel.from_pretrained('bert-base-uncased') <add> input_ids = tf.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 <add> outputs = model(input_ids) <add> last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple <add> <add> """ <add> def __init__(self, config): <add> super(TFBertModel, self).__init__(config) <add> self.bert = TFBertMainLayer(config, name='bert') <add> <add> def call(self, inputs, training=False): <add> outputs = self.bert(inputs, training=training) <add> return outputs <add> <add> <add>@add_start_docstrings("""Bert Model with two heads on top as done during the pre-training: <add> a `masked language modeling` head and a `next sentence prediction (classification)` head. """, <add> BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING) <add>class TFBertForPreTraining(TFBertPreTrainedModel): <add> r""" <add> **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: <add> Labels for computing the masked language modeling loss. <add> Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) <add> Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels <add> in ``[0, ..., config.vocab_size]`` <add> **next_sentence_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: <add> Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see ``input_ids`` docstring) <add> Indices should be in ``[0, 1]``. 
<add> ``0`` indicates sequence B is a continuation of sequence A, <add> ``1`` indicates sequence B is a random sequence. <add> <add> Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: <add> **loss**: (`optional`, returned when both ``masked_lm_labels`` and ``next_sentence_label`` are provided) ``torch.FloatTensor`` of shape ``(1,)``: <add> Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss. <add> **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)`` <add> Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). <add> **seq_relationship_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, 2)`` <add> Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax). <add> **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) <add> list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) <add> of shape ``(batch_size, sequence_length, hidden_size)``: <add> Hidden-states of the model at the output of each layer plus the initial embedding outputs. <add> **attentions**: (`optional`, returned when ``config.output_attentions=True``) <add> list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: <add> Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. <add> <add> Examples:: <add> <add> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') <add> model = TFBertForPreTraining.from_pretrained('bert-base-uncased') <add> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 <add> outputs = model(input_ids) <add> prediction_scores, seq_relationship_scores = outputs[:2] <add> <add> """ <add> def __init__(self, config): <add> super(TFBertForPreTraining, self).__init__(config) <add> <add> self.bert = TFBertMainLayer(config, name='bert') <add> self.cls_mlm = TFBertMLMHead(config, name='cls_mlm') <add> self.cls_nsp = TFBertNSPHead(config, name='cls_nsp') <add> <add> # self.apply(self.init_weights) # TODO check added weights initialization <add> self.tie_weights() <add> <add> def tie_weights(self): <add> """ Make sure we are sharing the input and output embeddings. 
<add> """ <add> pass # TODO add weights tying <add> <add> def call(self, inputs, training=False): <add> outputs = self.bert(inputs, training=training) <add> <add> sequence_output, pooled_output = outputs[:2] <add> prediction_scores = self.cls_mlm(sequence_output) <add> seq_relationship_score = self.cls_nsp(pooled_output) <add> <add> outputs = (prediction_scores, seq_relationship_score,) + outputs[2:] # add hidden states and attention if they are here <add> <add> # if masked_lm_labels is not None and next_sentence_label is not None: <add> # loss_fct = CrossEntropyLoss(ignore_index=-1) <add> # masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) <add> # next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) <add> # total_loss = masked_lm_loss + next_sentence_loss <add> # outputs = (total_loss,) + outputs <add> # TODO add example with losses using model.compile and a dictionary of losses (give names to the output layers) <add> <add> return outputs # prediction_scores, seq_relationship_score, (hidden_states), (attentions) <add> <add> <add>@add_start_docstrings("""Bert Model with a `language modeling` head on top. """, <add> BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING) <add>class TFBertForMaskedLM(TFBertPreTrainedModel): <add> r""" <add> **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: <add> Labels for computing the masked language modeling loss. <add> Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) <add> Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels <add> in ``[0, ..., config.vocab_size]`` <add> <add> Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: <add> **loss**: (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: <add> Masked language modeling loss. <add> **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)`` <add> Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). <add> **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) <add> list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) <add> of shape ``(batch_size, sequence_length, hidden_size)``: <add> Hidden-states of the model at the output of each layer plus the initial embedding outputs. <add> **attentions**: (`optional`, returned when ``config.output_attentions=True``) <add> list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: <add> Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
<add> <add> Examples:: <add> <add> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') <add> model = TFBertForMaskedLM.from_pretrained('bert-base-uncased') <add> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 <add> outputs = model(input_ids, masked_lm_labels=input_ids) <add> loss, prediction_scores = outputs[:2] <add> <add> """ <add> def __init__(self, config): <add> super(TFBertForMaskedLM, self).__init__(config) <add> <add> self.bert = TFBertMainLayer(config, name='bert') <add> self.cls_mlm = TFBertMLMHead(config, name='cls_mlm') <add> <add> # self.apply(self.init_weights) <add> self.tie_weights() <add> <add> def tie_weights(self): <add> """ Make sure we are sharing the input and output embeddings. <add> """ <add> pass # TODO add weights tying <add> <add> def call(self, inputs, training=False): <add> outputs = self.bert(inputs, training=training) <add> <add> sequence_output = outputs[0] <add> prediction_scores = self.cls_mlm(sequence_output) <add> <add> outputs = (prediction_scores,) + outputs[2:] # Add hidden states and attention if they are here <add> # if masked_lm_labels is not None: <add> # loss_fct = CrossEntropyLoss(ignore_index=-1) <add> # masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) <add> # outputs = (masked_lm_loss,) + outputs <add> # TODO example with losses <add> <add> return outputs # prediction_scores, (hidden_states), (attentions) <add> <add> <add>@add_start_docstrings("""Bert Model with a `next sentence prediction (classification)` head on top. """, <add> BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING) <add>class TFBertForNextSentencePrediction(TFBertPreTrainedModel): <add> r""" <add> **next_sentence_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: <add> Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see ``input_ids`` docstring) <add> Indices should be in ``[0, 1]``. <add> ``0`` indicates sequence B is a continuation of sequence A, <add> ``1`` indicates sequence B is a random sequence. <add> <add> Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: <add> **loss**: (`optional`, returned when ``next_sentence_label`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: <add> Next sequence prediction (classification) loss. <add> **seq_relationship_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, 2)`` <add> Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax). <add> **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) <add> list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) <add> of shape ``(batch_size, sequence_length, hidden_size)``: <add> Hidden-states of the model at the output of each layer plus the initial embedding outputs. <add> **attentions**: (`optional`, returned when ``config.output_attentions=True``) <add> list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: <add> Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
<add> <add> Examples:: <add> <add> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') <add> model = TFBertForNextSentencePrediction.from_pretrained('bert-base-uncased') <add> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 <add> outputs = model(input_ids) <add> seq_relationship_scores = outputs[0] <add> <add> """ <add> def __init__(self, config): <add> super(TFBertForNextSentencePrediction, self).__init__(config) <add> <add> self.bert = TFBertMainLayer(config, name='bert') <add> self.cls_nsp = TFBertNSPHead(config, name='cls_nsp') <add> <add> # self.apply(self.init_weights) <add> <add> def call(self, inputs, training=False): <add> outputs = self.bert(inputs, training=training) <add> <add> pooled_output = outputs[1] <add> seq_relationship_score = self.cls_nsp(pooled_output) <add> <add> outputs = (seq_relationship_score,) + outputs[2:] # add hidden states and attention if they are here <add> # if next_sentence_label is not None: <add> # loss_fct = CrossEntropyLoss(ignore_index=-1) <add> # next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) <add> # outputs = (next_sentence_loss,) + outputs <add> <add> return outputs # seq_relationship_score, (hidden_states), (attentions) <ide><path>pytorch_transformers/modeling_tf_utils.py <add># coding=utf-8 <add># Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. <add># Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. <add># <add># Licensed under the Apache License, Version 2.0 (the "License"); <add># you may not use this file except in compliance with the License. <add># You may obtain a copy of the License at <add># <add># http://www.apache.org/licenses/LICENSE-2.0 <add># <add># Unless required by applicable law or agreed to in writing, software <add># distributed under the License is distributed on an "AS IS" BASIS, <add># WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. <add># See the License for the specific language governing permissions and <add># limitations under the License. <add>"""TF general model utils.""" <add> <add>from __future__ import (absolute_import, division, print_function, <add> unicode_literals) <add> <add>import logging <add>import os <add> <add>import tensorflow as tf <add> <add>from .configuration_utils import PretrainedConfig <add>from .file_utils import cached_path, WEIGHTS_NAME, TF_WEIGHTS_NAME <add> <add>logger = logging.getLogger(__name__) <add> <add> <add>class TFPreTrainedModel(tf.keras.Model): <add> r""" Base class for all TF models. <add> <add> :class:`~pytorch_transformers.TFPreTrainedModel` takes care of storing the configuration of the models and handles methods for loading/downloading/saving models <add> as well as a few methods commons to all models to (i) resize the input embeddings and (ii) prune heads in the self-attention heads. <add> <add> Class attributes (overridden by derived classes): <add> - ``config_class``: a class derived from :class:`~pytorch_transformers.PretrainedConfig` to use as configuration class for this model architecture. <add> - ``pretrained_model_archive_map``: a python ``dict`` of with `short-cut-names` (string) as keys and `url` (string) of associated pretrained weights as values. 
<add> - ``load_tf_weights``: a python ``method`` for loading a TensorFlow checkpoint in a PyTorch model, taking as arguments:
<add>
<add> - ``model``: an instance of the relevant subclass of :class:`~pytorch_transformers.PreTrainedModel`,
<add> - ``config``: an instance of the relevant subclass of :class:`~pytorch_transformers.PretrainedConfig`,
<add> - ``path``: a path (string) to the TensorFlow checkpoint.
<add>
<add> - ``base_model_prefix``: a string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model.
<add> """
<add> config_class = None
<add> pretrained_model_archive_map = {}
<add> load_pt_weights = lambda model, config, path: None
<add> base_model_prefix = ""
<add>
<add> def __init__(self, config, *inputs, **kwargs):
<add> super(TFPreTrainedModel, self).__init__()
<add> if not isinstance(config, PretrainedConfig):
<add> raise ValueError(
<add> "Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. "
<add> "To create a model from a pretrained model use "
<add> "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
<add> self.__class__.__name__, self.__class__.__name__
<add> ))
<add> # Save config in model
<add> self.config = config
<add>
<add> def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None):
<add> """ Build a resized Embedding Module from a provided token Embedding Module.
<add> Increasing the size will add newly initialized vectors at the end
<add> Reducing the size will remove vectors from the end
<add>
<add> Args:
<add> new_num_tokens: (`optional`) int
<add> New number of tokens in the embedding matrix.
<add> Increasing the size will add newly initialized vectors at the end
<add> Reducing the size will remove vectors from the end
<add> If not provided or None: return the provided token Embedding Module.
<add> Return: ``torch.nn.Embeddings``
<add> Pointer to the resized Embedding Module or the old Embedding Module if new_num_tokens is None
<add> """
<add> raise NotImplementedError
<add>
<add> def _tie_or_clone_weights(self, first_module, second_module):
<add> """ Tie or clone module weights depending on whether we are using TorchScript or not
<add> """
<add> raise NotImplementedError
<add>
<add> def resize_token_embeddings(self, new_num_tokens=None):
<add> """ Resize input token embeddings matrix of the model if new_num_tokens != config.vocab_size.
<add> Take care of tying weights embeddings afterwards if the model class has a `tie_weights()` method.
<add>
<add> Arguments:
<add>
<add> new_num_tokens: (`optional`) int:
<add> New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end.
<add> If not provided or None: does nothing and just returns a pointer to the input tokens ``torch.nn.Embeddings`` Module of the model.
<add>
<add> Return: ``torch.nn.Embeddings``
<add> Pointer to the input tokens Embeddings Module of the model
<add> """
<add> raise NotImplementedError
<add>
<add> def prune_heads(self, heads_to_prune):
<add> """ Prunes heads of the base model.
<add>
<add> Arguments:
<add>
<add> heads_to_prune: dict with keys being selected layer indices (`int`) and associated values being the list of heads to prune in said layer (list of `int`).
<add> """
<add> raise NotImplementedError
<add>
<add> def save_pretrained(self, save_directory):
<add> """ Save a model and its configuration file to a directory, so that it
<add> can be re-loaded using the `:func:`~pytorch_transformers.PreTrainedModel.from_pretrained`` class method.
<add> """
<add> raise NotImplementedError
<add>
<add> @classmethod
<add> def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
<add> r"""Instantiate a pretrained pytorch model from a pre-trained model configuration.
<add>
<add> The model is set in evaluation mode by default using ``model.eval()`` (Dropout modules are deactivated)
<add> To train the model, you should first set it back in training mode with ``model.train()``
<add>
<add> The warning ``Weights from XXX not initialized from pretrained model`` means that the weights of XXX do not come pre-trained with the rest of the model.
<add> It is up to you to train those weights with a downstream fine-tuning task.
<add>
<add> The warning ``Weights from XXX not used in YYY`` means that the layer XXX is not used by YYY, therefore those weights are discarded.
<add>
<add> Parameters:
<add> pretrained_model_name_or_path: either:
<add>
<add> - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
<add> - a path to a `directory` containing model weights saved using :func:`~pytorch_transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
<add> - a path or url to a `PyTorch state_dict save file` (e.g. `./pt_model/pytorch_model.bin`). In this case, ``from_pt`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the PyTorch checkpoint in a TensorFlow model using the provided conversion scripts and loading the TensorFlow model afterwards.
<add>
<add> model_args: (`optional`) Sequence of positional arguments:
<add> All remaining positional arguments will be passed to the underlying model's ``__init__`` method
<add>
<add> config: (`optional`) instance of a class derived from :class:`~pytorch_transformers.PretrainedConfig`:
<add> Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:
<add>
<add> - the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
<add> - the model was saved using :func:`~pytorch_transformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory.
<add> - the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
<add>
<add> from_pt: (`optional`) boolean, default False:
<add> Load the model weights from a PyTorch state_dict save file (see docstring of pretrained_model_name_or_path argument).
<add>
<add> cache_dir: (`optional`) string:
<add> Path to a directory in which a downloaded pre-trained model
<add> configuration should be cached if the standard cache should not be used.
<add>
<add> force_download: (`optional`) boolean, default False:
<add> Force to (re-)download the model weights and configuration files and override the cached versions if they exist.
<add>
<add> proxies: (`optional`) dict, default None:
<add> A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
<add> The proxies are used on each request.
<add>
<add> output_loading_info: (`optional`) boolean:
<add> Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.
<add>
<add> kwargs: (`optional`) Remaining dictionary of keyword arguments:
<add> Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behaves differently depending on whether a `config` is provided or automatically loaded:
<add>
<add> - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)
<add> - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~pytorch_transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.
<add>
<add> Examples::
<add>
<add> model = BertModel.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
<add> model = BertModel.from_pretrained('./test/saved_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
<add> model = BertModel.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading
<add> assert model.config.output_attention == True
<add> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
<add> config = BertConfig.from_json_file('./tf_model/my_tf_model_config.json')
<add> model = BertModel.from_pretrained('./tf_model/my_tf_checkpoint.ckpt.index', from_pt=True, config=config)
<add>
<add> """
<add> config = kwargs.pop('config', None)
<add> cache_dir = kwargs.pop('cache_dir', None)
<add> from_pt = kwargs.pop('from_pt', False)
<add> force_download = kwargs.pop('force_download', False)
<add> proxies = kwargs.pop('proxies', None)
<add> output_loading_info = kwargs.pop('output_loading_info', False)
<add>
<add> # Load config
<add> if config is None:
<add> config, model_kwargs = cls.config_class.from_pretrained(
<add> pretrained_model_name_or_path, *model_args,
<add> cache_dir=cache_dir, return_unused_kwargs=True,
<add> force_download=force_download,
<add> **kwargs
<add> )
<add> else:
<add> model_kwargs = kwargs
<add>
<add> # Load model
<add> if pretrained_model_name_or_path in cls.pretrained_model_archive_map:
<add> archive_file = cls.pretrained_model_archive_map[pretrained_model_name_or_path]
<add> elif os.path.isdir(pretrained_model_name_or_path):
<add> if from_pt:
<add> # Load from a PyTorch checkpoint
<add> archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
<add> else:
<add> archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME)
<add> else:
<add> archive_file = pretrained_model_name_or_path
<add> # redirect to the cache, if necessary
<add> try:
<add> resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies)
<add> except EnvironmentError:
<add> if pretrained_model_name_or_path in cls.pretrained_model_archive_map:
<add> logger.error(
<add> "Couldn't reach server at '{}' to download pretrained weights.".format(
<add> archive_file))
<add> else:
<add> logger.error(
<add> "Model name '{}' was not found in model name list ({}).
" <add> "We assumed '{}' was a path or url but couldn't find any file " <add> "associated to this path or url.".format( <add> pretrained_model_name_or_path, <add> ', '.join(cls.pretrained_model_archive_map.keys()), <add> archive_file)) <add> return None <add> if resolved_archive_file == archive_file: <add> logger.info("loading weights file {}".format(archive_file)) <add> else: <add> logger.info("loading weights file {} from cache at {}".format( <add> archive_file, resolved_archive_file)) <add> <add> # Instantiate model. <add> model = cls(config, *model_args, **model_kwargs) <add> <add> if from_pt: <add> # Load from a PyTorch checkpoint <add> return cls.load_pt_weights(model, config, resolved_archive_file) <add> <add> inputs = tf.constant([[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]) <add> ret = model(inputs, training=False) # build the network with dummy inputs <add> <add> # 'by_name' allow us to do transfer learning by skipping/adding layers <add> # see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1339-L1357 <add> model.load_weights(resolved_archive_file, by_name=True) <add> <add> ret = model(inputs, training=False) # Make sure restore ops are run <add> <add> # if hasattr(model, 'tie_weights'): <add> # model.tie_weights() # TODO make sure word embedding weights are still tied <add> <add> if output_loading_info: <add> loading_info = {"missing_keys": missing_keys, "unexpected_keys": unexpected_keys, "error_msgs": error_msgs} <add> return model, loading_info <add> <add> return model <ide><path>pytorch_transformers/tests/modeling_tf_common_test.py <add># coding=utf-8 <add># Copyright 2019 HuggingFace Inc. <add># <add># Licensed under the Apache License, Version 2.0 (the "License"); <add># you may not use this file except in compliance with the License. <add># You may obtain a copy of the License at <add># <add># http://www.apache.org/licenses/LICENSE-2.0 <add># <add># Unless required by applicable law or agreed to in writing, software <add># distributed under the License is distributed on an "AS IS" BASIS, <add># WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. <add># See the License for the specific language governing permissions and <add># limitations under the License. 
<add>from __future__ import absolute_import <add>from __future__ import division <add>from __future__ import print_function <add> <add>import copy <add>import os <add>import shutil <add>import json <add>import random <add>import uuid <add> <add>import unittest <add>import logging <add> <add>import tensorflow as tf <add> <add>from pytorch_transformers import TFPreTrainedModel <add># from pytorch_transformers.modeling_bert import BertModel, BertConfig, BERT_PRETRAINED_MODEL_ARCHIVE_MAP <add> <add> <add>def _config_zero_init(config): <add> configs_no_init = copy.deepcopy(config) <add> for key in configs_no_init.__dict__.keys(): <add> if '_range' in key or '_std' in key: <add> setattr(configs_no_init, key, 0.0) <add> return configs_no_init <add> <add>class TFCommonTestCases: <add> <add> class TFCommonModelTester(unittest.TestCase): <add> <add> model_tester = None <add> all_model_classes = () <add> test_torchscript = True <add> test_pruning = True <add> test_resize_embeddings = True <add> <add> def test_initialization(self): <add> pass <add> # config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() <add> <add> # configs_no_init = _config_zero_init(config) <add> # for model_class in self.all_model_classes: <add> # model = model_class(config=configs_no_init) <add> # for name, param in model.named_parameters(): <add> # if param.requires_grad: <add> # self.assertIn(param.data.mean().item(), [0.0, 1.0], <add> # msg="Parameter {} of model {} seems not properly initialized".format(name, model_class)) <add> <add> <add> def test_attention_outputs(self): <add> pass <add> # config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() <add> <add> # for model_class in self.all_model_classes: <add> # config.output_attentions = True <add> # config.output_hidden_states = False <add> # model = model_class(config) <add> # model.eval() <add> # outputs = model(**inputs_dict) <add> # attentions = outputs[-1] <add> # self.assertEqual(model.config.output_attentions, True) <add> # self.assertEqual(model.config.output_hidden_states, False) <add> # self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) <add> # self.assertListEqual( <add> # list(attentions[0].shape[-3:]), <add> # [self.model_tester.num_attention_heads, <add> # self.model_tester.seq_length, <add> # self.model_tester.key_len if hasattr(self.model_tester, 'key_len') else self.model_tester.seq_length]) <add> # out_len = len(outputs) <add> <add> # # Check attention is always last and order is fine <add> # config.output_attentions = True <add> # config.output_hidden_states = True <add> # model = model_class(config) <add> # model.eval() <add> # outputs = model(**inputs_dict) <add> # self.assertEqual(out_len+1, len(outputs)) <add> # self.assertEqual(model.config.output_attentions, True) <add> # self.assertEqual(model.config.output_hidden_states, True) <add> <add> # attentions = outputs[-1] <add> # self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) <add> # self.assertListEqual( <add> # list(attentions[0].shape[-3:]), <add> # [self.model_tester.num_attention_heads, <add> # self.model_tester.seq_length, <add> # self.model_tester.key_len if hasattr(self.model_tester, 'key_len') else self.model_tester.seq_length]) <add> <add> <add> def test_headmasking(self): <add> pass <add> # config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() <add> <add> # config.output_attentions = True <add> # config.output_hidden_states = True <add> # configs_no_init = 
_config_zero_init(config) # To be sure we have no Nan <add> # for model_class in self.all_model_classes: <add> # model = model_class(config=configs_no_init) <add> # model.eval() <add> <add> # # Prepare head_mask <add> # # Set require_grad after having prepared the tensor to avoid error (leaf variable has been moved into the graph interior) <add> # head_mask = torch.ones(self.model_tester.num_hidden_layers, self.model_tester.num_attention_heads) <add> # head_mask[0, 0] = 0 <add> # head_mask[-1, :-1] = 0 <add> # head_mask.requires_grad_(requires_grad=True) <add> # inputs = inputs_dict.copy() <add> # inputs['head_mask'] = head_mask <add> <add> # outputs = model(**inputs) <add> <add> # # Test that we can get a gradient back for importance score computation <add> # output = sum(t.sum() for t in outputs[0]) <add> # output = output.sum() <add> # output.backward() <add> # multihead_outputs = head_mask.grad <add> <add> # attentions = outputs[-1] <add> # hidden_states = outputs[-2] <add> <add> # # Remove Nan <add> <add> # self.assertIsNotNone(multihead_outputs) <add> # self.assertEqual(len(multihead_outputs), self.model_tester.num_hidden_layers) <add> # self.assertAlmostEqual( <add> # attentions[0][..., 0, :, :].flatten().sum().item(), 0.0) <add> # self.assertNotEqual( <add> # attentions[0][..., -1, :, :].flatten().sum().item(), 0.0) <add> # self.assertNotEqual( <add> # attentions[1][..., 0, :, :].flatten().sum().item(), 0.0) <add> # self.assertAlmostEqual( <add> # attentions[-1][..., -2, :, :].flatten().sum().item(), 0.0) <add> # self.assertNotEqual( <add> # attentions[-1][..., -1, :, :].flatten().sum().item(), 0.0) <add> <add> <add> def test_head_pruning(self): <add> pass <add> # if not self.test_pruning: <add> # return <add> <add> # config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() <add> <add> # for model_class in self.all_model_classes: <add> # config.output_attentions = True <add> # config.output_hidden_states = False <add> # model = model_class(config=config) <add> # model.eval() <add> # heads_to_prune = {0: list(range(1, self.model_tester.num_attention_heads)), <add> # -1: [0]} <add> # model.prune_heads(heads_to_prune) <add> # outputs = model(**inputs_dict) <add> <add> # attentions = outputs[-1] <add> <add> # self.assertEqual( <add> # attentions[0].shape[-3], 1) <add> # self.assertEqual( <add> # attentions[1].shape[-3], self.model_tester.num_attention_heads) <add> # self.assertEqual( <add> # attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1) <add> <add> <add> def test_hidden_states_output(self): <add> pass <add> # config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() <add> <add> # for model_class in self.all_model_classes: <add> # config.output_hidden_states = True <add> # config.output_attentions = False <add> # model = model_class(config) <add> # model.eval() <add> # outputs = model(**inputs_dict) <add> # hidden_states = outputs[-1] <add> # self.assertEqual(model.config.output_attentions, False) <add> # self.assertEqual(model.config.output_hidden_states, True) <add> # self.assertEqual(len(hidden_states), self.model_tester.num_hidden_layers + 1) <add> # self.assertListEqual( <add> # list(hidden_states[0].shape[-2:]), <add> # [self.model_tester.seq_length, self.model_tester.hidden_size]) <add> <add> <add> def test_resize_tokens_embeddings(self): <add> pass <add> # original_config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() <add> # if not self.test_resize_embeddings: <add> # return <add> <add> # 
for model_class in self.all_model_classes:
<add> # config = copy.deepcopy(original_config)
<add> # model = model_class(config)
<add>
<add> # model_vocab_size = config.vocab_size
<add> # # Retrieve the embeddings and clone them
<add> # model_embed = model.resize_token_embeddings(model_vocab_size)
<add> # cloned_embeddings = model_embed.weight.clone()
<add>
<add> # # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
<add> # model_embed = model.resize_token_embeddings(model_vocab_size + 10)
<add> # self.assertEqual(model.config.vocab_size, model_vocab_size + 10)
<add> # # Check that it actually resizes the embeddings matrix
<add> # self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10)
<add>
<add> # # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
<add> # model_embed = model.resize_token_embeddings(model_vocab_size - 15)
<add> # self.assertEqual(model.config.vocab_size, model_vocab_size - 15)
<add> # # Check that it actually resizes the embeddings matrix
<add> # self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15)
<add>
<add> # # Check that adding and removing tokens has not modified the first part of the embedding matrix.
<add> # models_equal = True
<add> # for p1, p2 in zip(cloned_embeddings, model_embed.weight):
<add> # if p1.data.ne(p2.data).sum() > 0:
<add> # models_equal = False
<add>
<add> # self.assertTrue(models_equal)
<add>
<add>
<add> def test_tie_model_weights(self):
<add> pass
<add> # config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
<add>
<add> # def check_same_values(layer_1, layer_2):
<add> # equal = True
<add> # for p1, p2 in zip(layer_1.weight, layer_2.weight):
<add> # if p1.data.ne(p2.data).sum() > 0:
<add> # equal = False
<add> # return equal
<add>
<add> # for model_class in self.all_model_classes:
<add> # if not hasattr(model_class, 'tie_weights'):
<add> # continue
<add>
<add> # config.torchscript = True
<add> # model_not_tied = model_class(config)
<add> # params_not_tied = list(model_not_tied.parameters())
<add>
<add> # config_tied = copy.deepcopy(config)
<add> # config_tied.torchscript = False
<add> # model_tied = model_class(config_tied)
<add> # params_tied = list(model_tied.parameters())
<add>
<add> # # Check that the embedding layer and decoding layer are the same in size and in value
<add> # self.assertGreater(len(params_not_tied), len(params_tied))
<add>
<add> # # Check that after resize they remain tied.
<add> # model_tied.resize_token_embeddings(config.vocab_size + 10) <add> # params_tied_2 = list(model_tied.parameters()) <add> # self.assertGreater(len(params_not_tied), len(params_tied)) <add> # self.assertEqual(len(params_tied_2), len(params_tied)) <add> <add> <add>def ids_tensor(shape, vocab_size, rng=None, name=None): <add> """Creates a random int32 tensor of the shape within the vocab size.""" <add> if rng is None: <add> rng = random.Random() <add> <add> total_dims = 1 <add> for dim in shape: <add> total_dims *= dim <add> <add> values = [] <add> for _ in range(total_dims): <add> values.append(rng.randint(0, vocab_size - 1)) <add> <add> return tf.constant(values, shape=shape) <add> <add> <add>class TFModelUtilsTest(unittest.TestCase): <add> def test_model_from_pretrained(self): <add> pass <add> # logging.basicConfig(level=logging.INFO) <add> # for model_name in list(BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]: <add> # config = BertConfig.from_pretrained(model_name) <add> # self.assertIsNotNone(config) <add> # self.assertIsInstance(config, PretrainedConfig) <add> <add> # model = BertModel.from_pretrained(model_name) <add> # model, loading_info = BertModel.from_pretrained(model_name, output_loading_info=True) <add> # self.assertIsNotNone(model) <add> # self.assertIsInstance(model, PreTrainedModel) <add> # for value in loading_info.values(): <add> # self.assertEqual(len(value), 0) <add> <add> # config = BertConfig.from_pretrained(model_name, output_attentions=True, output_hidden_states=True) <add> # model = BertModel.from_pretrained(model_name, output_attentions=True, output_hidden_states=True) <add> # self.assertEqual(model.config.output_attentions, True) <add> # self.assertEqual(model.config.output_hidden_states, True) <add> # self.assertEqual(model.config, config) <add> <add> <add>if __name__ == "__main__": <add> unittest.main() <ide><path>pytorch_transformers/tests/modeling_tf_test.py <add># coding=utf-8 <add># Copyright 2018 The Google AI Language Team Authors. <add># <add># Licensed under the Apache License, Version 2.0 (the "License"); <add># you may not use this file except in compliance with the License. <add># You may obtain a copy of the License at <add># <add># http://www.apache.org/licenses/LICENSE-2.0 <add># <add># Unless required by applicable law or agreed to in writing, software <add># distributed under the License is distributed on an "AS IS" BASIS, <add># WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. <add># See the License for the specific language governing permissions and <add># limitations under the License. 
<add>from __future__ import absolute_import <add>from __future__ import division <add>from __future__ import print_function <add> <add>import unittest <add>import shutil <add>import pytest <add> <add>import tensorflow as tf <add> <add>from pytorch_transformers import (BertConfig) <add>from pytorch_transformers.modeling_tf_bert import TFBertModel, TF_BERT_PRETRAINED_MODEL_ARCHIVE_MAP <add> <add>from .modeling_tf_common_test import (TFCommonTestCases, ids_tensor) <add>from .configuration_common_test import ConfigTester <add> <add> <add>class TFBertModelTest(TFCommonTestCases.TFCommonModelTester): <add> <add> all_model_classes = (TFBertModel,) <add> # BertForMaskedLM, BertForNextSentencePrediction, <add> # BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, <add> # BertForTokenClassification) <add> <add> class TFBertModelTester(object): <add> <add> def __init__(self, <add> parent, <add> batch_size=13, <add> seq_length=7, <add> is_training=True, <add> use_input_mask=True, <add> use_token_type_ids=True, <add> use_labels=True, <add> vocab_size=99, <add> hidden_size=32, <add> num_hidden_layers=5, <add> num_attention_heads=4, <add> intermediate_size=37, <add> hidden_act="gelu", <add> hidden_dropout_prob=0.1, <add> attention_probs_dropout_prob=0.1, <add> max_position_embeddings=512, <add> type_vocab_size=16, <add> type_sequence_label_size=2, <add> initializer_range=0.02, <add> num_labels=3, <add> num_choices=4, <add> scope=None, <add> ): <add> self.parent = parent <add> self.batch_size = batch_size <add> self.seq_length = seq_length <add> self.is_training = is_training <add> self.use_input_mask = use_input_mask <add> self.use_token_type_ids = use_token_type_ids <add> self.use_labels = use_labels <add> self.vocab_size = vocab_size <add> self.hidden_size = hidden_size <add> self.num_hidden_layers = num_hidden_layers <add> self.num_attention_heads = num_attention_heads <add> self.intermediate_size = intermediate_size <add> self.hidden_act = hidden_act <add> self.hidden_dropout_prob = hidden_dropout_prob <add> self.attention_probs_dropout_prob = attention_probs_dropout_prob <add> self.max_position_embeddings = max_position_embeddings <add> self.type_vocab_size = type_vocab_size <add> self.type_sequence_label_size = type_sequence_label_size <add> self.initializer_range = initializer_range <add> self.num_labels = num_labels <add> self.num_choices = num_choices <add> self.scope = scope <add> <add> def prepare_config_and_inputs(self): <add> input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) <add> <add> input_mask = None <add> if self.use_input_mask: <add> input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) <add> <add> token_type_ids = None <add> if self.use_token_type_ids: <add> token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) <add> <add> sequence_labels = None <add> token_labels = None <add> choice_labels = None <add> if self.use_labels: <add> sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) <add> token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) <add> choice_labels = ids_tensor([self.batch_size], self.num_choices) <add> <add> config = BertConfig( <add> vocab_size_or_config_json_file=self.vocab_size, <add> hidden_size=self.hidden_size, <add> num_hidden_layers=self.num_hidden_layers, <add> num_attention_heads=self.num_attention_heads, <add> intermediate_size=self.intermediate_size, <add> hidden_act=self.hidden_act, <add> 
hidden_dropout_prob=self.hidden_dropout_prob, <add> attention_probs_dropout_prob=self.attention_probs_dropout_prob, <add> max_position_embeddings=self.max_position_embeddings, <add> type_vocab_size=self.type_vocab_size, <add> initializer_range=self.initializer_range) <add> <add> return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels <add> <add> def check_loss_output(self, result): <add> self.parent.assertListEqual( <add> list(result["loss"].size()), <add> []) <add> <add> def create_and_check_bert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): <add> model = TFBertModel(config=config) <add> # model.eval() <add> inputs = {'input_ids': input_ids, <add> 'attention_mask': input_mask, <add> 'token_type_ids': token_type_ids} <add> sequence_output, pooled_output = model(inputs) <add> <add> inputs = [input_ids, input_mask] <add> sequence_output, pooled_output = model(inputs) <add> <add> sequence_output, pooled_output = model(input_ids) <add> <add> result = { <add> "sequence_output": sequence_output.numpy(), <add> "pooled_output": pooled_output.numpy(), <add> } <add> self.parent.assertListEqual( <add> list(result["sequence_output"].shape), <add> [self.batch_size, self.seq_length, self.hidden_size]) <add> self.parent.assertListEqual(list(result["pooled_output"].shape), [self.batch_size, self.hidden_size]) <add> <add> <add> def create_and_check_bert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): <add> pass <add> # model = BertForMaskedLM(config=config) <add> # model.eval() <add> # loss, prediction_scores = model(input_ids, token_type_ids, input_mask, token_labels) <add> # result = { <add> # "loss": loss, <add> # "prediction_scores": prediction_scores, <add> # } <add> # self.parent.assertListEqual( <add> # list(result["prediction_scores"].size()), <add> # [self.batch_size, self.seq_length, self.vocab_size]) <add> # self.check_loss_output(result) <add> <add> <add> def create_and_check_bert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): <add> pass <add> # model = BertForNextSentencePrediction(config=config) <add> # model.eval() <add> # loss, seq_relationship_score = model(input_ids, token_type_ids, input_mask, sequence_labels) <add> # result = { <add> # "loss": loss, <add> # "seq_relationship_score": seq_relationship_score, <add> # } <add> # self.parent.assertListEqual( <add> # list(result["seq_relationship_score"].size()), <add> # [self.batch_size, 2]) <add> # self.check_loss_output(result) <add> <add> <add> def create_and_check_bert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): <add> pass <add> # model = BertForPreTraining(config=config) <add> # model.eval() <add> # loss, prediction_scores, seq_relationship_score = model(input_ids, token_type_ids, input_mask, token_labels, sequence_labels) <add> # result = { <add> # "loss": loss, <add> # "prediction_scores": prediction_scores, <add> # "seq_relationship_score": seq_relationship_score, <add> # } <add> # self.parent.assertListEqual( <add> # list(result["prediction_scores"].size()), <add> # [self.batch_size, self.seq_length, self.vocab_size]) <add> # self.parent.assertListEqual( <add> # list(result["seq_relationship_score"].size()), <add> # [self.batch_size, 2]) <add> # self.check_loss_output(result) <add> <add> <add> def 
create_and_check_bert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): <add> pass <add> # model = BertForQuestionAnswering(config=config) <add> # model.eval() <add> # loss, start_logits, end_logits = model(input_ids, token_type_ids, input_mask, sequence_labels, sequence_labels) <add> # result = { <add> # "loss": loss, <add> # "start_logits": start_logits, <add> # "end_logits": end_logits, <add> # } <add> # self.parent.assertListEqual( <add> # list(result["start_logits"].size()), <add> # [self.batch_size, self.seq_length]) <add> # self.parent.assertListEqual( <add> # list(result["end_logits"].size()), <add> # [self.batch_size, self.seq_length]) <add> # self.check_loss_output(result) <add> <add> <add> def create_and_check_bert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): <add> pass <add> # config.num_labels = self.num_labels <add> # model = BertForSequenceClassification(config) <add> # model.eval() <add> # loss, logits = model(input_ids, token_type_ids, input_mask, sequence_labels) <add> # result = { <add> # "loss": loss, <add> # "logits": logits, <add> # } <add> # self.parent.assertListEqual( <add> # list(result["logits"].size()), <add> # [self.batch_size, self.num_labels]) <add> # self.check_loss_output(result) <add> <add> <add> def create_and_check_bert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): <add> pass <add> # config.num_labels = self.num_labels <add> # model = BertForTokenClassification(config=config) <add> # model.eval() <add> # loss, logits = model(input_ids, token_type_ids, input_mask, token_labels) <add> # result = { <add> # "loss": loss, <add> # "logits": logits, <add> # } <add> # self.parent.assertListEqual( <add> # list(result["logits"].size()), <add> # [self.batch_size, self.seq_length, self.num_labels]) <add> # self.check_loss_output(result) <add> <add> <add> def create_and_check_bert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels): <add> pass <add> # config.num_choices = self.num_choices <add> # model = BertForMultipleChoice(config=config) <add> # model.eval() <add> # multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() <add> # multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() <add> # multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() <add> # loss, logits = model(multiple_choice_inputs_ids, <add> # multiple_choice_token_type_ids, <add> # multiple_choice_input_mask, <add> # choice_labels) <add> # result = { <add> # "loss": loss, <add> # "logits": logits, <add> # } <add> # self.parent.assertListEqual( <add> # list(result["logits"].size()), <add> # [self.batch_size, self.num_choices]) <add> # self.check_loss_output(result) <add> <add> <add> def prepare_config_and_inputs_for_common(self): <add> config_and_inputs = self.prepare_config_and_inputs() <add> (config, input_ids, token_type_ids, input_mask, <add> sequence_labels, token_labels, choice_labels) = config_and_inputs <add> inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} <add> return config, inputs_dict <add> <add> def setUp(self): <add> self.model_tester = TFBertModelTest.TFBertModelTester(self) <add> self.config_tester 
= ConfigTester(self, config_class=BertConfig, hidden_size=37) <add> <add> def test_config(self): <add> self.config_tester.run_common_tests() <add> <add> def test_bert_model(self): <add> config_and_inputs = self.model_tester.prepare_config_and_inputs() <add> self.model_tester.create_and_check_bert_model(*config_and_inputs) <add> <add> def test_for_masked_lm(self): <add> config_and_inputs = self.model_tester.prepare_config_and_inputs() <add> self.model_tester.create_and_check_bert_for_masked_lm(*config_and_inputs) <add> <add> def test_for_multiple_choice(self): <add> config_and_inputs = self.model_tester.prepare_config_and_inputs() <add> self.model_tester.create_and_check_bert_for_multiple_choice(*config_and_inputs) <add> <add> def test_for_next_sequence_prediction(self): <add> config_and_inputs = self.model_tester.prepare_config_and_inputs() <add> self.model_tester.create_and_check_bert_for_next_sequence_prediction(*config_and_inputs) <add> <add> def test_for_pretraining(self): <add> config_and_inputs = self.model_tester.prepare_config_and_inputs() <add> self.model_tester.create_and_check_bert_for_pretraining(*config_and_inputs) <add> <add> def test_for_question_answering(self): <add> config_and_inputs = self.model_tester.prepare_config_and_inputs() <add> self.model_tester.create_and_check_bert_for_question_answering(*config_and_inputs) <add> <add> def test_for_sequence_classification(self): <add> config_and_inputs = self.model_tester.prepare_config_and_inputs() <add> self.model_tester.create_and_check_bert_for_sequence_classification(*config_and_inputs) <add> <add> def test_for_token_classification(self): <add> config_and_inputs = self.model_tester.prepare_config_and_inputs() <add> self.model_tester.create_and_check_bert_for_token_classification(*config_and_inputs) <add> <add> @pytest.mark.slow <add> def test_model_from_pretrained(self): <add> cache_dir = "/tmp/pytorch_transformers_test/" <add> for model_name in list(TF_BERT_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]: <add> model = TFBertModel.from_pretrained(model_name, cache_dir=cache_dir) <add> shutil.rmtree(cache_dir) <add> self.assertIsNotNone(model) <add> <add>if __name__ == "__main__": <add> unittest.main()
5
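The core of the patch above is how `from_pretrained` resolves a shortcut name, a saved directory, or a raw path to a weights file before downloading and caching it. A minimal JavaScript sketch of that resolution order follows; `ARCHIVE_MAP`, the file names and `resolveArchive` are hypothetical stand-ins for illustration, not the library's real API.

```js
// Hypothetical names throughout: ARCHIVE_MAP, WEIGHTS_NAME, TF_WEIGHTS_NAME
// and resolveArchive() stand in for the library's internals.
const path = require('path');
const fs = require('fs');

const ARCHIVE_MAP = { 'bert-base-uncased': 'https://example.com/bert.h5' };
const WEIGHTS_NAME = 'pytorch_model.bin'; // PyTorch checkpoint in a directory
const TF_WEIGHTS_NAME = 'model.h5';       // TF checkpoint in a directory

function resolveArchive(nameOrPath, { fromPt = false } = {}) {
  if (nameOrPath in ARCHIVE_MAP) {
    return ARCHIVE_MAP[nameOrPath];        // 1. known shortcut name
  }
  if (fs.existsSync(nameOrPath) && fs.statSync(nameOrPath).isDirectory()) {
    // 2. a saved directory: pick the PyTorch or TF weights file inside it
    return path.join(nameOrPath, fromPt ? WEIGHTS_NAME : TF_WEIGHTS_NAME);
  }
  return nameOrPath;                       // 3. treat as a direct path or URL
}

console.log(resolveArchive('bert-base-uncased'));
console.log(resolveArchive('./my_model_directory', { fromPt: true }));
```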
Text
Text
fix some typos in the readme
0ebb7781c03d57e2e0c011975e85e57252d80d09
<ide><path>README.md <ide> instead of the `===` operator which determines object reference identity. <ide> <!-- runkit:activate --> <ide> ```js <ide> const { Map } = require('immutable') <del>const map1 = Map( {a: 1, b: 2, c: 3 }) <add>const map1 = Map({ a: 1, b: 2, c: 3 }) <ide> const map2 = map1.set('b', 2) <ide> assert.equal(map1, map2) // uses map1.equals <ide> assert.strictEqual(map1, map2) // uses ===
1
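The README fix above lives in Immutable.js's value-equality example. For context, a small runnable companion showing why `assert.strictEqual(map1, map2)` passes there: setting a key to its existing value returns the same instance. `Map`, `is` and `equals` are the library's real API.

```js
// Requires the immutable package (npm install immutable).
const { Map, is } = require('immutable');

const map1 = Map({ a: 1, b: 2, c: 3 });
const map2 = map1.set('b', 2);  // no-op update: the same instance is returned
const map3 = map1.set('b', 50); // real update: a new instance

console.log(map1 === map2);     // true  (reference identity preserved)
console.log(map1.equals(map3)); // false (values differ)
console.log(is(map1, map2));    // true  (value-equality helper)
```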
Mixed
Go
add options for specifying block devices
a226168a8b877d632cb87c95dd0288f6092b9d8f
<ide><path>daemon/graphdriver/devmapper/README.md <ide> Here is the list of supported options: <ide> Example use: <ide> <ide> ``docker -d --storage-opt dm.mountopt=nodiscard`` <add> <add> * `dm.datadev` <add> <add> Specifies a custom blockdevice to use for data for the thin pool. <add> <add> If using a block device for device mapper storage, ideally both <add> datadev and metadatadev should be specified to completely avoid <add> using the loopback device. <add> <add> Example use: <add> <add> ``docker -d --storage-opt dm.datadev=/dev/sdb1 --storage-opt dm.metadatadev=/dev/sdc1`` <add> <add> * `dm.metadatadev` <add> <add> Specifies a custom blockdevice to use for metadata for the thin <add> pool. <add> <add> For best performance the metadata should be on a different spindle <add> than the data, or even better on an SSD. <add> <add> If setting up a new metadata pool it is required to be valid. This <add> can be achieved by zeroing the first 4k to indicate empty <add> metadata, like this: <add> <add> ``dd if=/dev/zero of=$metadata_dev bs=4096 count=1``` <add> <add> Example use: <add> <add> ``docker -d --storage-opt dm.datadev=/dev/sdb1 --storage-opt dm.metadatadev=/dev/sdc1`` <ide><path>daemon/graphdriver/devmapper/deviceset.go <ide> type DeviceSet struct { <ide> filesystem string <ide> mountOptions string <ide> mkfsArgs []string <add> dataDevice string <add> metadataDevice string <ide> } <ide> <ide> type DiskUsage struct { <ide> func (devices *DeviceSet) initDevmapper(doInit bool) error { <ide> if info.Exists == 0 { <ide> utils.Debugf("Pool doesn't exist. Creating it.") <ide> <del> hasData := devices.hasImage("data") <del> hasMetadata := devices.hasImage("metadata") <add> var ( <add> dataFile *os.File <add> metadataFile *os.File <add> ) <ide> <del> if !doInit && !hasData { <del> return errors.New("Loopback data file not found") <del> } <add> if devices.dataDevice == "" { <add> // Make sure the sparse images exist in <root>/devicemapper/data <ide> <del> if !doInit && !hasMetadata { <del> return errors.New("Loopback metadata file not found") <del> } <add> hasData := devices.hasImage("data") <ide> <del> createdLoopback = !hasData || !hasMetadata <del> data, err := devices.ensureImage("data", devices.dataLoopbackSize) <del> if err != nil { <del> utils.Debugf("Error device ensureImage (data): %s\n", err) <del> return err <del> } <del> metadata, err := devices.ensureImage("metadata", devices.metaDataLoopbackSize) <del> if err != nil { <del> utils.Debugf("Error device ensureImage (metadata): %s\n", err) <del> return err <del> } <add> if !doInit && !hasData { <add> return errors.New("Loopback data file not found") <add> } <ide> <del> dataFile, err := attachLoopDevice(data) <del> if err != nil { <del> utils.Debugf("\n--->Err: %s\n", err) <del> return err <add> if !hasData { <add> createdLoopback = true <add> } <add> <add> data, err := devices.ensureImage("data", devices.dataLoopbackSize) <add> if err != nil { <add> utils.Debugf("Error device ensureImage (data): %s\n", err) <add> return err <add> } <add> <add> dataFile, err = attachLoopDevice(data) <add> if err != nil { <add> utils.Debugf("\n--->Err: %s\n", err) <add> return err <add> } <add> defer dataFile.Close() <add> } else { <add> dataFile, err = os.OpenFile(devices.dataDevice, os.O_RDWR, 0600) <add> if err != nil { <add> return err <add> } <ide> } <del> defer dataFile.Close() <ide> <del> metadataFile, err := attachLoopDevice(metadata) <del> if err != nil { <del> utils.Debugf("\n--->Err: %s\n", err) <del> return err <add> if devices.metadataDevice 
== "" { <add> // Make sure the sparse images exist in <root>/devicemapper/metadata <add> <add> hasMetadata := devices.hasImage("metadata") <add> <add> if !doInit && !hasMetadata { <add> return errors.New("Loopback metadata file not found") <add> } <add> <add> if !hasMetadata { <add> createdLoopback = true <add> } <add> <add> metadata, err := devices.ensureImage("metadata", devices.metaDataLoopbackSize) <add> if err != nil { <add> utils.Debugf("Error device ensureImage (metadata): %s\n", err) <add> return err <add> } <add> <add> metadataFile, err = attachLoopDevice(metadata) <add> if err != nil { <add> utils.Debugf("\n--->Err: %s\n", err) <add> return err <add> } <add> defer metadataFile.Close() <add> } else { <add> metadataFile, err = os.OpenFile(devices.metadataDevice, os.O_RDWR, 0600) <add> if err != nil { <add> return err <add> } <ide> } <del> defer metadataFile.Close() <ide> <ide> if err := createPool(devices.getPoolName(), dataFile, metadataFile); err != nil { <ide> utils.Debugf("\n--->Err: %s\n", err) <ide> func NewDeviceSet(root string, doInit bool, options []string) (*DeviceSet, error <ide> devices.mkfsArgs = append(devices.mkfsArgs, val) <ide> case "dm.mountopt": <ide> devices.mountOptions = joinMountOptions(devices.mountOptions, val) <add> case "dm.metadatadev": <add> devices.metadataDevice = val <add> case "dm.datadev": <add> devices.dataDevice = val <ide> default: <ide> return nil, fmt.Errorf("Unknown option %s\n", key) <ide> }
2
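The Go change above threads two new `--storage-opt` keys through the devmapper option loop, falling back to sparse loopback images when they are unset. A rough JavaScript sketch of that key=value parsing logic, with illustrative names rather than Docker's actual code:

```js
// Parse "key=value" storage options; unknown keys are rejected, as in
// deviceset.go. The object shape here is illustrative only.
function parseStorageOpts(opts) {
  const devices = { dataDevice: '', metadataDevice: '', mountOptions: '' };
  for (const opt of opts) {
    const i = opt.indexOf('=');
    if (i === -1) throw new Error(`Bad storage option: ${opt}`);
    const key = opt.slice(0, i);
    const val = opt.slice(i + 1);
    switch (key) {
      case 'dm.datadev': devices.dataDevice = val; break;
      case 'dm.metadatadev': devices.metadataDevice = val; break;
      case 'dm.mountopt': devices.mountOptions = val; break;
      default: throw new Error(`Unknown option ${key}`);
    }
  }
  return devices;
}

const devices = parseStorageOpts([
  'dm.datadev=/dev/sdb1',
  'dm.metadatadev=/dev/sdc1'
]);
// An empty string means "fall back to the sparse loopback image".
console.log(devices.dataDevice || '<root>/devicemapper/data');
console.log(devices.metadataDevice || '<root>/devicemapper/metadata');
```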
Mixed
Javascript
add resizedelay option
ee74dd646a60e605308b90a5147fc7121ac9c0ee
<ide><path>docs/docs/configuration/responsive.md
<ide> Namespace: `options`
<ide>
<ide> | `maintainAspectRatio` | `boolean` | `true` | Maintain the original canvas aspect ratio `(width / height)` when resizing.
<ide> | `aspectRatio` | `number` | `2` | Canvas aspect ratio (i.e. `width / height`, a value of 1 representing a square canvas). Note that this option is ignored if the height is explicitly defined either as attribute or via the style.
<ide> | `onResize` | `function` | `null` | Called when a resize occurs. Gets passed two arguments: the chart instance and the new size.
<add>| `resizeDelay` | `number` | `0` | Delay the resize update by the given amount of milliseconds. This can ease the resize process by debouncing the update of the elements.
<ide>
<ide> ## Important Note
<ide>
<ide><path>src/core/core.controller.js
<ide> import {each, callback as callCallback, uid, valueOrDefault, _elementsEqual} fro
<ide> import {clearCanvas, clipArea, unclipArea, _isPointInArea} from '../helpers/helpers.canvas';
<ide> // @ts-ignore
<ide> import {version} from '../../package.json';
<add>import {debounce} from '../helpers/helpers.extras';
<ide>
<ide> /**
<ide> * @typedef { import("../platform/platform.base").ChartEvent } ChartEvent
<ide> class Chart {
<ide> this.attached = false;
<ide> this._animationsDisabled = undefined;
<ide> this.$context = undefined;
<add> this._doResize = debounce(() => this.update('resize'), options.resizeDelay || 0);
<ide>
<ide> // Add the chart instance to the global namespace
<ide> instances[me.id] = me;
<ide> class Chart {
<ide> callCallback(options.onResize, [newSize], me);
<ide>
<ide> if (me.attached) {
<del> me.update('resize');
<add> if (me._doResize()) {
<add> // The resize update is delayed, only draw without updating.
<add> me.render();
<add> }
<ide> }
<ide> }
<ide>
<ide><path>src/helpers/helpers.extras.js
<ide> export function throttled(fn, thisArg, updateFn) {
<ide> };
<ide> }
<ide>
<add>/**
<add> * Debounces calling `fn` for `delay` ms
<add> * @param {function} fn - Function to call. No arguments are passed.
<add> * @param {number} delay - Delay in ms. 0 = immediate invocation.
<add> * @returns {function}
<add> */
<add>export function debounce(fn, delay) {
<add> let timeout;
<add> return function() {
<add> if (delay) {
<add> clearTimeout(timeout);
<add> timeout = setTimeout(fn, delay);
<add> } else {
<add> fn();
<add> }
<add> return delay;
<add> };
<add>}
<add>
<ide>
<ide> /**
<ide> * Converts 'start' to 'left', 'end' to 'right' and others to 'center'
3
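A usage sketch for the new option, assuming the usual Chart.js setup (the library loaded and a `<canvas id="myChart">` in the page); per the doc table in the patch, `onResize` receives the chart instance and the new size:

```js
// Assumes Chart.js is loaded and the page contains <canvas id="myChart">.
const ctx = document.getElementById('myChart');

const chart = new Chart(ctx, {
  type: 'line',
  data: { labels: ['a', 'b', 'c'], datasets: [{ data: [1, 2, 3] }] },
  options: {
    responsive: true,
    resizeDelay: 200, // coalesce a burst of resize events into one update
    onResize(instance, size) {
      console.log('resized to', size.width, 'x', size.height);
    }
  }
});
```

With `resizeDelay: 0` (the default) every resize event triggers a full update; with a delay, intermediate events only redraw while the debounced update runs once at the end.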
Python
Python
allow invalid ssl certs on onapp
b7b1fb999e62372640b46a76fa2790a0f74e39d5
<ide><path>libcloud/compute/drivers/onapp.py <ide> class OnAppNodeDriver(NodeDriver): <ide> <ide> def __init__(self, key=None, secret=None, <ide> host='onapp.com', port=443, <add> verify=True <ide> ): <ide> """ <ide> :param key: username <ide> def __init__(self, key=None, secret=None, <ide> except: <ide> raise Exception("Make sure onapp host is accessible and port " <ide> "%s is open" % port) <add> # do not verify SSL certificate <add> if not verify: <add> self.connection.connection.ca_cert = False <ide> <ide> def create_node(self, name, ex_memory, ex_cpus, ex_cpu_shares, <ide> ex_hostname, ex_template_id, ex_primary_disk_size,
1
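For comparison, the same escape hatch in Node.js terms: `rejectUnauthorized: false` disables certificate verification for one HTTPS request, which is what `verify=False` amounts to on the Python side. The host name below is made up; this is only reasonable against trusted hosts such as a self-signed OnApp control panel on a private network.

```js
const https = require('https');

const req = https.request(
  {
    host: 'onapp.example.com', // made-up host for illustration
    port: 443,
    path: '/settings.json',
    rejectUnauthorized: false  // skip CA verification, like verify=False
  },
  res => {
    console.log('status:', res.statusCode);
    res.resume(); // drain the body so the socket can be reused
  }
);
req.on('error', err => console.error(err));
req.end();
```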
Python
Python
improve windows autoconfiguration
bc9dc649157dd426202c86d41ce50091243a88b6
<ide><path>numpy/distutils/system_info.py <ide> def libpaths(paths, bits): <ide> default_src_dirs = ['.'] <ide> default_x11_lib_dirs = [] <ide> default_x11_include_dirs = [] <del> vcpkg_include_dirs = [ <add> _include_dirs = [ <ide> 'include', <ide> 'include/suitesparse', <ide> ] <del> vcpkg_lib_dirs = [ <add> _lib_dirs = [ <ide> 'lib', <ide> ] <add> <add> _include_dirs = [d.replace('/', os.sep) for d in _include_dirs] <add> _lib_dirs = [d.replace('/', os.sep) for d in _lib_dirs] <add> def add_system_root(library_root): <add> """Add a package manager root to the include directories""" <add> global default_lib_dirs <add> global default_include_dirs <add> <add> library_root = os.path.normpath(library_root) <add> <add> default_lib_dirs.extend( <add> os.path.join(library_root, d) for d in _lib_dirs) <add> default_include_dirs.extend( <add> os.path.join(library_root, d) for d in _include_dirs) <add> <ide> if sys.version_info >= (3, 3): <ide> # VCpkg is the de-facto package manager on windows for C/C++ <ide> # libraries. If it is on the PATH, then we append its paths here. <ide> def libpaths(paths, bits): <ide> specifier = 'x86' <ide> else: <ide> specifier = 'x64' <del> vcpkg_root = os.path.join( <del> vcpkg_dir, 'installed', specifier + '-windows') <del> <del> default_lib_dirs.extend( <del> os.path.join( <del> vcpkg_root, d.replace('/', os.sep)) for d in vcpkg_lib_dirs) <del> default_include_dirs.extend( <del> os.path.join( <del> vcpkg_root, d.replace('/', os.sep)) for d in vcpkg_include_dirs) <add> <add> vcpkg_installed = os.path.join(vcpkg_dir, 'installed') <add> for vcpkg_root in [ <add> os.path.join(vcpkg_installed, specifier + '-windows'), <add> os.path.join(vcpkg_installed, specifier + '-windows-static'), <add> ]: <add> add_system_root(vcpkg_root) <add> <add> # Conda is another popular package manager that provides libraries <add> conda = shutil.which('conda') <add> if conda: <add> conda_dir = os.path.dirname(conda) <add> add_system_root(os.path.join(conda_dir, '..', 'Library')) <add> add_system_root(os.path.join(conda_dir, 'Library')) <add> <ide> else: <ide> default_lib_dirs = libpaths(['/usr/local/lib', '/opt/lib', '/usr/lib', <ide> '/opt/local/lib', '/sw/lib'], platform_bits) <ide> def get_info(name, notfound_action=0): <ide> 'openblas': openblas_info, # use blas_opt instead <ide> # openblas with embedded lapack <ide> 'openblas_lapack': openblas_lapack_info, # use blas_opt instead <add> 'openblas_clapack': openblas_clapack_info, # use blas_opt instead <ide> 'blis': blis_info, # use blas_opt instead <ide> 'lapack_mkl': lapack_mkl_info, # use lapack_opt instead <ide> 'blas_mkl': blas_mkl_info, # use blas_opt instead <ide> def calc_info(self): <ide> self.set_info(**openblas_info) <ide> return <ide> <add> openblas_info = get_info('openblas_clapack') <add> if openblas_info: <add> self.set_info(**openblas_info) <add> return <add> <ide> atlas_info = get_info('atlas_3_10_threads') <ide> if not atlas_info: <ide> atlas_info = get_info('atlas_3_10') <ide> def check_embedded_lapack(self, info): <ide> shutil.rmtree(tmpdir) <ide> return res <ide> <add>class openblas_clapack_info(openblas_lapack_info): <add> _lib_names = ['openblas', 'lapack'] <ide> <ide> class blis_info(blas_info): <ide> section = 'blis'
1
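The heart of the numpy patch is a small helper that turns a package-manager root (vcpkg triplet tree, conda `Library` tree) into include/lib search paths. A JavaScript transliteration of that bookkeeping, where the names mirror the Python code and nothing here is a real numpy API:

```js
const path = require('path');

const defaultLibDirs = [];
const defaultIncludeDirs = [];
const LIB_DIRS = ['lib'];
const INCLUDE_DIRS = ['include', path.join('include', 'suitesparse')];

// Append a package-manager root's lib/include trees to the search paths.
function addSystemRoot(libraryRoot) {
  const root = path.normalize(libraryRoot);
  for (const d of LIB_DIRS) defaultLibDirs.push(path.join(root, d));
  for (const d of INCLUDE_DIRS) defaultIncludeDirs.push(path.join(root, d));
}

// vcpkg keeps one tree per triplet; conda ships a Library tree on Windows.
addSystemRoot('C:/vcpkg/installed/x64-windows');
addSystemRoot('C:/vcpkg/installed/x64-windows-static');
addSystemRoot('C:/Miniconda3/Library');

console.log(defaultIncludeDirs);
```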
Text
Text
clarify http2 docs around class exports
d3d59821dd136a36dc2e1b4a7d02c936762240f4
<ide><path>doc/api/http2.md <ide> have occasion to work with the `Http2Session` object directly, with most <ide> actions typically taken through interactions with either the `Http2Server` or <ide> `Http2Stream` objects. <ide> <add>User code will not create `Http2Session` instances directly. Server-side <add>`Http2Session` instances are created by the `Http2Server` instance when a <add>new HTTP/2 connection is received. Client-side `Http2Session` instances are <add>created using the `http2.connect()` method. <add> <ide> #### `Http2Session` and Sockets <ide> <ide> Every `Http2Session` instance is associated with exactly one [`net.Socket`][] or <ide> added: v8.4.0 <ide> <ide> * Extends: {net.Server} <ide> <add>Instances of `Http2Server` are created using the `http2.createServer()` <add>function. The `Http2Server` class is not exported directly by the `http2` <add>module. <add> <ide> #### Event: 'checkContinue' <ide> <!-- YAML <ide> added: v8.5.0 <ide> added: v8.4.0 <ide> <ide> * Extends: {tls.Server} <ide> <add>Instances of `Http2SecureServer` are created using the <add>`http2.createSecureServer()` function. The `Http2SecureServer` class is not <add>exported directly by the `http2` module. <add> <ide> #### Event: 'checkContinue' <ide> <!-- YAML <ide> added: v8.5.0
1
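The pattern the documentation change spells out, shown with node's actual `http2` API: servers and sessions come from factory functions, never from exported class constructors.

```js
const http2 = require('http2');

// Server side: the factory returns an Http2Server; each incoming connection
// spawns a server-side Http2Session internally.
const server = http2.createServer((req, res) => {
  res.end('hello over h2c');
});

server.listen(0, () => {
  // Client side: http2.connect() is the only way to obtain a client session.
  const session = http2.connect(`http://localhost:${server.address().port}`);
  const req = session.request({ ':path': '/' });
  req.setEncoding('utf8');
  req.on('data', chunk => process.stdout.write(chunk));
  req.on('end', () => {
    session.close();
    server.close();
  });
  req.end();
});
```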
Text
Text
remove refs to old openssl list-* commands
9412441da01d14167a42557e8fe7e42cfc4309a2
<ide><path>doc/api/crypto.md <ide> option is not required but can be used to set the length of the authentication <ide> tag that will be returned by `getAuthTag()` and defaults to 16 bytes. <ide> <ide> The `algorithm` is dependent on OpenSSL, examples are `'aes192'`, etc. On <del>recent OpenSSL releases, `openssl list -cipher-algorithms` <del>(`openssl list-cipher-algorithms` for older versions of OpenSSL) will <add>recent OpenSSL releases, `openssl list -cipher-algorithms` will <ide> display the available cipher algorithms. <ide> <ide> The `password` is used to derive the cipher key and initialization vector (IV). <ide> option is not required but can be used to set the length of the authentication <ide> tag that will be returned by `getAuthTag()` and defaults to 16 bytes. <ide> <ide> The `algorithm` is dependent on OpenSSL, examples are `'aes192'`, etc. On <del>recent OpenSSL releases, `openssl list -cipher-algorithms` <del>(`openssl list-cipher-algorithms` for older versions of OpenSSL) will <add>recent OpenSSL releases, `openssl list -cipher-algorithms` will <ide> display the available cipher algorithms. <ide> <ide> The `key` is the raw key used by the `algorithm` and `iv` is an <ide> option is not required but can be used to restrict accepted authentication tags <ide> to those with the specified length. <ide> <ide> The `algorithm` is dependent on OpenSSL, examples are `'aes192'`, etc. On <del>recent OpenSSL releases, `openssl list -cipher-algorithms` <del>(`openssl list-cipher-algorithms` for older versions of OpenSSL) will <add>recent OpenSSL releases, `openssl list -cipher-algorithms` will <ide> display the available cipher algorithms. <ide> <ide> The `key` is the raw key used by the `algorithm` and `iv` is an <ide> can be used to specify the desired output length in bytes. <ide> <ide> The `algorithm` is dependent on the available algorithms supported by the <ide> version of OpenSSL on the platform. Examples are `'sha256'`, `'sha512'`, etc. <del>On recent releases of OpenSSL, `openssl list -digest-algorithms` <del>(`openssl list-message-digest-algorithms` for older versions of OpenSSL) will <add>On recent releases of OpenSSL, `openssl list -digest-algorithms` will <ide> display the available digest algorithms. <ide> <ide> Example: generating the sha256 sum of a file <ide> Optional `options` argument controls stream behavior. <ide> <ide> The `algorithm` is dependent on the available algorithms supported by the <ide> version of OpenSSL on the platform. Examples are `'sha256'`, `'sha512'`, etc. <del>On recent releases of OpenSSL, `openssl list -digest-algorithms` <del>(`openssl list-message-digest-algorithms` for older versions of OpenSSL) will <add>On recent releases of OpenSSL, `openssl list -digest-algorithms` will <ide> display the available digest algorithms. <ide> <ide> The `key` is the HMAC key used to generate the cryptographic HMAC hash. If it is
1
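Node also exposes these lists programmatically, which is often handier than shelling out to `openssl list`; `getCiphers()` and `getHashes()` are real `crypto` APIs.

```js
const crypto = require('crypto');

// The node equivalents of `openssl list -cipher-algorithms` and
// `openssl list -digest-algorithms`:
console.log(crypto.getCiphers().includes('aes-192-cbc')); // true
console.log(crypto.getHashes().includes('sha256'));       // true

// Using one of the listed digests:
const digest = crypto.createHash('sha256').update('hello').digest('hex');
console.log(digest);
```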
Javascript
Javascript
extend size() to take ownpropsonly param
96a1df192a167e6e34988af687693506f4efd1d1
<ide><path>src/Angular.js <ide> function map(obj, iterator, context) { <ide> * @function <ide> * <ide> * @description <del> * Determines the number of elements in an array or number of properties of an object. <add> * Determines the number of elements in an array, number of properties of an object or string <add> * length. <ide> * <ide> * Note: this function is used to augment the Object type in angular expressions. See <ide> * {@link angular.Object} for more info. <ide> * <del> * @param {Object|Array} obj Object or array to inspect. <add> * @param {Object|Array|string} obj Object, array or string to inspect. <add> * @param {boolean} [ownPropsOnly=false] Count only "own" properties in an object <ide> * @returns {number} The size of `obj` or `0` if `obj` is neither an object or an array. <ide> * <ide> * @example <ide> function map(obj, iterator, context) { <ide> * </doc:scenario> <ide> * </doc:example> <ide> */ <del>function size(obj) { <add>function size(obj, ownPropsOnly) { <ide> var size = 0, key; <ide> if (obj) { <ide> if (isNumber(obj.length)) { <ide> return obj.length; <ide> } else if (isObject(obj)){ <ide> for (key in obj) <del> size++; <add> if (!ownPropsOnly || obj.hasOwnProperty(key)) <add> size++; <ide> } <ide> } <ide> return size; <ide><path>src/widgets.js <ide> angularWidget('@ng:repeat', function(expression, element){ <ide> childCount = children.length, <ide> lastIterElement = iterStartElement, <ide> collection = this.$tryEval(rhs, iterStartElement), <del> is_array = isArray(collection), <del> collectionLength = 0, <add> collectionLength = size(collection, true), <ide> childScope, <ide> key; <ide> <del> if (is_array) { <del> collectionLength = collection.length; <del> } else { <del> for (key in collection) <del> if (collection.hasOwnProperty(key)) <del> collectionLength++; <del> } <del> <ide> for (key in collection) { <ide> if (collection.hasOwnProperty(key)) { <ide> if (index < childCount) { <ide><path>test/AngularSpec.js <ide> describe('angular', function(){ <ide> }); <ide> }); <ide> <add> <add> describe('size', function() { <add> it('should return the number of items in an array', function() { <add> expect(size([])).toBe(0); <add> expect(size(['a', 'b', 'c'])).toBe(3); <add> }); <add> <add> it('should return the number of properties of an object', function() { <add> expect(size({})).toBe(0); <add> expect(size({a:1, b:'a', c:noop})).toBe(3); <add> }); <add> <add> it('should return the number of own properties of an object', function() { <add> var obj = inherit({protoProp: 'c', protoFn: noop}, {a:1, b:'a', c:noop}); <add> <add> expect(size(obj)).toBe(5); <add> expect(size(obj, true)).toBe(3); <add> }); <add> <add> it('should return the string length', function() { <add> expect(size('')).toBe(0); <add> expect(size('abc')).toBe(3); <add> }); <add> }); <add> <add> <ide> describe('parseKeyValue', function() { <ide> it('should parse a string into key-value pairs', function() { <ide> expect(parseKeyValue('')).toEqual({});
3
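For illustration, a standalone restatement of the patched `size()` helper and the cases its new tests cover; this is a sketch, not the code as it sits in src/Angular.js:

```js
// Standalone restatement of the patched size() helper (illustration
// only): arrays and strings report .length, objects count keys, and
// ownPropsOnly limits the count to "own" properties.
function size(obj, ownPropsOnly) {
  var count = 0, key;
  if (obj) {
    if (typeof obj.length === 'number') {
      return obj.length;
    } else if (typeof obj === 'object') {
      for (key in obj)
        if (!ownPropsOnly || obj.hasOwnProperty(key))
          count++;
    }
  }
  return count;
}

console.log(size(['a', 'b', 'c']));     // 3
console.log(size('abc'));               // 3
console.log(size({a: 1, b: 2}, true));  // 2
```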
Javascript
Javascript
fix the test to work on windows
26b11915b1c16440468a4b5f4b07d2409b98c68c
<ide><path>test/simple/test-domain.js <ide> var e = new events.EventEmitter(); <ide> <ide> d.on('error', function(er) { <ide> console.error('caught', er); <del> switch (er.message) { <add> <add> var er_message = er.message; <add> var er_path = er.path <add> <add> // On windows, error messages can contain full path names. If this is the <add> // case, remove the directory part. <add> if (typeof er_path === 'string') { <add> var slash = er_path.lastIndexOf('\\'); <add> if (slash !== -1) { <add> var dir = er_path.slice(0, slash + 1); <add> er_path = er_path.replace(dir, ''); <add> er_message = er_message.replace(dir, ''); <add> } <add> } <add> <add> switch (er_message) { <ide> case 'emitted': <ide> assert.equal(er.domain, d); <ide> assert.equal(er.domain_emitter, e); <ide> d.on('error', function(er) { <ide> assert.equal(typeof er.domain_bound, 'function'); <ide> assert.ok(!er.domain_emitter); <ide> assert.equal(er.code, 'ENOENT'); <del> assert.equal(er.path, 'this file does not exist'); <add> assert.equal(er_path, 'this file does not exist'); <ide> assert.equal(typeof er.errno, 'number'); <ide> break; <ide> <ide> case "ENOENT, open 'stream for nonexistent file'": <ide> assert.equal(typeof er.errno, 'number'); <ide> assert.equal(er.code, 'ENOENT'); <del> assert.equal(er.path, 'stream for nonexistent file'); <add> assert.equal(er_path, 'stream for nonexistent file'); <ide> assert.equal(er.domain, d); <ide> assert.equal(er.domain_emitter, fst); <ide> assert.ok(!er.domain_bound);
1
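A sketch of the normalization idea the test adopts on Windows; the sample path and message below are made up for illustration:

```js
// Sketch: when er.path carries a directory prefix, strip that prefix
// from both the path and the message before asserting. Values are
// invented for illustration only.
var er_path = 'c:\\tmp\\this file does not exist';
var er_message = "ENOENT, open 'c:\\tmp\\this file does not exist'";

var slash = er_path.lastIndexOf('\\');
if (slash !== -1) {
  var dir = er_path.slice(0, slash + 1);
  er_path = er_path.replace(dir, '');
  er_message = er_message.replace(dir, '');
}

console.log(er_path);     // 'this file does not exist'
console.log(er_message);  // "ENOENT, open 'this file does not exist'"
```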
Text
Text
update the rails security guide [ci skip]
a3aa53684eb8c39db4aa03bc69b8ee15deadeb80
<ide><path>guides/source/security.md <ide> User.find(session[:user_id]) <ide> <ide> ### Session id <ide> <del>NOTE: _The session id is a 32 byte long MD5 hash value._ <add>NOTE: _The session id is a 32-character random hex string._ <ide> <del>A session id consists of the hash value of a random string. The random string is the current time, a random number between 0 and 1, the process id number of the Ruby interpreter (also basically a random number) and a constant string. Currently it is not feasible to brute-force Rails' session ids. To date MD5 is uncompromised, but there have been collisions, so it is theoretically possible to create another input text with the same hash value. But this has had no security impact to date. <add>The session id is generated using `SecureRandom.hex` which generates a random hex string using platform specific methods (such as openssl, /dev/urandom or win32) for generating cryptographically secure random numbers. Currently it is not feasible to brute-force Rails' session ids. <ide> <ide> ### Session Hijacking <ide>
1
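A quick sanity check of the updated claim, assuming Ruby's stdlib `securerandom`: 16 random bytes, hex-encoded, give the 32-character string the guide mentions.

```ruby
# Sanity check of the updated guide text: SecureRandom.hex(16) returns
# a 32-character hex string (16 random bytes, hex-encoded).
require 'securerandom'

sid = SecureRandom.hex(16)
puts sid          # => random 32-character hex string
puts sid.length   # => 32
```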
Javascript
Javascript
share socket._haderror with http_client
dc50f27d5260fcb4568be3d43b7dcc898355e748
<ide><path>lib/_http_client.js <ide> function socketCloseListener() { <ide> res.emit('close'); <ide> }); <ide> res.push(null); <del> } else if (!req.res && !req._hadError) { <add> } else if (!req.res && !req.socket._hadError) { <ide> // This socket error fired before we started to <ide> // receive a response. The error needs to <ide> // fire on the request. <ide> req.emit('error', createHangUpError()); <del> req._hadError = true; <add> req.socket._hadError = true; <ide> } <ide> <ide> // Too bad. That output wasn't getting written. <ide> function socketErrorListener(err) { <ide> req.emit('error', err); <ide> // For Safety. Some additional errors might fire later on <ide> // and we need to make sure we don't double-fire the error event. <del> req._hadError = true; <add> req.socket._hadError = true; <ide> } <ide> <ide> if (parser) { <ide> function socketOnEnd() { <ide> var req = this._httpMessage; <ide> var parser = this.parser; <ide> <del> if (!req.res) { <add> if (!req.res && !req.socket._hadError) { <ide> // If we don't have a response then we know that the socket <ide> // ended prematurely and we need to emit an error on the request. <ide> req.emit('error', createHangUpError()); <del> req._hadError = true; <add> req.socket._hadError = true; <ide> } <ide> if (parser) { <ide> parser.finish(); <ide> function socketOnData(d, start, end) { <ide> freeParser(parser, req); <ide> socket.destroy(); <ide> req.emit('error', ret); <del> req._hadError = true; <add> req.socket._hadError = true; <ide> } else if (parser.incoming && parser.incoming.upgrade) { <ide> // Upgrade or CONNECT <ide> var bytesParsed = ret; <ide><path>lib/_tls_wrap.js <ide> exports.connect = function(/* [port, host], options, cb */) { <ide> }); <ide> <ide> function onHangUp() { <del> var error = new Error('socket hang up'); <del> error.code = 'ECONNRESET'; <del> socket.destroy(); <del> socket.emit('error', error); <add> // NOTE: This logic is shared with _http_client.js <add> if (!socket._hadError) { <add> socket._hadError = true; <add> var error = new Error('socket hang up'); <add> error.code = 'ECONNRESET'; <add> socket.destroy(); <add> socket.emit('error', error); <add> } <ide> } <ide> socket.once('end', onHangUp); <ide> } <ide><path>lib/net.js <ide> function Socket(options) { <ide> if (!(this instanceof Socket)) return new Socket(options); <ide> <ide> this._connecting = false; <add> this._hadError = false; <ide> this._handle = null; <ide> <ide> switch (typeof options) {
3
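A self-contained sketch (plain EventEmitter, not the real net.Socket) of the once-only guard this patch shares between the HTTP client and the TLS wrap:

```js
// Sketch of the shared guard: a per-socket _hadError flag makes the
// ECONNRESET "socket hang up" error fire at most once per socket.
const EventEmitter = require('events');

function emitHangUpOnce(socket) {
  if (!socket._hadError) {
    socket._hadError = true;
    const error = new Error('socket hang up');
    error.code = 'ECONNRESET';
    socket.emit('error', error);
  }
}

const socket = new EventEmitter();
socket._hadError = false;
socket.on('error', (err) => console.log(err.message));

emitHangUpOnce(socket);  // logs "socket hang up"
emitHangUpOnce(socket);  // silent: already flagged
```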
Python
Python
fix evaluation with label smoothing in trainer
461e8cacf94d1f76367cc9ba2cfd5b9bd3641c81
<ide><path>src/transformers/trainer.py <ide> def prediction_step( <ide> else: <ide> ignore_keys = [] <ide> <add> # labels may be popped when computing the loss (label smoothing for instance) so we grab them first. <add> if has_labels: <add> labels = nested_detach(tuple(inputs.get(name) for name in self.label_names)) <add> if len(labels) == 1: <add> labels = labels[0] <add> else: <add> labels = None <add> <ide> with torch.no_grad(): <ide> if has_labels: <ide> loss, outputs = self.compute_loss(model, inputs, return_outputs=True) <ide> def prediction_step( <ide> if len(logits) == 1: <ide> logits = logits[0] <ide> <del> if has_labels: <del> labels = nested_detach(tuple(inputs.get(name) for name in self.label_names)) <del> if len(labels) == 1: <del> labels = labels[0] <del> else: <del> labels = None <del> <ide> return (loss, logits, labels) <ide> <ide> def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]):
1
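A toy sketch of the ordering bug being fixed, with a stand-in loss function; nothing here is the real Trainer API:

```python
# Toy sketch (not the real Trainer): label smoothing pops "labels" out
# of the inputs inside the loss computation, so they must be grabbed
# *before* that call, as the patched prediction_step now does.
inputs = {"input_ids": [101, 102], "labels": [1]}

labels = inputs.get("labels")   # grab first, as the fixed code does

def compute_loss(inputs):
    # stand-in for label smoothing, which consumes the labels
    return sum(inputs.pop("labels"))

loss = compute_loss(inputs)
print(inputs.get("labels"))     # None -- gone after the loss call
print(labels)                   # [1]  -- still available for metrics
```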
Text
Text
fix broken list formatting [ci skip]
ce47479be03b1c56f5e24b67f9a5802ff30adcc2
<ide><path>railties/CHANGELOG.md <ide> * Deprecate `rails notes` subcommands in favor of passing an `annotations` argument to `rails notes`. <ide> <ide> The following subcommands are replaced by passing `--annotations` or `-a` to `rails notes`: <del> - `rails notes:custom ANNOTATION=custom` is deprecated in favor of using `rails notes -a custom`. <del> - `rails notes:optimize` is deprecated in favor of using `rails notes -a OPTIMIZE`. <del> - `rails notes:todo` is deprecated in favor of using`rails notes -a TODO`. <del> - `rails notes:fixme` is deprecated in favor of using `rails notes -a FIXME`. <add> - `rails notes:custom ANNOTATION=custom` is deprecated in favor of using `rails notes -a custom`. <add> - `rails notes:optimize` is deprecated in favor of using `rails notes -a OPTIMIZE`. <add> - `rails notes:todo` is deprecated in favor of using`rails notes -a TODO`. <add> - `rails notes:fixme` is deprecated in favor of using `rails notes -a FIXME`. <ide> <ide> *Annie-Claude Côté* <ide>
1
PHP
PHP
fix most coding standards in case/network
d0733ceb00e56aa68c555c28fb569dac36ec9941
<ide><path>lib/Cake/Test/Case/Network/CakeRequestTest.php <ide> App::uses('CakeRequest', 'Network'); <ide> <ide> class CakeRequestTest extends CakeTestCase { <add> <ide> /** <ide> * setup callback <ide> * <ide> public function testQueryStringParsingFromInputUrl() { <ide> $expected = array('one' => 'something', 'two' => 'else'); <ide> $this->assertEquals($expected, $request->query); <ide> $this->assertEquals('some/path?one=something&two=else', $request->url); <del> <ide> } <ide> <ide> /** <ide> public function testAddPaths() { <ide> $this->assertFalse(isset($request->random)); <ide> } <ide> <del> <ide> /** <ide> * test parsing POST data into the object. <ide> * <ide> public function testFILESParsing() { <ide> ); <ide> $this->assertEquals($request->data, $expected); <ide> <del> <ide> $_FILES = array( <ide> 'data' => array( <ide> 'name' => array('birth_cert' => 'born on.txt'), <ide> public function testFILESParsing() { <ide> ); <ide> $request = new CakeRequest('some/path'); <ide> $this->assertEquals($request->params['form'], $_FILES); <del> <ide> } <ide> <ide> /** <ide> public function testisAjaxFlashAndFriends() { <ide> * @expectedException CakeException <ide> * @return void <ide> */ <del> public function test__callExceptionOnUnknownMethod() { <add> public function testMagicCallExceptionOnUnknownMethod() { <ide> $request = new CakeRequest('some/path'); <ide> $request->IamABanana(); <ide> } <ide> public function testIsSsl() { <ide> * <ide> * @return void <ide> */ <del> public function test__get() { <add> public function testMagicget() { <ide> $request = new CakeRequest('some/path'); <ide> $request->params = array('controller' => 'posts', 'action' => 'view', 'plugin' => 'blogs'); <ide> <ide> public function test__get() { <ide> * <ide> * @return void <ide> */ <del> public function test__isset() { <add> public function testMagicisset() { <ide> $request = new CakeRequest('some/path'); <ide> $request->params = array( <ide> 'controller' => 'posts', <ide> public function testAddDetector() { <ide> <ide> $_SERVER['TEST_VAR'] = 'wrong'; <ide> $this->assertFalse($request->is('compare'), 'Value mis-match failed.'); <del> <add> <ide> $request->addDetector('compareCamelCase', array('env' => 'TEST_VAR', 'value' => 'foo')); <ide> <ide> $_SERVER['TEST_VAR'] = 'foo'; <ide> public function testBaseUrlAndWebrootWithModRewrite() { <ide> $this->assertEquals($request->webroot, '/1.2.x.x/'); <ide> $this->assertEquals($request->url, 'posts/view/1'); <ide> <del> <ide> $_SERVER['DOCUMENT_ROOT'] = '/cake/repo/branches/1.2.x.x/app/webroot'; <ide> $_SERVER['PHP_SELF'] = '/index.php'; <ide> $_SERVER['PATH_INFO'] = '/posts/add'; <ide> public function testBaseUrlAndWebrootWithModRewrite() { <ide> $this->assertEquals('', $request->base); <ide> $this->assertEquals('/', $request->webroot); <ide> <del> <ide> $_SERVER['DOCUMENT_ROOT'] = '/some/apps/where'; <ide> $_SERVER['PHP_SELF'] = '/app/webroot/index.php'; <ide> $request = new CakeRequest(); <ide> public function testInputDecodeExtraParams() { <ide> ); <ide> } <ide> <del> <ide> /** <ide> * Test is('requested') and isRequested() <ide> * <ide><path>lib/Cake/Test/Case/Network/CakeResponseTest.php <ide> public function tearDown() { <ide> } <ide> <ide> /** <del>* Tests the request object constructor <del>* <del>*/ <add> * Tests the request object constructor <add> * <add> */ <ide> public function testConstruct() { <ide> $response = new CakeResponse(); <ide> $this->assertNull($response->body()); <ide> public function testConstruct() { <ide> } <ide> <ide> /** <del>* Tests the body 
method <del>* <del>*/ <add> * Tests the body method <add> * <add> */ <ide> public function testBody() { <ide> $response = new CakeResponse(); <ide> $this->assertNull($response->body()); <ide> public function testBody() { <ide> } <ide> <ide> /** <del>* Tests the charset method <del>* <del>*/ <add> * Tests the charset method <add> * <add> */ <ide> public function testCharset() { <ide> $response = new CakeResponse(); <ide> $this->assertEquals($response->charset(), 'UTF-8'); <ide> public function testCharset() { <ide> } <ide> <ide> /** <del>* Tests the statusCode method <del>* <del>* @expectedException CakeException <del>*/ <add> * Tests the statusCode method <add> * <add> * @expectedException CakeException <add> */ <ide> public function testStatusCode() { <ide> $response = new CakeResponse(); <ide> $this->assertEquals($response->statusCode(), 200); <ide> public function testStatusCode() { <ide> } <ide> <ide> /** <del>* Tests the type method <del>* <del>*/ <add> * Tests the type method <add> * <add> */ <ide> public function testType() { <ide> $response = new CakeResponse(); <ide> $this->assertEquals($response->type(), 'text/html'); <ide> public function testType() { <ide> } <ide> <ide> /** <del>* Tests the header method <del>* <del>*/ <add> * Tests the header method <add> * <add> */ <ide> public function testHeader() { <ide> $response = new CakeResponse(); <ide> $headers = array(); <ide> public function testHeader() { <ide> } <ide> <ide> /** <del>* Tests the send method <del>* <del>*/ <add> * Tests the send method <add> * <add> */ <ide> public function testSend() { <ide> $response = $this->getMock('CakeResponse', array('_sendHeader', '_sendContent', '_setCookies')); <ide> $response->header(array( <ide> public function testSend() { <ide> } <ide> <ide> /** <del>* Tests the send method and changing the content type <del>* <del>*/ <add> * Tests the send method and changing the content type <add> * <add> */ <ide> public function testSendChangingContentYype() { <ide> $response = $this->getMock('CakeResponse', array('_sendHeader', '_sendContent', '_setCookies')); <ide> $response->type('mp3'); <ide> public function testSendChangingContentYype() { <ide> } <ide> <ide> /** <del>* Tests the send method and changing the content type <del>* <del>*/ <add> * Tests the send method and changing the content type <add> * <add> */ <ide> public function testSendChangingContentType() { <ide> $response = $this->getMock('CakeResponse', array('_sendHeader', '_sendContent', '_setCookies')); <ide> $response->type('mp3'); <ide> public function testSendChangingContentType() { <ide> } <ide> <ide> /** <del>* Tests the send method and changing the content type <del>* <del>*/ <add> * Tests the send method and changing the content type <add> * <add> */ <ide> public function testSendWithLocation() { <ide> $response = $this->getMock('CakeResponse', array('_sendHeader', '_sendContent', '_setCookies')); <ide> $response->header('Location', 'http://www.example.com'); <ide> public function testSendWithLocation() { <ide> $response->expects($this->at(2)) <ide> ->method('_sendHeader')->with('Location', 'http://www.example.com'); <ide> $response->expects($this->at(3)) <del> ->method('_sendHeader')->with('Content-Type', 'text/html; charset=UTF-8'); <add> ->method('_sendHeader')->with('Content-Type', 'text/html; charset=UTF-8'); <ide> $response->send(); <ide> } <ide> <ide> /** <del>* Tests the disableCache method <del>* <del>*/ <add> * Tests the disableCache method <add> * <add> */ <ide> public function testDisableCache() { <ide> $response 
= new CakeResponse(); <ide> $expected = array( <ide> public function testDisableCache() { <ide> } <ide> <ide> /** <del>* Tests the cache method <del>* <del>*/ <add> * Tests the cache method <add> * <add> */ <ide> public function testCache() { <ide> $response = new CakeResponse(); <ide> $since = time(); <ide> public function testCompress() { <ide> } <ide> <ide> /** <del>* Tests the httpCodes method <del>* <del>*/ <add> * Tests the httpCodes method <add> * <add> */ <ide> public function testHttpCodes() { <ide> $response = new CakeResponse(); <ide> $result = $response->httpCodes(); <ide> $this->assertEquals(count($result), 39); <ide> <del> $result = $response->httpCodes(100); <add> $result = $response->httpCodes(100); <ide> $expected = array(100 => 'Continue'); <ide> $this->assertEquals($expected, $result); <ide> <ide> public function testHttpCodes() { <ide> 1729 => 'Hardy-Ramanujan Located' <ide> ); <ide> <del> $result = $response->httpCodes($codes); <add> $result = $response->httpCodes($codes); <ide> $this->assertTrue($result); <ide> $this->assertEquals(count($response->httpCodes()), 41); <ide> <ide> public function testHttpCodes() { <ide> } <ide> <ide> /** <del>* Tests the download method <del>* <del>*/ <add> * Tests the download method <add> * <add> */ <ide> public function testDownload() { <ide> $response = new CakeResponse(); <ide> $expected = array( <ide> public function testDownload() { <ide> } <ide> <ide> /** <del>* Tests the mapType method <del>* <del>*/ <add> * Tests the mapType method <add> * <add> */ <ide> public function testMapType() { <ide> $response = new CakeResponse(); <ide> $this->assertEquals('wav', $response->mapType('audio/x-wav')); <ide> public function testMapType() { <ide> } <ide> <ide> /** <del>* Tests the outputCompressed method <del>* <del>*/ <add> * Tests the outputCompressed method <add> * <add> */ <ide> public function testOutputCompressed() { <ide> $response = new CakeResponse(); <ide> <ide> public function testOutputCompressed() { <ide> } <ide> <ide> /** <del>* Tests the send and setting of Content-Length <del>* <del>*/ <add> * Tests the send and setting of Content-Length <add> * <add> */ <ide> public function testSendContentLength() { <ide> $response = $this->getMock('CakeResponse', array('_sendHeader', '_sendContent')); <ide> $response->body('the response body'); <ide> public function testSendContentLength() { <ide> $response->header('Content-Length', 1); <ide> $response->expects($this->never())->method('outputCompressed'); <ide> $response->expects($this->once())->method('_sendContent')->with($body); <del> $response->expects($this->at(1)) <add> $response->expects($this->at(1)) <ide> ->method('_sendHeader')->with('Content-Length', 1); <ide> $response->send(); <ide> <ide> public function testSharable() { <ide> ->method('_sendHeader')->with('Cache-Control', 'public'); <ide> $response->send(); <ide> <del> <ide> $response = $this->getMock('CakeResponse', array('_sendHeader', '_sendContent')); <ide> $response->sharable(false); <ide> $headers = $response->header(); <ide> public function testMustRevalidate() { <ide> $response->expects($this->at(1)) <ide> ->method('_sendHeader')->with('Cache-Control', 's-maxage=3600, must-revalidate'); <ide> $response->send(); <del> <ide> } <ide> <ide> /** <ide> public function testNotModified() { <ide> **/ <ide> public function testCheckNotModifiedByEtagStar() { <ide> $_SERVER['HTTP_IF_NONE_MATCH'] = '*'; <del> $response = $this->getMock('CakeResponse', array('notModified')); <add> $response = $this->getMock('CakeResponse', 
array('notModified')); <ide> $response->etag('something'); <ide> $response->expects($this->once())->method('notModified'); <ide> $response->checkNotModified(new CakeRequest); <ide> public function testCheckNotModifiedByEtagStar() { <ide> **/ <ide> public function testCheckNotModifiedByEtagExact() { <ide> $_SERVER['HTTP_IF_NONE_MATCH'] = 'W/"something", "other"'; <del> $response = $this->getMock('CakeResponse', array('notModified')); <add> $response = $this->getMock('CakeResponse', array('notModified')); <ide> $response->etag('something', true); <ide> $response->expects($this->once())->method('notModified'); <ide> $this->assertTrue($response->checkNotModified(new CakeRequest)); <ide> public function testCheckNotModifiedByEtagExact() { <ide> public function testCheckNotModifiedByEtagAndTime() { <ide> $_SERVER['HTTP_IF_NONE_MATCH'] = 'W/"something", "other"'; <ide> $_SERVER['HTTP_IF_MODIFIED_SINCE'] = '2012-01-01 00:00:00'; <del> $response = $this->getMock('CakeResponse', array('notModified')); <add> $response = $this->getMock('CakeResponse', array('notModified')); <ide> $response->etag('something', true); <ide> $response->modified('2012-01-01 00:00:00'); <ide> $response->expects($this->once())->method('notModified'); <ide> public function testCheckNotModifiedByEtagAndTime() { <ide> public function testCheckNotModifiedByEtagAndTimeMismatch() { <ide> $_SERVER['HTTP_IF_NONE_MATCH'] = 'W/"something", "other"'; <ide> $_SERVER['HTTP_IF_MODIFIED_SINCE'] = '2012-01-01 00:00:00'; <del> $response = $this->getMock('CakeResponse', array('notModified')); <add> $response = $this->getMock('CakeResponse', array('notModified')); <ide> $response->etag('something', true); <ide> $response->modified('2012-01-01 00:00:01'); <ide> $response->expects($this->never())->method('notModified'); <ide> public function testCheckNotModifiedByEtagAndTimeMismatch() { <ide> public function testCheckNotModifiedByEtagMismatch() { <ide> $_SERVER['HTTP_IF_NONE_MATCH'] = 'W/"something-else", "other"'; <ide> $_SERVER['HTTP_IF_MODIFIED_SINCE'] = '2012-01-01 00:00:00'; <del> $response = $this->getMock('CakeResponse', array('notModified')); <add> $response = $this->getMock('CakeResponse', array('notModified')); <ide> $response->etag('something', true); <ide> $response->modified('2012-01-01 00:00:00'); <ide> $response->expects($this->never())->method('notModified'); <ide> $this->assertFalse($response->checkNotModified(new CakeRequest)); <ide> } <ide> <del> <ide> /** <ide> * Test checkNotModified method <ide> * <ide> * @return void <ide> **/ <ide> public function testCheckNotModifiedByTime() { <ide> $_SERVER['HTTP_IF_MODIFIED_SINCE'] = '2012-01-01 00:00:00'; <del> $response = $this->getMock('CakeResponse', array('notModified')); <add> $response = $this->getMock('CakeResponse', array('notModified')); <ide> $response->modified('2012-01-01 00:00:00'); <ide> $response->expects($this->once())->method('notModified'); <ide> $this->assertTrue($response->checkNotModified(new CakeRequest)); <ide> public function testCheckNotModifiedByTime() { <ide> public function testCheckNotModifiedNoHints() { <ide> $_SERVER['HTTP_IF_NONE_MATCH'] = 'W/"something", "other"'; <ide> $_SERVER['HTTP_IF_MODIFIED_SINCE'] = '2012-01-01 00:00:00'; <del> $response = $this->getMock('CakeResponse', array('notModified')); <add> $response = $this->getMock('CakeResponse', array('notModified')); <ide> $response->expects($this->never())->method('notModified'); <ide> $this->assertFalse($response->checkNotModified(new CakeRequest)); <ide> } 
<ide><path>lib/Cake/Test/Case/Network/Email/CakeEmailTest.php <ide> public function getBoundary() { <ide> public function encode($text) { <ide> return $this->_encode($text); <ide> } <add> <ide> } <ide> <ide> /* <ide> public function setUp() { <ide> $this->CakeEmail = new TestCakeEmail(); <ide> <ide> App::build(array( <del> 'View' => array(CAKE . 'Test' . DS . 'test_app' . DS . 'View'. DS) <add> 'View' => array(CAKE . 'Test' . DS . 'test_app' . DS . 'View' . DS) <ide> )); <ide> } <ide> <ide> public function testFormatAddressJapanese() { <ide> $this->assertSame($expected, $result); <ide> <ide> $result = $this->CakeEmail->formatAddress(array('cake@cakephp.org' => '寿限無寿限無五劫の擦り切れ海砂利水魚の水行末雲来末風来末食う寝る処に住む処やぶら小路の藪柑子パイポパイポパイポのシューリンガンシューリンガンのグーリンダイグーリンダイのポンポコピーのポンポコナーの長久命の長助')); <del> $expected = array("=?ISO-2022-JP?B?GyRCPHc4Qkw1PHc4Qkw1OF45ZSROOyQkakBaJGwzJDo9TXg/ZTV7GyhC?=\r\n" <del> ." =?ISO-2022-JP?B?GyRCJE4/ZTlUS3YxQE1oS3ZJd01oS3Y/KSQmPzIkaz1oJEs9OyRgGyhC?=\r\n" <del> ." =?ISO-2022-JP?B?GyRCPWgkZCRWJGk+Lk8pJE5pLjQ7O1IlUSUkJV0lUSUkJV0lUSUkGyhC?=\r\n" <del> ." =?ISO-2022-JP?B?GyRCJV0kTiU3JWUhPCVqJXMlLCVzJTclZSE8JWolcyUsJXMkTiUwGyhC?=\r\n" <del> ." =?ISO-2022-JP?B?GyRCITwlaiVzJUAlJCUwITwlaiVzJUAlJCROJV0lcyVdJTMlVCE8GyhC?=\r\n" <del> ." =?ISO-2022-JP?B?GyRCJE4lXSVzJV0lMyVKITwkTkQ5NVdMPyRORDk9dRsoQg==?= <cake@cakephp.org>"); <add> $expected = array("=?ISO-2022-JP?B?GyRCPHc4Qkw1PHc4Qkw1OF45ZSROOyQkakBaJGwzJDo9TXg/ZTV7GyhC?=\r\n" . <add> " =?ISO-2022-JP?B?GyRCJE4/ZTlUS3YxQE1oS3ZJd01oS3Y/KSQmPzIkaz1oJEs9OyRgGyhC?=\r\n" . <add> " =?ISO-2022-JP?B?GyRCPWgkZCRWJGk+Lk8pJE5pLjQ7O1IlUSUkJV0lUSUkJV0lUSUkGyhC?=\r\n" . <add> " =?ISO-2022-JP?B?GyRCJV0kTiU3JWUhPCVqJXMlLCVzJTclZSE8JWolcyUsJXMkTiUwGyhC?=\r\n" . <add> " =?ISO-2022-JP?B?GyRCITwlaiVzJUAlJCUwITwlaiVzJUAlJCROJV0lcyVdJTMlVCE8GyhC?=\r\n" . <add> " =?ISO-2022-JP?B?GyRCJE4lXSVzJV0lMyVKITwkTkQ5NVdMPyRORDk9dRsoQg==?= <cake@cakephp.org>"); <ide> $this->assertSame($expected, $result); <ide> } <ide> <ide> public function testSubjectJapanese() { <ide> $this->assertSame($this->CakeEmail->subject(), $expected); <ide> <ide> $this->CakeEmail->subject('長い長い長いSubjectの場合はfoldingするのが正しいんだけどいったいどうなるんだろう?'); <del> $expected = "=?ISO-2022-JP?B?GyRCRDkkJEQ5JCREOSQkGyhCU3ViamVjdBskQiROPmw5ZyRPGyhCZm9s?=\r\n" <del> ." =?ISO-2022-JP?B?ZGluZxskQiQ5JGskTiQsQDUkNyQkJHMkQCQxJEkkJCRDJD8kJCRJGyhC?=\r\n" <del> ." =?ISO-2022-JP?B?GyRCJCYkSiRrJHMkQCRtJCYhKRsoQg==?="; <add> $expected = "=?ISO-2022-JP?B?GyRCRDkkJEQ5JCREOSQkGyhCU3ViamVjdBskQiROPmw5ZyRPGyhCZm9s?=\r\n" . <add> " =?ISO-2022-JP?B?ZGluZxskQiQ5JGskTiQsQDUkNyQkJHMkQCQxJEkkJCRDJD8kJCRJGyhC?=\r\n" . <add> " =?ISO-2022-JP?B?GyRCJCYkSiRrJHMkQCRtJCYhKRsoQg==?="; <ide> $this->assertSame($this->CakeEmail->subject(), $expected); <ide> } <ide> <del> <ide> /** <ide> * testHeaders method <ide> * <ide> public function testConfig() { <ide> <ide> $this->CakeEmail->config(array()); <ide> $this->assertSame($transportClass->config(), array()); <del> <ide> } <ide> <ide> /** <ide> public function testSendWithInlineAttachments() { <ide> $this->assertContains($expected, $result['message']); <ide> $this->assertContains('--rel-' . $boundary . '--', $result['message']); <ide> $this->assertContains('--' . $boundary . 
'--', $result['message']); <del>} <add> } <ide> <ide> /** <ide> * testSendWithLog method <ide> public function testMessage() { <ide> $this->assertTrue($this->checkContentTransferEncoding($message, '7bit')); <ide> } <ide> <del> <ide> /** <ide> * testReset method <ide> * <ide> public function testEncode() { <ide> <ide> $this->CakeEmail->headerCharset = 'ISO-2022-JP'; <ide> $result = $this->CakeEmail->encode('長い長い長いSubjectの場合はfoldingするのが正しいんだけどいったいどうなるんだろう?'); <del> $expected = "=?ISO-2022-JP?B?GyRCRDkkJEQ5JCREOSQkGyhCU3ViamVjdBskQiROPmw5ZyRPGyhCZm9s?=\r\n" <del> . " =?ISO-2022-JP?B?ZGluZxskQiQ5JGskTiQsQDUkNyQkJHMkQCQxJEkkJCRDJD8kJCRJGyhC?=\r\n" <del> . " =?ISO-2022-JP?B?GyRCJCYkSiRrJHMkQCRtJCYhKRsoQg==?="; <add> $expected = "=?ISO-2022-JP?B?GyRCRDkkJEQ5JCREOSQkGyhCU3ViamVjdBskQiROPmw5ZyRPGyhCZm9s?=\r\n" . <add> " =?ISO-2022-JP?B?ZGluZxskQiQ5JGskTiQsQDUkNyQkJHMkQCQxJEkkJCRDJD8kJCRJGyhC?=\r\n" . <add> " =?ISO-2022-JP?B?GyRCJCYkSiRrJHMkQCRtJCYhKRsoQg==?="; <ide> $this->assertSame($expected, $result); <ide> } <ide> } <ide><path>lib/Cake/Test/Case/Network/Email/SmtpTransportTest.php <ide> public function setCakeEmail($cakeEmail) { <ide> * @return void <ide> */ <ide> protected function _generateSocket() { <del> return; <ide> } <ide> <ide> /** <ide><path>lib/Cake/Test/Case/Network/Http/DigestAuthenticationTest.php <ide> public function testMultipleRequest() { <ide> $this->assertTrue(strpos($this->HttpSocket->request['header']['Authorization'], 'nc=00000003') > 0); <ide> $this->assertEquals($auth['nc'], 4); <ide> $responsePos = strpos($this->HttpSocket->request['header']['Authorization'], 'response='); <del> $response2 = substr($this->HttpSocket->request['header']['Authorization'], $responsePos + 10, 32); <del> $this->assertNotEquals($response, $response2); <add> $responseB = substr($this->HttpSocket->request['header']['Authorization'], $responsePos + 10, 32); <add> $this->assertNotEquals($response, $responseB); <ide> } <ide> <ide> /** <ide><path>lib/Cake/Test/Case/Network/Http/HttpResponseTest.php <ide> public function tokenEscapeChars($hex = true, $chars = null) { <ide> * @package Cake.Test.Case.Network.Http <ide> */ <ide> class HttpResponseTest extends CakeTestCase { <add> <ide> /** <ide> * This function sets up a HttpResponse <ide> * <ide> public function testUnescapeToken() { <ide> $unescapedToken = $this->HttpResponse->unescapeToken($token); <ide> $expectedToken = 'My-special-' . $char . '-Token'; <ide> <del> $this->assertEquals($unescapedToken, $expectedToken, 'Test token unescaping for ASCII '.ord($char)); <add> $this->assertEquals($unescapedToken, $expectedToken, 'Test token unescaping for ASCII ' . ord($char)); <ide> } <ide> <ide> $token = 'Extreme-":"Token-" "-""""@"-test'; <ide> public function testArrayAccess() { <ide> ); <ide> $this->HttpResponse->body = 'This is a test!'; <ide> $this->HttpResponse->raw = "HTTP/1.1 200 OK\r\nServer: CakePHP\r\nContEnt-Type: text/plain\r\n\r\nThis is a test!"; <del> <del> $expected1 = "HTTP/1.1 200 OK\r\n"; <del> $this->assertEquals($this->HttpResponse['raw']['status-line'], $expected1); <del> $expected2 = "Server: CakePHP\r\nContEnt-Type: text/plain\r\n"; <del> $this->assertEquals($this->HttpResponse['raw']['header'], $expected2); <del> $expected3 = 'This is a test!'; <del> $this->assertEquals($this->HttpResponse['raw']['body'], $expected3); <del> $expected = $expected1 . $expected2 . "\r\n" . 
$expected3; <add> $expectedOne = "HTTP/1.1 200 OK\r\n"; <add> $this->assertEquals($this->HttpResponse['raw']['status-line'], $expectedOne); <add> $expectedTwo = "Server: CakePHP\r\nContEnt-Type: text/plain\r\n"; <add> $this->assertEquals($this->HttpResponse['raw']['header'], $expectedTwo); <add> $expectedThree = 'This is a test!'; <add> $this->assertEquals($this->HttpResponse['raw']['body'], $expectedThree); <add> $expected = $expectedOne . $expectedTwo . "\r\n" . $expectedThree; <ide> $this->assertEquals($this->HttpResponse['raw']['response'], $expected); <ide> <ide> $expected = 'HTTP/1.1'; <ide> public function testArrayAccess() { <ide> $this->assertSame($this->HttpResponse['raw']['header'], null); <ide> } <ide> <del>} <ide>\ No newline at end of file <add>} <ide><path>lib/Cake/Test/Case/Network/Http/HttpSocketTest.php <ide> public function tokenEscapeChars($hex = true, $chars = null) { <ide> * @param string $token Token to escape <ide> * @return string Escaped token <ide> */ <del> public function EscapeToken($token, $chars = null) { <add> public function escapeToken($token, $chars = null) { <ide> return parent::_escapeToken($token, $chars); <ide> } <ide> <ide> public function testRequestCustomResponse() { <ide> $this->assertInstanceOf('CustomResponse', $response); <ide> $this->assertEquals($response->first10, 'HTTP/1.x 2'); <ide> } <del> <ide> <ide> /** <ide> * testRequestWithRedirect method <ide> public function testRequestWithRedirectAsTrue() { <ide> $serverResponse2 = "HTTP/1.x 200 OK\r\nDate: Mon, 16 Apr 2007 04:14:16 GMT\r\nServer: CakeHttp Server\r\nContent-Type: text/html\r\n\r\n<h1>You have been redirected</h1>"; <ide> $this->Socket->expects($this->at(1))->method('read')->will($this->returnValue($serverResponse1)); <ide> $this->Socket->expects($this->at(4))->method('read')->will($this->returnValue($serverResponse2)); <del> <add> <ide> $response = $this->Socket->request($request); <ide> $this->assertEquals('<h1>You have been redirected</h1>', $response->body()); <ide> } <del> <add> <ide> public function testRequestWithRedirectAsInt() { <ide> $request = array( <ide> 'uri' => 'http://localhost/oneuri', <ide> public function testRequestWithRedirectAsInt() { <ide> $serverResponse2 = "HTTP/1.x 200 OK\r\nDate: Mon, 16 Apr 2007 04:14:16 GMT\r\nServer: CakeHttp Server\r\nContent-Type: text/html\r\n\r\n<h1>You have been redirected</h1>"; <ide> $this->Socket->expects($this->at(1))->method('read')->will($this->returnValue($serverResponse1)); <ide> $this->Socket->expects($this->at(4))->method('read')->will($this->returnValue($serverResponse2)); <del> <add> <ide> $response = $this->Socket->request($request); <ide> $this->assertEquals(1, $this->Socket->request['redirect']); <ide> } <del> <add> <ide> public function testRequestWithRedirectAsIntReachingZero() { <ide> $request = array( <ide> 'uri' => 'http://localhost/oneuri', <ide> public function testRequestWithRedirectAsIntReachingZero() { <ide> $serverResponse2 = "HTTP/1.x 302 Found\r\nDate: Mon, 16 Apr 2007 04:14:16 GMT\r\nServer: CakeHttp Server\r\nContent-Type: text/html\r\nLocation: http://localhost/anotheruri\r\n\r\n"; <ide> $this->Socket->expects($this->at(1))->method('read')->will($this->returnValue($serverResponse1)); <ide> $this->Socket->expects($this->at(4))->method('read')->will($this->returnValue($serverResponse2)); <del> <add> <ide> $response = $this->Socket->request($request); <ide> $this->assertEquals(0, $this->Socket->request['redirect']); <ide> $this->assertEquals(302, $response->code); <ide> 
$this->assertEquals('http://localhost/anotheruri', $response->getHeader('Location')); <ide> } <del> <ide> <ide> /** <ide> * testProxy method <ide> public function testBuildHeader() { <ide> <ide> $r = $this->Socket->buildHeader(array('Test@Field' => "My value")); <ide> $this->assertEquals($r, "Test\"@\"Field: My value\r\n"); <del> <ide> } <ide> <ide> /** <ide> public function testEscapeToken() { <ide> $escapedToken = $this->Socket->escapeToken($token); <ide> $expectedToken = 'My-special-"' . $char . '"-Token'; <ide> <del> $this->assertEquals($escapedToken, $expectedToken, 'Test token escaping for ASCII '.ord($char)); <add> $this->assertEquals($escapedToken, $expectedToken, 'Test token escaping for ASCII ' . ord($char)); <ide> } <ide> <ide> $token = 'Extreme-:Token- -"@-test';
7
Javascript
Javascript
update mmdmaterial shader
d75cd5f3d273cc9beca0130d9d394f3f49d2f80c
<ide><path>examples/js/loaders/MMDLoader.js <ide> THREE.ShaderLib[ 'mmd' ] = { <ide> THREE.UniformsLib[ "fog" ], <ide> THREE.UniformsLib[ "ambient" ], <ide> THREE.UniformsLib[ "lights" ], <add> THREE.UniformsLib[ "shadowmap" ], <ide> <ide> { <ide> "emissive" : { type: "c", value: new THREE.Color( 0x000000 ) }, <ide> THREE.ShaderLib[ 'mmd' ] = { <ide> // accumulation <ide> THREE.ShaderChunk[ "lights_phong_fragment" ], <ide> THREE.ShaderChunk[ "lights_template" ], <del> THREE.ShaderChunk[ "lightmap_fragment" ], <ide> <ide> // modulation <ide> THREE.ShaderChunk[ "aomap_fragment" ],
1
PHP
PHP
change some method names
50b6b32ec5b850ad39fc1e2302d0272520f5c599
<ide><path>app/Http/Controllers/Auth/PasswordController.php <ide> public function showResetRequestForm() <ide> * @param Request $request <ide> * @return Response <ide> */ <del> public function sendPasswordResetLink(Request $request) <add> public function sendResetLink(Request $request) <ide> { <ide> switch ($response = $this->passwords->sendResetLink($request->only('email'))) <ide> { <ide> public function sendPasswordResetLink(Request $request) <ide> * @param string $token <ide> * @return Response <ide> */ <del> public function showPasswordResetForm($token = null) <add> public function showResetForm($token = null) <ide> { <ide> if (is_null($token)) <ide> {
1
Javascript
Javascript
ignore transforms when retrieving width/height
c920ff6e322baddbb2bd04125c5577244dcb0320
<ide><path>src/css.js <ide> function augmentWidthOrHeight( elem, name, extra, isBorderBox, styles ) { <ide> <ide> function getWidthOrHeight( elem, name, extra ) { <ide> <del> // Start with offset property, which is equivalent to the border-box value <del> var val, <del> valueIsBorderBox = true, <add> // Start with computed style <add> var valueIsBorderBox, <ide> styles = getStyles( elem ), <add> val = curCSS( elem, name, styles ), <ide> isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; <ide> <del> // Support: IE <=11 only <del> // Running getBoundingClientRect on a disconnected node <del> // in IE throws an error. <del> if ( elem.getClientRects().length ) { <del> val = elem.getBoundingClientRect()[ name ]; <add> // Fall back to uncomputed css if necessary <add> if ( val < 0 || val == null ) { <add> val = elem.style[ name ]; <ide> } <ide> <del> // Some non-html elements return undefined for offsetWidth, so check for null/undefined <del> // svg - https://bugzilla.mozilla.org/show_bug.cgi?id=649285 <del> // MathML - https://bugzilla.mozilla.org/show_bug.cgi?id=491668 <del> if ( val <= 0 || val == null ) { <del> <del> // Fall back to computed then uncomputed css if necessary <del> val = curCSS( elem, name, styles ); <del> if ( val < 0 || val == null ) { <del> val = elem.style[ name ]; <del> } <del> <del> // Computed unit is not pixels. Stop here and return. <del> if ( rnumnonpx.test( val ) ) { <del> return val; <del> } <add> // Computed unit is not pixels. Stop here and return. <add> if ( rnumnonpx.test( val ) ) { <add> return val; <add> } <ide> <del> // Check for style in case a browser which returns unreliable values <del> // for getComputedStyle silently falls back to the reliable elem.style <del> valueIsBorderBox = isBorderBox && <del> ( support.boxSizingReliable() || val === elem.style[ name ] ); <add> // Check for style in case a browser which returns unreliable values <add> // for getComputedStyle silently falls back to the reliable elem.style <add> valueIsBorderBox = isBorderBox && <add> ( support.boxSizingReliable() || val === elem.style[ name ] ); <ide> <del> // Normalize "", auto, and prepare for extra <del> val = parseFloat( val ) || 0; <del> } <add> // Normalize "", auto, and prepare for extra <add> val = parseFloat( val ) || 0; <ide> <ide> // Use the active box-sizing model to add/subtract irrelevant styles <ide> return ( val + <ide><path>test/unit/dimensions.js <ide> QUnit.test( "outside view position (gh-2836)", function( assert ) { <ide> parent.scrollTop( 400 ); <ide> } ); <ide> <add>QUnit.test( "width/height on element with transform (gh-3193)", function( assert ) { <add> <add> assert.expect( 2 ); <add> <add> var $elem = jQuery( "<div style='width: 200px; height: 200px; transform: scale(2);' />" ) <add> .appendTo( "#qunit-fixture" ); <add> <add> assert.equal( $elem.width(), 200, "Width ignores transforms" ); <add> assert.equal( $elem.height(), 200, "Height ignores transforms" ); <add>} ); <add> <ide> } )();
2
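A browser-only sketch (not jQuery source) contrasting the two measurements involved in gh-3193; transforms do not affect layout, so computed style keeps the layout size while the client rect reflects the scale:

```js
// Browser-only sketch: computed style reports the layout size even
// under a transform, while getBoundingClientRect() includes the scale.
var el = document.createElement('div');
el.style.cssText = 'width: 200px; height: 200px; transform: scale(2);';
document.body.appendChild(el);

console.log(getComputedStyle(el).width);        // "200px"
console.log(el.getBoundingClientRect().width);  // 400
```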
Text
Text
clarify description of `readable.push()` method
63c0f15a8286ffb1fa99d4eb5faebfd844d8451a
<ide><path>doc/api/stream.md <ide> class SourceWrapper extends Readable { <ide> } <ide> ``` <ide> <del>The `readable.push()` method is intended be called only by `Readable` <del>implementers, and only from within the `readable._read()` method. <add>The `readable.push()` method is used to push the content <add>into the internal buffer. It can be driven by the `readable._read()` method. <ide> <ide> For streams not operating in object mode, if the `chunk` parameter of <ide> `readable.push()` is `undefined`, it will be treated as empty string or
1
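A small example in the spirit of the reworded doc, using Node's public stream API:

```js
// push() feeds the internal buffer, driven here from _read() via the
// read() option; pushing null signals the end of the data.
const { Readable } = require('stream');

const chunks = ['a', 'b', 'c'];
const r = new Readable({
  read() {
    this.push(chunks.shift() || null);
  }
});

r.on('data', (chunk) => console.log(chunk.toString()));  // a, b, c
```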
Javascript
Javascript
fix `one` method on evented interface
b534482da2bf66e2fd61226ad7026c6e46a16f3b
<ide><path>packages/ember-runtime/lib/mixins/evented.js <ide> Ember.Evented = Ember.Mixin.create( <ide> target = null; <ide> } <ide> <add> var self = this; <ide> var wrapped = function() { <del> Ember.removeListener(this, name, target, wrapped); <add> Ember.removeListener(self, name, target, wrapped); <add> <add> if ('string' === typeof method) { method = this[method]; } <ide> <ide> // Internally, a `null` target means that the target is <ide> // the first parameter to addListener. That means that <ide><path>packages/ember-runtime/tests/system/object/events_test.js <ide> test("binding an event can specify a different target", function() { <ide> equal(self, target); <ide> }); <ide> <add>test("a listener registered with one can take method as string and can be added with different target", function() { <add> var count = 0; <add> var target = {}; <add> target.fn = function() { count++; }; <add> <add> var obj = Ember.Object.create(Ember.Evented); <add> <add> obj.one('event!', target, 'fn'); <add> obj.trigger('event!'); <add> <add> equal(count, 1, "the event was triggered"); <add> <add> obj.trigger('event!'); <add> <add> equal(count, 1, "the event was not triggered again"); <add>});
2
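A self-contained sketch of the two fixes using a plain Node EventEmitter rather than Ember's internals:

```js
// Sketch: unsubscribe from the emitter captured as `self` (not `this`
// inside the callback), and resolve a string method name on the
// listener's target before invoking it.
const EventEmitter = require('events');

function one(emitter, name, target, method) {
  const self = emitter;
  function wrapped() {
    self.removeListener(name, wrapped);
    const fn = typeof method === 'string' ? target[method] : method;
    fn.apply(target, arguments);
  }
  emitter.on(name, wrapped);
}

const emitter = new EventEmitter();
const target = { count: 0, fn() { this.count++; } };

one(emitter, 'event!', target, 'fn');
emitter.emit('event!');
emitter.emit('event!');
console.log(target.count);  // 1 -- fired exactly once
```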
Python
Python
apply suggestions from code review
dd2ddde2a22b80ad8fee815276065f417bdd2177
<ide><path>numpy/core/function_base.py <ide> def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, <ide> <ide> .. versionchanged:: 1.20.0 <ide> Values are rounded towards ``-inf`` instead of ``0`` when an <del> integer ``dtype`` is specified. <add> integer ``dtype`` is specified. The old behavior can <add> still be obtained with ``np.linspace(start, stop, num).astype(int)`` <ide> <ide> Parameters <ide> ---------- <ide><path>numpy/core/tests/test_function_base.py <ide> def test_round_negative(self): <ide> y = linspace(-1, 3, num=8, dtype=int) <ide> t = array([-1, -1, 0, 0, 1, 1, 2, 3], dtype=int) <ide> assert_array_equal(y, t) <del> <del>
2
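To make the behavior change concrete (assuming NumPy >= 1.20, per the versionchanged note in the patch):

```python
# An integer dtype now floors values (matching the updated test),
# while the old truncate-toward-zero result is still available
# via .astype(int).
import numpy as np

print(np.linspace(-1, 3, num=8, dtype=int))   # [-1 -1  0  0  1  1  2  3]
print(np.linspace(-1, 3, num=8).astype(int))  # [-1  0  0  0  1  1  2  3]
```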
Javascript
Javascript
recognize array-likes in link-render-node
b9e1c56456273351d01d9e79b67fb8ccf629d31b
<ide><path>packages/ember-htmlbars/lib/hooks/link-render-node.js <ide> */ <ide> <ide> import subscribe from "ember-htmlbars/utils/subscribe"; <del>import { isArray } from "ember-metal/utils"; <add>import { isArray } from "ember-runtime/utils"; <ide> import { chain, read, readArray, isStream, addDependency } from "ember-metal/streams/utils"; <ide> import { findHelper } from "ember-htmlbars/system/lookup-helper"; <ide> <ide><path>packages/ember-htmlbars/tests/helpers/if_unless_test.js <ide> import EmberView from "ember-views/views/view"; <ide> import ObjectProxy from "ember-runtime/system/object_proxy"; <ide> import EmberObject from "ember-runtime/system/object"; <ide> import compile from "ember-template-compiler/system/compile"; <add>import ArrayProxy from "ember-runtime/system/array_proxy"; <ide> <ide> import { set } from 'ember-metal/property_set'; <ide> import { fmt } from 'ember-runtime/system/string'; <ide> QUnit.test("The `if` helper updates if an object proxy gains or loses context", <ide> equal(view.$().text(), ''); <ide> }); <ide> <del>QUnit.test("The `if` helper updates if an array is empty or not", function() { <add>function testIfArray(array) { <ide> view = EmberView.create({ <del> array: Ember.A(), <add> array: array, <ide> <ide> template: compile('{{#if view.array}}Yep{{/if}}') <ide> }); <ide> QUnit.test("The `if` helper updates if an array is empty or not", function() { <ide> }); <ide> <ide> equal(view.$().text(), ''); <add> <add>} <add> <add>QUnit.test("The `if` helper updates if an array is empty or not", function() { <add> testIfArray(Ember.A()); <add>}); <add> <add>QUnit.test("The `if` helper updates if an array-like object is empty or not", function() { <add> testIfArray(ArrayProxy.create({ content: Ember.A([]) })); <ide> }); <ide> <ide> QUnit.test("The `if` helper updates when the value changes", function() {
2
Ruby
Ruby
drop variable assignment in validations
c082e3f615a078364172f0a6d8a335d6bf353d94
<ide><path>activerecord/lib/active_record/validations.rb <ide> def valid?(context = nil) <ide> protected <ide> <ide> def perform_validations(options={}) # :nodoc: <del> perform_validation = options[:validate] != false <del> perform_validation ? valid?(options[:context]) : true <add> options[:validate] == false || valid?(options[:context]) <ide> end <ide> end <ide> end
1
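A standalone sketch of the simplified guard; `valid?` below is a stub stand-in, not the Active Record method:

```ruby
# Short-circuit || makes the temporary flag unnecessary: validation is
# skipped only when validate: false is passed explicitly.
def valid?(_context = nil)
  puts "running validations"
  true
end

def perform_validations(options = {})
  options[:validate] == false || valid?(options[:context])
end

perform_validations(validate: false)  # skips valid? entirely
perform_validations                   # runs validations
```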
Python
Python
simplify names; bugfix for single-gpu training
1c47fab2a8ca1aebb67fc9972b14a76034147c6d
<ide><path>research/deeplab/common.py <ide> # Test set name. <ide> TEST_SET = 'test' <ide> <del> <ide> class ModelOptions( <ide> collections.namedtuple('ModelOptions', [ <ide> 'outputs_to_num_classes', <ide><path>research/deeplab/train.py <ide> def _build_deeplab(inputs_queue, outputs_to_num_classes, ignore_label): <ide> """ <ide> samples = inputs_queue.dequeue() <ide> <del> # add name input and label so we can add to summary <del> samples[common.IMAGE] = tf.identity(samples[common.IMAGE], 'input_image') <del> samples[common.LABEL] = tf.identity(samples[common.LABEL], 'input_label') <add> # add name to input and label nodes so we can add to summary <add> samples[common.IMAGE] = tf.identity(samples[common.IMAGE], name = common.IMAGE) <add> samples[common.LABEL] = tf.identity(samples[common.LABEL], name = common.LABEL) <ide> <ide> model_options = common.ModelOptions( <ide> outputs_to_num_classes=outputs_to_num_classes, <ide> def _build_deeplab(inputs_queue, outputs_to_num_classes, ignore_label): <ide> # add name to graph node so we can add to summary <ide> outputs_to_scales_to_logits[common.OUTPUT_TYPE][model._MERGED_LOGITS_SCOPE] = tf.identity( <ide> outputs_to_scales_to_logits[common.OUTPUT_TYPE][model._MERGED_LOGITS_SCOPE], <del> name = OUTPUT_MERGED_LOGITS_NODE <add> name = common.OUTPUT_TYPE <ide> ) <ide> <ide> for output, num_classes in six.iteritems(outputs_to_num_classes): <ide> def main(unused_argv): <ide> <ide> # Add summaries for images, labels, semantic predictions <ide> if FLAGS.save_summaries_images: <del> summary_image = graph.get_tensor_by_name('%s/%s:0' % first_clone_slope, INPUT_LABEL_NODE) <del> summaries.add(tf.summary.image('samples/%s' % INPUT_IMAGE_NODE, summary_image)) <del> <del> summary_label = tf.cast(graph.get_tensor_by_name('%s/%s:0' % first_clone_slope, INPUT_LABEL_NODE), tf.uint8) <del> summaries.add(tf.summary.image('samples/%s' % INPUT_LABEL_NODE, summary_label)) <del> <del> predictions = tf.cast(tf.expand_dims(tf.argmax(graph.get_tensor_by_name('%s/%s:0' % first_clone_scope, OUTPUT_MERGED_LOGITS_NODE), 3), -1), tf.uint8) <del> summaries.add(tf.summary.image('samples/%s' % OUTPUT_MERGED_LOGITS_NODE, predictions)) <add> summary_image = graph.get_tensor_by_name( <add> ('%s/%s:0' % (first_clone_scope, common.IMAGE)).strip('/')) <add> summaries.add(tf.summary.image('samples/%s' % common.IMAGE, summary_image)) <add> <add> summary_label = tf.cast(graph.get_tensor_by_name( <add> ('%s/%s:0' % (first_clone_scope, common.LABEL)).strip('/')), <add> tf.uint8) <add> summaries.add(tf.summary.image('samples/%s' % common.LABEL, summary_label)) <add> <add> predictions = tf.cast(tf.expand_dims(tf.argmax(graph.get_tensor_by_name( <add> ('%s/%s:0' % (first_clone_scope, common.OUTPUT_TYPE)).strip('/')), <add> 3), -1), tf.uint8) <add> summaries.add(tf.summary.image('samples/%s' % common.OUTPUT_TYPE, predictions)) <ide> <ide> # Add summaries for losses. <ide> for loss in tf.get_collection(tf.GraphKeys.LOSSES, first_clone_scope):
2
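A small sketch of why the `.strip('/')` matters in the single-GPU case, where the first clone scope is the empty string:

```python
# With an empty clone scope, '%s/%s:0' would produce a leading slash
# ('/image:0'), which graph.get_tensor_by_name rejects; strip('/')
# removes it while leaving scoped multi-clone names intact.
first_clone_scope = ''  # single-GPU / single-clone case
name = ('%s/%s:0' % (first_clone_scope, 'image')).strip('/')
print(name)  # 'image:0'

first_clone_scope = 'clone_0'  # multi-clone case keeps the scope
name = ('%s/%s:0' % (first_clone_scope, 'image')).strip('/')
print(name)  # 'clone_0/image:0'
```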
Python
Python
update changelog.py docstring
2dfa9e88864f17f7d097e413fa74561feb8f6515
<ide><path>tools/changelog.py <ide> #!/usr/bin/env python <ide> # -*- encoding:utf-8 -*- <ide> """ <del>Script to generate contribor and pull request lists <add>Script to generate contributor and pull request lists <ide> <ide> This script generates contributor and pull request lists for release <del>announcements using Github v3 protocol. Use requires an authentication token in <add>changelogs using Github v3 protocol. Use requires an authentication token in <ide> order to have sufficient bandwidth, you can get one following the directions at <ide> `<https://help.github.com/articles/creating-an-access-token-for-command-line-use/>_ <ide> Don't add any scope, as the default is read access to public information. The <ide> Examples <ide> -------- <ide> <del>From the bash command line with $GITHUB token. <add>From the bash command line with $GITHUB token:: <ide> <del> $ ./tools/announce $GITHUB v1.11.0..v1.11.1 > announce.rst <add> $ ./tools/announce $GITHUB v1.13.0..v1.14.0 > 1.14.0-changelog.rst <ide> <ide> """ <ide> from __future__ import print_function, division
1
Python
Python
return the coefficients array directly
389bd44e32b0eace0d024b126931a0a00d14cffe
<ide><path>numpy/lib/polynomial.py <ide> class poly1d(object): <ide> <ide> @property <ide> def coeffs(self): <del> """ A copy of the polynomial coefficients """ <del> return self._coeffs.copy() <add> """ The polynomial coefficients """ <add> return self._coeffs <add> <add> @coeffs.setter <add> def coeffs(self, value): <add> # allowing this makes p.coeffs *= 2 legal <add> if value is not self._coeffs: <add> raise AttributeError("Cannot set attribute") <ide> <ide> @property <ide> def variable(self): <ide><path>numpy/lib/tests/test_polynomial.py <ide> def test_poly_eq(self): <ide> assert_equal(p == p2, False) <ide> assert_equal(p != p2, True) <ide> <del> def test_poly_coeffs_immutable(self): <del> """ Coefficients should not be modifiable """ <add> def test_poly_coeffs_mutable(self): <add> """ Coefficients should be modifiable """ <ide> p = np.poly1d([1, 2, 3]) <ide> <del> try: <del> # despite throwing an exception, this used to change state <del> p.coeffs += 1 <del> except Exception: <del> pass <del> assert_equal(p.coeffs, [1, 2, 3]) <add> p.coeffs += 1 <add> assert_equal(p.coeffs, [2, 3, 4]) <ide> <ide> p.coeffs[2] += 10 <del> assert_equal(p.coeffs, [1, 2, 3]) <add> assert_equal(p.coeffs, [2, 3, 14]) <add> <add> # this never used to be allowed - let's not add features to deprecated <add> # APIs <add> assert_raises(AttributeError, setattr, p, 'coeffs', np.array(1)) <ide> <ide> <ide> if __name__ == "__main__":
2
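Mirroring the updated test, a short demonstration of the now-mutable coefficients view:

```python
# coeffs is now returned directly (no copy), so in-place operations
# on it mutate the polynomial.
import numpy as np

p = np.poly1d([1, 2, 3])
p.coeffs += 1
print(p.coeffs)      # [2 3 4]
p.coeffs[2] += 10
print(p.coeffs)      # [ 2  3 14]
```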
Python
Python
remove sys.version_info[0] == 2 or 3
798b3b3899e786273ff280b3bffc332749cff348
<ide><path>examples/contrib/run_swag.py <ide> import logging <ide> import os <ide> import random <del>import sys <ide> <ide> import numpy as np <ide> import torch <ide> def __init__(self, example_id, choices_features, label): <ide> <ide> def read_swag_examples(input_file, is_training=True): <ide> with open(input_file, "r", encoding="utf-8") as f: <del> reader = csv.reader(f) <del> lines = [] <del> for line in reader: <del> if sys.version_info[0] == 2: <del> line = list(unicode(cell, "utf-8") for cell in line) # noqa: F821 <del> lines.append(line) <add> lines = list(csv.reader(f)) <ide> <ide> if is_training and lines[0][-1] != "label": <ide> raise ValueError("For training, the input file must contain a label column.") <ide><path>examples/utils_multiple_choice.py <ide> import json <ide> import logging <ide> import os <del>import sys <ide> from io import open <ide> from typing import List <ide> <ide> def get_labels(self): <ide> <ide> def _read_csv(self, input_file): <ide> with open(input_file, "r", encoding="utf-8") as f: <del> reader = csv.reader(f) <del> lines = [] <del> for line in reader: <del> if sys.version_info[0] == 2: <del> line = list(unicode(cell, "utf-8") for cell in line) # noqa: F821 <del> lines.append(line) <del> return lines <add> return list(csv.reader(f)) <ide> <ide> def _create_examples(self, lines: List[List[str]], type: str): <ide> """Creates examples for the training and dev sets.""" <ide><path>src/transformers/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py <ide> import argparse <ide> import logging <ide> import os <add>import pickle <ide> import sys <ide> from io import open <ide> <ide> from transformers.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES <ide> <ide> <del>if sys.version_info[0] == 2: <del> import cPickle as pickle <del>else: <del> import pickle <del> <del> <ide> logging.basicConfig(level=logging.INFO) <ide> <ide> # We do this to be able to load python 2 datasets pickles <ide><path>src/transformers/data/processors/utils.py <ide> import csv <ide> import json <ide> import logging <del>import sys <ide> <ide> from ...file_utils import is_tf_available, is_torch_available <ide> <ide> class DataProcessor(object): <ide> def _read_tsv(cls, input_file, quotechar=None): <ide> """Reads a tab separated value file.""" <ide> with open(input_file, "r", encoding="utf-8-sig") as f: <del> reader = csv.reader(f, delimiter="\t", quotechar=quotechar) <del> lines = [] <del> for line in reader: <del> if sys.version_info[0] == 2: <del> line = list(unicode(cell, "utf-8") for cell in line) # noqa: F821 <del> lines.append(line) <del> return lines <add> return list(csv.reader(f, delimiter="\t", quotechar=quotechar)) <ide> <ide> <ide> class SingleSentenceClassificationProcessor(DataProcessor): <ide><path>src/transformers/file_utils.py <ide> def filename_to_url(filename, cache_dir=None): <ide> """ <ide> if cache_dir is None: <ide> cache_dir = TRANSFORMERS_CACHE <del> if sys.version_info[0] == 3 and isinstance(cache_dir, Path): <add> if isinstance(cache_dir, Path): <ide> cache_dir = str(cache_dir) <ide> <ide> cache_path = os.path.join(cache_dir, filename) <ide> def cached_path( <ide> """ <ide> if cache_dir is None: <ide> cache_dir = TRANSFORMERS_CACHE <del> if sys.version_info[0] == 3 and isinstance(url_or_filename, Path): <add> if isinstance(url_or_filename, Path): <ide> url_or_filename = str(url_or_filename) <del> if sys.version_info[0] == 3 and isinstance(cache_dir, Path): <add> if isinstance(cache_dir, Path): <ide> cache_dir = str(cache_dir) <ide> <ide> if 
is_remote_url(url_or_filename): <ide> def get_from_cache( <ide> """ <ide> if cache_dir is None: <ide> cache_dir = TRANSFORMERS_CACHE <del> if sys.version_info[0] == 3 and isinstance(cache_dir, Path): <del> cache_dir = str(cache_dir) <del> if sys.version_info[0] == 2 and not isinstance(cache_dir, str): <add> if isinstance(cache_dir, Path): <ide> cache_dir = str(cache_dir) <ide> <ide> if not os.path.exists(cache_dir): <ide> def get_from_cache( <ide> except (EnvironmentError, requests.exceptions.Timeout): <ide> etag = None <ide> <del> if sys.version_info[0] == 2 and etag is not None: <del> etag = etag.decode("utf-8") <ide> filename = url_to_filename(url, etag) <ide> <ide> # get cache path to put the file <ide> def _resumable_file_manager(): <ide> meta = {"url": url, "etag": etag} <ide> meta_path = cache_path + ".json" <ide> with open(meta_path, "w") as meta_file: <del> output_string = json.dumps(meta) <del> if sys.version_info[0] == 2 and isinstance(output_string, str): <del> output_string = unicode(output_string, "utf-8") # noqa: F821 <del> meta_file.write(output_string) <add> json.dump(meta, meta_file) <ide> <ide> return cache_path <ide><path>src/transformers/modeling_bert.py <ide> import logging <ide> import math <ide> import os <del>import sys <ide> <ide> import torch <ide> from torch import nn <ide> class BertIntermediate(nn.Module): <ide> def __init__(self, config): <ide> super(BertIntermediate, self).__init__() <ide> self.dense = nn.Linear(config.hidden_size, config.intermediate_size) <del> if isinstance(config.hidden_act, str) or ( <del> sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode) # noqa: F821 <del> ): <add> if isinstance(config.hidden_act, str): <ide> self.intermediate_act_fn = ACT2FN[config.hidden_act] <ide> else: <ide> self.intermediate_act_fn = config.hidden_act <ide> class BertPredictionHeadTransform(nn.Module): <ide> def __init__(self, config): <ide> super(BertPredictionHeadTransform, self).__init__() <ide> self.dense = nn.Linear(config.hidden_size, config.hidden_size) <del> if isinstance(config.hidden_act, str) or ( <del> sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode) # noqa: F821 <del> ): <add> if isinstance(config.hidden_act, str): <ide> self.transform_act_fn = ACT2FN[config.hidden_act] <ide> else: <ide> self.transform_act_fn = config.hidden_act <ide><path>src/transformers/modeling_tf_albert.py <ide> <ide> <ide> import logging <del>import sys <ide> <ide> import tensorflow as tf <ide> <ide> def __init__(self, config, **kwargs): <ide> config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="ffn" <ide> ) <ide> <del> if isinstance(config.hidden_act, str) or ( <del> sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode) # noqa: F821 <del> ): <add> if isinstance(config.hidden_act, str): <ide> self.activation = ACT2FN[config.hidden_act] <ide> else: <ide> self.activation = config.hidden_act <ide> def __init__(self, config, input_embeddings, **kwargs): <ide> self.dense = tf.keras.layers.Dense( <ide> config.embedding_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" <ide> ) <del> if isinstance(config.hidden_act, str) or ( <del> sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode) # noqa: F821 <del> ): <add> if isinstance(config.hidden_act, str): <ide> self.activation = ACT2FN[config.hidden_act] <ide> else: <ide> self.activation = config.hidden_act <ide><path>src/transformers/modeling_tf_bert.py <ide> <ide> <ide> import logging <del>import sys 
<ide> <ide> import numpy as np <ide> import tensorflow as tf <ide> def __init__(self, config, **kwargs): <ide> self.dense = tf.keras.layers.Dense( <ide> config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" <ide> ) <del> if isinstance(config.hidden_act, str) or ( <del> sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode) # noqa: F821 <del> ): <add> if isinstance(config.hidden_act, str): <ide> self.intermediate_act_fn = ACT2FN[config.hidden_act] <ide> else: <ide> self.intermediate_act_fn = config.hidden_act <ide> def __init__(self, config, **kwargs): <ide> self.dense = tf.keras.layers.Dense( <ide> config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" <ide> ) <del> if isinstance(config.hidden_act, str) or ( <del> sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode) # noqa: F821 <del> ): <add> if isinstance(config.hidden_act, str): <ide> self.transform_act_fn = ACT2FN[config.hidden_act] <ide> else: <ide> self.transform_act_fn = config.hidden_act <ide><path>src/transformers/modeling_tf_xlnet.py <ide> <ide> <ide> import logging <del>import sys <ide> <ide> import numpy as np <ide> import tensorflow as tf <ide> def __init__(self, config, **kwargs): <ide> config.d_model, kernel_initializer=get_initializer(config.initializer_range), name="layer_2" <ide> ) <ide> self.dropout = tf.keras.layers.Dropout(config.dropout) <del> if isinstance(config.ff_activation, str) or ( <del> sys.version_info[0] == 2 and isinstance(config.ff_activation, unicode) # noqa: F821 <del> ): <add> if isinstance(config.ff_activation, str): <ide> self.activation_function = ACT2FN[config.ff_activation] <ide> else: <ide> self.activation_function = config.ff_activation <ide><path>src/transformers/modeling_xlnet.py <ide> <ide> import logging <ide> import math <del>import sys <ide> <ide> import torch <ide> from torch import nn <ide> def __init__(self, config): <ide> self.layer_1 = nn.Linear(config.d_model, config.d_inner) <ide> self.layer_2 = nn.Linear(config.d_inner, config.d_model) <ide> self.dropout = nn.Dropout(config.dropout) <del> if isinstance(config.ff_activation, str) or ( <del> sys.version_info[0] == 2 and isinstance(config.ff_activation, unicode) # noqa: F821 <del> ): <add> if isinstance(config.ff_activation, str): <ide> self.activation_function = ACT2FN[config.ff_activation] <ide> else: <ide> self.activation_function = config.ff_activation <ide><path>src/transformers/tokenization_gpt2.py <ide> import json <ide> import logging <ide> import os <del>import sys <ide> from io import open <ide> <ide> import regex as re <ide> def bytes_to_unicode(): <ide> This is a signficant percentage of your normal, say, 32K bpe vocab. <ide> To avoid that, we want lookup tables between utf-8 bytes and unicode strings. 
<ide> """ <del> _chr = unichr if sys.version_info[0] == 2 else chr # noqa: F821 <ide> bs = ( <ide> list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1)) <ide> ) <ide> def bytes_to_unicode(): <ide> bs.append(b) <ide> cs.append(2 ** 8 + n) <ide> n += 1 <del> cs = [_chr(n) for n in cs] <add> cs = [chr(n) for n in cs] <ide> return dict(zip(bs, cs)) <ide> <ide> <ide> def _tokenize(self, text, add_prefix_space=False): <ide> <ide> bpe_tokens = [] <ide> for token in re.findall(self.pat, text): <del> if sys.version_info[0] == 2: <del> token = "".join( <del> self.byte_encoder[ord(b)] for b in token <del> ) # Maps all our bytes to unicode strings, avoiding controle tokens of the BPE (spaces in our case) <del> else: <del> token = "".join( <del> self.byte_encoder[b] for b in token.encode("utf-8") <del> ) # Maps all our bytes to unicode strings, avoiding controle tokens of the BPE (spaces in our case) <add> token = "".join( <add> self.byte_encoder[b] for b in token.encode("utf-8") <add> ) # Maps all our bytes to unicode strings, avoiding controle tokens of the BPE (spaces in our case) <ide> bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" ")) <ide> return bpe_tokens <ide> <ide><path>src/transformers/tokenization_transfo_xl.py <ide> import glob <ide> import logging <ide> import os <del>import sys <add>import pickle <ide> from collections import Counter, OrderedDict <ide> from io import open <ide> <ide> except ImportError: <ide> pass <ide> <del>if sys.version_info[0] == 2: <del> import cPickle as pickle <del>else: <del> import pickle <del> <ide> <ide> logger = logging.getLogger(__name__) <ide> <ide><path>tests/test_configuration_common.py <ide> <ide> import json <ide> import os <del> <del>from .test_tokenization_common import TemporaryDirectory <add>import tempfile <ide> <ide> <ide> class ConfigTester(object): <ide> def create_and_test_config_to_json_string(self): <ide> def create_and_test_config_to_json_file(self): <ide> config_first = self.config_class(**self.inputs_dict) <ide> <del> with TemporaryDirectory() as tmpdirname: <add> with tempfile.TemporaryDirectory() as tmpdirname: <ide> json_file_path = os.path.join(tmpdirname, "config.json") <ide> config_first.to_json_file(json_file_path) <ide> config_second = self.config_class.from_json_file(json_file_path) <ide> def create_and_test_config_to_json_file(self): <ide> def create_and_test_config_from_and_save_pretrained(self): <ide> config_first = self.config_class(**self.inputs_dict) <ide> <del> with TemporaryDirectory() as tmpdirname: <add> with tempfile.TemporaryDirectory() as tmpdirname: <ide> config_first.save_pretrained(tmpdirname) <ide> config_second = self.config_class.from_pretrained(tmpdirname) <ide> <ide><path>tests/test_model_card.py <ide> <ide> import json <ide> import os <add>import tempfile <ide> import unittest <ide> <ide> from transformers.modelcard import ModelCard <ide> <del>from .test_tokenization_common import TemporaryDirectory <del> <ide> <ide> class ModelCardTester(unittest.TestCase): <ide> def setUp(self): <ide> def test_model_card_to_json_string(self): <ide> def test_model_card_to_json_file(self): <ide> model_card_first = ModelCard.from_dict(self.inputs_dict) <ide> <del> with TemporaryDirectory() as tmpdirname: <add> with tempfile.TemporaryDirectory() as tmpdirname: <ide> filename = os.path.join(tmpdirname, "modelcard.json") <ide> model_card_first.to_json_file(filename) <ide> model_card_second = ModelCard.from_json_file(filename) <ide> def 
test_model_card_to_json_file(self): <ide> def test_model_card_from_and_save_pretrained(self): <ide> model_card_first = ModelCard.from_dict(self.inputs_dict) <ide> <del> with TemporaryDirectory() as tmpdirname: <add> with tempfile.TemporaryDirectory() as tmpdirname: <ide> model_card_first.save_pretrained(tmpdirname) <ide> model_card_second = ModelCard.from_pretrained(tmpdirname) <ide> <ide><path>tests/test_modeling_common.py <ide> import logging <ide> import os.path <ide> import random <del>import shutil <del>import sys <ide> import tempfile <ide> import unittest <ide> import uuid <ide> BERT_PRETRAINED_MODEL_ARCHIVE_MAP, <ide> ) <ide> <del>if sys.version_info[0] == 2: <del> <del> class TemporaryDirectory(object): <del> """Context manager for tempfile.mkdtemp() so it's usable with "with" statement.""" <del> <del> def __enter__(self): <del> self.name = tempfile.mkdtemp() <del> return self.name <del> <del> def __exit__(self, exc_type, exc_value, traceback): <del> shutil.rmtree(self.name) <del> <del> <del>else: <del> TemporaryDirectory = tempfile.TemporaryDirectory <del> unicode = str <del> <ide> <ide> def _config_zero_init(config): <ide> configs_no_init = copy.deepcopy(config) <ide> def test_save_load(self): <ide> out_2 = outputs[0].numpy() <ide> out_2[np.isnan(out_2)] = 0 <ide> <del> with TemporaryDirectory() as tmpdirname: <add> with tempfile.TemporaryDirectory() as tmpdirname: <ide> model.save_pretrained(tmpdirname) <ide> model = model_class.from_pretrained(tmpdirname) <ide> model.to(torch_device) <ide> def _create_and_check_torchscript(self, config, inputs_dict): <ide> except RuntimeError: <ide> self.fail("Couldn't trace module.") <ide> <del> with TemporaryDirectory() as tmp_dir_name: <add> with tempfile.TemporaryDirectory() as tmp_dir_name: <ide> pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") <ide> <ide> try: <ide> def test_head_pruning_save_load_from_pretrained(self): <ide> heads_to_prune = {0: list(range(1, self.model_tester.num_attention_heads)), -1: [0]} <ide> model.prune_heads(heads_to_prune) <ide> <del> with TemporaryDirectory() as temp_dir_name: <add> with tempfile.TemporaryDirectory() as temp_dir_name: <ide> model.save_pretrained(temp_dir_name) <ide> model = model_class.from_pretrained(temp_dir_name) <ide> model.to(torch_device) <ide> def test_head_pruning_integration(self): <ide> self.assertEqual(attentions[2].shape[-3], self.model_tester.num_attention_heads) <ide> self.assertEqual(attentions[3].shape[-3], self.model_tester.num_attention_heads) <ide> <del> with TemporaryDirectory() as temp_dir_name: <add> with tempfile.TemporaryDirectory() as temp_dir_name: <ide> model.save_pretrained(temp_dir_name) <ide> model = model_class.from_pretrained(temp_dir_name) <ide> model.to(torch_device) <ide><path>tests/test_modeling_tf_common.py <ide> import copy <ide> import os <ide> import random <del>import shutil <del>import sys <ide> import tempfile <ide> <ide> from transformers import is_tf_available, is_torch_available <ide> <ide> # from transformers.modeling_bert import BertModel, BertConfig, BERT_PRETRAINED_MODEL_ARCHIVE_MAP <ide> <del>if sys.version_info[0] == 2: <del> <del> class TemporaryDirectory(object): <del> """Context manager for tempfile.mkdtemp() so it's usable with "with" statement.""" <del> <del> def __enter__(self): <del> self.name = tempfile.mkdtemp() <del> return self.name <del> <del> def __exit__(self, exc_type, exc_value, traceback): <del> shutil.rmtree(self.name) <del> <del> <del>else: <del> TemporaryDirectory = tempfile.TemporaryDirectory <del> unicode = 
str <del> <ide> <ide> def _config_zero_init(config): <ide> configs_no_init = copy.deepcopy(config) <ide> def test_save_load(self): <ide> model = model_class(config) <ide> outputs = model(inputs_dict) <ide> <del> with TemporaryDirectory() as tmpdirname: <add> with tempfile.TemporaryDirectory() as tmpdirname: <ide> model.save_pretrained(tmpdirname) <ide> model = model_class.from_pretrained(tmpdirname) <ide> after_outputs = model(inputs_dict) <ide> def test_pt_tf_model_equivalence(self): <ide> self.assertLessEqual(max_diff, 2e-2) <ide> <ide> # Check we can load pt model in tf and vice-versa with checkpoint => model functions <del> with TemporaryDirectory() as tmpdirname: <add> with tempfile.TemporaryDirectory() as tmpdirname: <ide> pt_checkpoint_path = os.path.join(tmpdirname, "pt_model.bin") <ide> torch.save(pt_model.state_dict(), pt_checkpoint_path) <ide> tf_model = transformers.load_pytorch_checkpoint_in_tf2_model(tf_model, pt_checkpoint_path) <ide> def test_compile_tf_model(self): <ide> model = model_class(config) <ide> <ide> # Let's load it from the disk to be sure we can use pretrained weights <del> with TemporaryDirectory() as tmpdirname: <add> with tempfile.TemporaryDirectory() as tmpdirname: <ide> outputs = model(inputs_dict) # build the model <ide> model.save_pretrained(tmpdirname) <ide> model = model_class.from_pretrained(tmpdirname) <ide><path>tests/test_optimization.py <ide> <ide> <ide> import os <add>import tempfile <ide> import unittest <ide> <ide> from transformers import is_torch_available <ide> <del>from .test_tokenization_common import TemporaryDirectory <ide> from .utils import require_torch <ide> <ide> <ide> def unwrap_and_save_reload_schedule(scheduler, num_steps=10): <ide> scheduler.step() <ide> lrs.append(scheduler.get_lr()) <ide> if step == num_steps // 2: <del> with TemporaryDirectory() as tmpdirname: <add> with tempfile.TemporaryDirectory() as tmpdirname: <ide> file_name = os.path.join(tmpdirname, "schedule.bin") <ide> torch.save(scheduler.state_dict(), file_name) <ide> <ide><path>tests/test_tokenization_common.py <ide> <ide> <ide> import os <add>import pickle <ide> import shutil <del>import sys <ide> import tempfile <ide> from io import open <ide> <ide> <del>if sys.version_info[0] == 2: <del> import cPickle as pickle <del> <del> class TemporaryDirectory(object): <del> """Context manager for tempfile.mkdtemp() so it's usable with "with" statement.""" <del> <del> def __enter__(self): <del> self.name = tempfile.mkdtemp() <del> return self.name <del> <del> def __exit__(self, exc_type, exc_value, traceback): <del> shutil.rmtree(self.name) <del> <del> <del>else: <del> import pickle <del> <del> TemporaryDirectory = tempfile.TemporaryDirectory <del> unicode = str <del> <del> <ide> class TokenizerTesterMixin: <ide> <ide> tokenizer_class = None <ide> def test_save_and_load_tokenizer(self): <ide> <ide> before_tokens = tokenizer.encode("He is very happy, UNwant\u00E9d,running", add_special_tokens=False) <ide> <del> with TemporaryDirectory() as tmpdirname: <add> with tempfile.TemporaryDirectory() as tmpdirname: <ide> tokenizer.save_pretrained(tmpdirname) <ide> tokenizer = self.tokenizer_class.from_pretrained(tmpdirname) <ide> <ide> def test_pickle_tokenizer(self): <ide> text = "Munich and Berlin are nice cities" <ide> subwords = tokenizer.tokenize(text) <ide> <del> with TemporaryDirectory() as tmpdirname: <add> with tempfile.TemporaryDirectory() as tmpdirname: <ide> <ide> filename = os.path.join(tmpdirname, "tokenizer.bin") <ide> with open(filename, "wb") as handle: <ide> def 
test_required_methods_tokenizer(self): <ide> self.assertEqual(text_2, output_text) <ide> <ide> self.assertNotEqual(len(tokens_2), 0) <del> self.assertIsInstance(text_2, (str, unicode)) <add> self.assertIsInstance(text_2, str) <ide> <ide> def test_encode_decode_with_spaces(self): <ide> tokenizer = self.get_tokenizer() <ide> def test_pretrained_model_lists(self): <ide> self.assertListEqual(weights_list, weights_list_2) <ide> <ide> def test_mask_output(self): <del> if sys.version_info <= (3, 0): <del> return <del> <ide> tokenizer = self.get_tokenizer() <ide> <ide> if tokenizer.build_inputs_with_special_tokens.__qualname__.split(".")[0] != "PreTrainedTokenizer":
18
Python
Python
fix parsing of prices for a size
09217fef5b465331bcf8c4648c62474d64e1bf21
<ide><path>libcloud/drivers/rimuhosting.py <ide> def _to_size(self,plan): <ide> ram=plan['minimum_memory_mb'], <ide> disk=plan['minimum_disk_gb'], <ide> bandwidth=plan['minimum_data_transfer_allowance_gb'], <del> price=plan['monthly_recurring_fee_usd'], <add> price=plan['monthly_recurring_amt']['amt_usd'], <ide> driver=self.connection.driver) <ide> <ide> def _to_image(self,image):
1
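A minimal sketch of the payload shape this one-line fix targets: RimuHosting's plan JSON nests the USD amount under `monthly_recurring_amt` instead of exposing a flat `monthly_recurring_fee_usd` key. The sample values below are invented for illustration.

```python
# Hypothetical excerpt of a RimuHosting pricing-plan payload (values invented).
plan = {
    "minimum_memory_mb": 160,
    "minimum_disk_gb": 4,
    "minimum_data_transfer_allowance_gb": 30,
    "monthly_recurring_amt": {"amt_usd": 19.95},
}

# Old lookup -- the flat key is absent, so this raises KeyError:
# price = plan["monthly_recurring_fee_usd"]

# Fixed lookup -- the USD amount sits one level deeper:
price = plan["monthly_recurring_amt"]["amt_usd"]
print(price)  # 19.95
```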
Python
Python
fix python 2.5 test failure
0db427e15c5d952d22163ba41fc7d94c46c4e55c
<ide><path>libcloud/test/compute/test_deployment.py <ide> # See the License for the specific language governing permissions and <ide> # limitations under the License. <ide> <add>from __future__ import with_statement <add> <ide> import os <ide> import sys <ide> import time
1
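Context for the one-line fix above: Python 2.5 only supported the `with` statement behind a `__future__` import, so a test module using `with` would fail to compile on 2.5 without it. A minimal sketch (the import is a harmless no-op on later versions):

```python
from __future__ import with_statement  # required on Python 2.5, no-op afterwards

with open("example.txt", "w") as f:  # SyntaxError on 2.5 without the import
    f.write("hello")
```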
Javascript
Javascript
hoist harmony exports
0bd5d281e297a615ed688cabe55e82a592fc20db
<ide><path>lib/dependencies/HarmonyExportDependencyParserPlugin.js <ide> module.exports = AbstractPlugin.create({ <ide> } else { <ide> var immutable = statement.declaration && isImmutableStatement(statement.declaration); <ide> var hoisted = statement.declaration && isHoistedStatement(statement.declaration); <del> dep = new HarmonyExportSpecifierDependency(this.state.module, id, name, hoisted ? -0.5 : (statement.range[1] + 0.5), immutable); <add> dep = new HarmonyExportSpecifierDependency(this.state.module, id, name, !immutable || hoisted ? -0.5 : (statement.range[1] + 0.5), immutable); <ide> } <ide> dep.loc = statement.loc; <ide> this.state.current.addDependency(dep); <ide> function isImmutableStatement(statement) { <ide> <ide> function isHoistedStatement(statement) { <ide> if(statement.type === "FunctionDeclaration") return true; <del> if(statement.type === "VariableDeclaration" && statement.kind === "var") return true; <ide> return false; <ide> }
1
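An illustration (outside webpack) of the language behaviour behind this change: a function declaration is hoisted together with its body, while a `var` hoists only the binding — its value is not available early, which is presumably why `var` is dropped from `isHoistedStatement`.

```javascript
// Function declarations are hoisted with their body: callable before the statement.
console.log(square(3)); // 9

// `var` hoists only the binding, not the initializer: undefined at this point.
console.log(answer); // undefined

function square(x) {
  return x * x;
}
var answer = 42;
```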
Ruby
Ruby
fix more offences
49f9dff9b6ba1451d8c85927d5f75327bd2322d9
<ide><path>actionview/test/abstract_unit.rb <ide> def config <ide> class ActionDispatch::IntegrationTest < ActiveSupport::TestCase <ide> def self.build_app(routes = nil) <ide> routes ||= ActionDispatch::Routing::RouteSet.new.tap { |rs| <del> rs.draw {} <add> rs.draw { } <ide> } <ide> RoutedRackApp.new(routes) do |middleware| <ide> middleware.use ActionDispatch::ShowExceptions, ActionDispatch::PublicExceptions.new("#{FIXTURE_LOAD_PATH}/public") <ide><path>actionview/test/activerecord/polymorphic_routes_test.rb <ide> def self.use_relative_model_naming? <ide> <ide> class PolymorphicRoutesTest < ActionController::TestCase <ide> Routes = ActionDispatch::Routing::RouteSet.new <del> Routes.draw {} <add> Routes.draw { } <ide> include Routes.url_helpers <ide> <ide> default_url_options[:host] = "example.com" <ide><path>actionview/test/template/erb/helper.rb <ide> def protect_against_forgery?() false end <ide> class BlockTestCase < ActiveSupport::TestCase <ide> def render_content(start, inside, routes = nil) <ide> routes ||= ActionDispatch::Routing::RouteSet.new.tap do |rs| <del> rs.draw {} <add> rs.draw { } <ide> end <ide> context = Class.new(ViewContext) { <ide> include routes.url_helpers
3
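The offences being fixed all follow one pattern: empty blocks written `{}` instead of `{ }`. The cop enforcing this is presumably RuboCop's `Layout/SpaceInsideBlockBraces` configured to require a space for empty braces (an assumption — the diff does not name it). A minimal before/after sketch:

```ruby
def run
  yield if block_given?
end

run {}   # offending form: no space inside the empty block braces
run { }  # the form this commit converges on
```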
Python
Python
create prim.py (#397)
89f15bef0a32e8e2c1c26f773daebfbe4dd3564f
<ide><path>Graphs/prim.py <add>""" <add>Prim's Algorithm. <add> <add>Determines the minimum spanning tree(MST) of a graph using the Prim's Algorithm <add> <add>Create a list to store x the vertices. <add>G = [vertex(n) for n in range(x)] <add> <add>For each vertex in G, add the neighbors: <add>G[x].addNeighbor(G[y]) <add>G[y].addNeighbor(G[x]) <add> <add>For each vertex in G, add the edges: <add>G[x].addEdge(G[y], w) <add>G[y].addEdge(G[x], w) <add> <add>To solve run: <add>MST = prim(G, G[0]) <add>""" <add> <add>import math <add> <add> <add>class vertex(): <add> """Class Vertex.""" <add> <add> def __init__(self, id): <add> """ <add> Arguments: <add> id - input an id to identify the vertex <add> <add> Attributes: <add> neighbors - a list of the vertices it is linked to <add> edges - a dict to store the edges's weight <add> """ <add> self.id = str(id) <add> self.key = None <add> self.pi = None <add> self.neighbors = [] <add> self.edges = {} # [vertex:distance] <add> <add> def __lt__(self, other): <add> """Comparison rule to < operator.""" <add> return (self.key < other.key) <add> <add> def __repr__(self): <add> """Return the vertex id.""" <add> return self.id <add> <add> def addNeighbor(self, vertex): <add> """Add a pointer to a vertex at neighbor's list.""" <add> self.neighbors.append(vertex) <add> <add> def addEdge(self, vertex, weight): <add> """Destination vertex and weight.""" <add> self.edges[vertex.id] = weight <add> <add> <add>def prim(graph, root): <add> """ <add> Prim's Algorithm. <add> <add> Return a list with the edges of a Minimum Spanning Tree <add> <add> prim(graph, graph[0]) <add> """ <add> A = [] <add> for u in graph: <add> u.key = math.inf <add> u.pi = None <add> root.key = 0 <add> Q = graph[:] <add> while Q: <add> u = min(Q) <add> Q.remove(u) <add> for v in u.neighbors: <add> if (v in Q) and (u.edges[v.id] < v.key): <add> v.pi = u <add> v.key = u.edges[v.id] <add> for i in range(1, len(graph)): <add> A.append([graph[i].id, graph[i].pi.id]) <add> return(A)
1
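A small end-to-end run of the new module, following the recipe in its docstring (edge weights are arbitrary):

```python
from prim import vertex, prim  # assuming Graphs/prim.py is importable

# Triangle graph: 0-1 (w=1), 1-2 (w=2), 0-2 (w=3).
G = [vertex(n) for n in range(3)]
for x, y, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
    G[x].addNeighbor(G[y])
    G[y].addNeighbor(G[x])
    G[x].addEdge(G[y], w)
    G[y].addEdge(G[x], w)

print(prim(G, G[0]))  # [['1', '0'], ['2', '1']] -- each vertex with its MST parent
```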
Text
Text
remove indent from .dockerignore example
0f5a98122f92d3fef520773ea619523129794fd7
<ide><path>docs/reference/builder.md <ide> Here is an example `.dockerignore` file: <ide> <ide> ``` <ide> # comment <del> */temp* <del> */*/temp* <del> temp? <add>*/temp* <add>*/*/temp* <add>temp? <ide> ``` <ide> <ide> This file causes the following build behavior:
1
Python
Python
fix mypy errors in google.cloud.sensors
1f662571b2133df09da22aea35936bb10b8ebffa
<ide><path>airflow/providers/google/cloud/hooks/datafusion.py <ide> def get_pipeline_workflow( <ide> instance_url: str, <ide> pipeline_id: str, <ide> namespace: str = "default", <del> ) -> str: <add> ) -> Any: <ide> url = os.path.join( <ide> self._base_url(instance_url, namespace), <ide> quote(pipeline_name), <ide><path>airflow/providers/google/cloud/sensors/bigquery_dts.py <ide> def _normalize_state_list(self, states) -> Set[TransferState]: <ide> result = set() <ide> for state in states: <ide> if isinstance(state, str): <del> result.add(TransferState[state.upper()]) <add> # The proto.Enum type is indexable (via MetaClass and aliased) but MyPy is not able to <add> # infer this https://github.com/python/mypy/issues/8968 <add> result.add(TransferState[state.upper()]) # type: ignore[misc] <ide> elif isinstance(state, int): <ide> result.add(TransferState(state)) <ide> elif isinstance(state, TransferState): <ide><path>airflow/providers/google/cloud/sensors/datafusion.py <ide> def __init__( <ide> expected_statuses: Set[str], <ide> instance_name: str, <ide> location: str, <del> failure_statuses: Set[str] = None, <add> failure_statuses: Optional[Set[str]] = None, <ide> project_id: Optional[str] = None, <ide> namespace: str = "default", <ide> gcp_conn_id: str = 'google_cloud_default', <ide><path>airflow/providers/google/cloud/sensors/dataproc.py <ide> def __init__( <ide> *, <ide> project_id: str, <ide> dataproc_job_id: str, <del> region: str = None, <add> region: Optional[str] = None, <ide> location: Optional[str] = None, <ide> gcp_conn_id: str = 'google_cloud_default', <ide> wait_timeout: Optional[int] = None, <ide> def __init__( <ide> self.dataproc_job_id = dataproc_job_id <ide> self.region = region <ide> self.wait_timeout = wait_timeout <del> self.start_sensor_time = None <add> self.start_sensor_time: Optional[float] = None <ide> <ide> def execute(self, context: Dict): <ide> self.start_sensor_time = time.monotonic() <ide><path>airflow/providers/google/cloud/sensors/workflows.py <ide> def __init__( <ide> ): <ide> super().__init__(**kwargs) <ide> <del> self.success_states = success_states or {Execution.State.SUCCEEDED} <del> self.failure_states = failure_states or {Execution.State.FAILED, Execution.State.CANCELLED} <add> self.success_states = success_states or {Execution.State(Execution.State.SUCCEEDED)} <add> self.failure_states = failure_states or { <add> Execution.State(Execution.State.FAILED), <add> Execution.State(Execution.State.CANCELLED), <add> } <ide> self.workflow_id = workflow_id <ide> self.execution_id = execution_id <ide> self.location = location
5
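Most of these fixes share one pattern: under mypy's no-implicit-Optional checking, a parameter annotated `Set[str]` may not default to `None`; the annotation has to admit the default explicitly. A minimal reproduction (function names invented):

```python
from typing import Optional, Set


def before(failure_statuses: Set[str] = None) -> None:
    # mypy: Incompatible default for argument "failure_statuses"
    # (default has type "None", argument has type "Set[str]")
    ...


def after(failure_statuses: Optional[Set[str]] = None) -> None:
    ...  # accepted: the annotation now covers the None default
```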
Ruby
Ruby
convert FormulaText test to spec
8676960d09fda58ea36a34ce202ce30431474ac6
<ide><path>Library/Homebrew/test/audit_test.rb <ide> require "formulary" <ide> require "dev-cmd/audit" <ide> <del>class FormulaTextTests < Homebrew::TestCase <del> def setup <del> super <del> @dir = mktmpdir <del> end <del> <del> def formula_text(name, body = nil, options = {}) <del> path = Pathname.new "#{@dir}/#{name}.rb" <del> path.open("w") do |f| <del> f.write <<-EOS.undent <del> class #{Formulary.class_s(name)} < Formula <del> #{body} <del> end <del> #{options[:patch]} <del> EOS <del> end <del> FormulaText.new path <del> end <del> <del> def test_simple_valid_formula <del> ft = formula_text "valid", 'url "http://www.example.com/valid-1.0.tar.gz"' <del> <del> refute ft.data?, "The formula should not have DATA" <del> refute ft.end?, "The formula should not have __END__" <del> assert ft.trailing_newline?, "The formula should have a trailing newline" <del> <del> assert ft =~ /\burl\b/, "The formula should match 'url'" <del> assert_nil ft.line_number(/desc/), "The formula should not match 'desc'" <del> assert_equal 2, ft.line_number(/\burl\b/) <del> assert ft.include?("Valid"), "The formula should include \"Valid\"" <del> end <del> <del> def test_trailing_newline <del> ft = formula_text "newline" <del> assert ft.trailing_newline?, "The formula must have a trailing newline" <del> end <del> <del> def test_has_data <del> ft = formula_text "data", "patch :DATA" <del> assert ft.data?, "The formula must have DATA" <del> end <del> <del> def test_has_end <del> ft = formula_text "end", "", patch: "__END__\na patch here" <del> assert ft.end?, "The formula must have __END__" <del> assert_equal "class End < Formula\n \nend", ft.without_patch <del> end <del>end <del> <ide> class FormulaAuditorTests < Homebrew::TestCase <ide> def setup <ide> super <ide><path>Library/Homebrew/test/dev-cmd/audit_spec.rb <add>require "dev-cmd/audit" <add>require "formulary" <add> <add>RSpec::Matchers.alias_matcher :have_data, :be_data <add>RSpec::Matchers.alias_matcher :have_end, :be_end <add>RSpec::Matchers.alias_matcher :have_trailing_newline, :be_trailing_newline <add> <add>describe FormulaText do <add> let(:dir) { @dir = Pathname.new(Dir.mktmpdir) } <add> <add> after(:each) do <add> dir.rmtree unless @dir.nil? <add> end <add> <add> def formula_text(name, body = nil, options = {}) <add> path = dir/"#{name}.rb" <add> <add> path.write <<-EOS.undent <add> class #{Formulary.class_s(name)} < Formula <add> #{body} <add> end <add> #{options[:patch]} <add> EOS <add> <add> described_class.new(path) <add> end <add> <add> specify "simple valid Formula" do <add> ft = formula_text "valid", <<-EOS.undent <add> url "http://www.example.com/valid-1.0.tar.gz" <add> EOS <add> <add> expect(ft).not_to have_data <add> expect(ft).not_to have_end <add> expect(ft).to have_trailing_newline <add> <add> expect(ft =~ /\burl\b/).to be_truthy <add> expect(ft.line_number(/desc/)).to be nil <add> expect(ft.line_number(/\burl\b/)).to eq(2) <add> expect(ft).to include("Valid") <add> end <add> <add> specify "#trailing_newline?" do <add> ft = formula_text "newline" <add> expect(ft).to have_trailing_newline <add> end <add> <add> specify "#data?" do <add> ft = formula_text "data", <<-EOS.undent <add> patch :DATA <add> EOS <add> <add> expect(ft).to have_data <add> end <add> <add> specify "#end?" do <add> ft = formula_text "end", "", patch: "__END__\na patch here" <add> expect(ft).to have_end <add> expect(ft.without_patch).to eq("class End < Formula\n \nend") <add> end <add>end
2
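The converted spec leans on RSpec's predicate matchers: a `foo?` method is automatically assertable as `be_foo`, and `alias_matcher` renames that for readability (`have_data` for `data?`). A condensed sketch of the mechanism using an invented stand-in object; run it under `rspec`:

```ruby
RSpec::Matchers.alias_matcher :have_data, :be_data

RSpec.describe "predicate matcher aliasing" do
  it "asserts data? via have_data" do
    text = Struct.new(:data) { def data?; data; end }.new(true)
    expect(text).to have_data  # delegates to text.data?
  end
end
```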
Java
Java
improve tests for detection of @MVC annotations
7c6a1a1bf0db6b428344ec6797a9152c5f4f6d30
<ide><path>spring-webmvc/src/test/java/org/springframework/web/servlet/mvc/method/annotation/HandlerMethodAnnotationDetectionTests.java <ide> import static org.junit.Assert.assertEquals; <ide> import static org.junit.Assert.assertNotNull; <ide> <add>import java.lang.reflect.Method; <ide> import java.text.SimpleDateFormat; <ide> import java.util.Arrays; <ide> import java.util.Collection; <ide> import java.util.Date; <ide> <add>import org.aopalliance.aop.Advice; <ide> import org.junit.Test; <ide> import org.junit.runner.RunWith; <ide> import org.junit.runners.Parameterized; <ide> import org.junit.runners.Parameterized.Parameters; <add>import org.springframework.aop.Pointcut; <ide> import org.springframework.aop.framework.autoproxy.DefaultAdvisorAutoProxyCreator; <ide> import org.springframework.aop.interceptor.SimpleTraceInterceptor; <ide> import org.springframework.aop.support.DefaultPointcutAdvisor; <add>import org.springframework.aop.support.StaticMethodMatcherPointcut; <ide> import org.springframework.beans.factory.support.RootBeanDefinition; <ide> import org.springframework.beans.propertyeditors.CustomDateEditor; <add>import org.springframework.core.annotation.AnnotationUtils; <ide> import org.springframework.mock.web.MockHttpServletRequest; <ide> import org.springframework.mock.web.MockHttpServletResponse; <ide> import org.springframework.stereotype.Controller; <ide> public static Collection<Object[]> handlerTypes() { <ide> <ide> private ExceptionHandlerExceptionResolver exceptionResolver = new ExceptionHandlerExceptionResolver(); <ide> <del> public HandlerMethodAnnotationDetectionTests(Class<?> controllerType, boolean useAutoProxy) { <add> public HandlerMethodAnnotationDetectionTests(final Class<?> controllerType, boolean useAutoProxy) { <ide> GenericWebApplicationContext context = new GenericWebApplicationContext(); <ide> context.registerBeanDefinition("controller", new RootBeanDefinition(controllerType)); <add> context.registerBeanDefinition("handlerMapping", new RootBeanDefinition(RequestMappingHandlerMapping.class)); <add> context.registerBeanDefinition("handlerAdapter", new RootBeanDefinition(RequestMappingHandlerAdapter.class)); <add> context.registerBeanDefinition("exceptionResolver", new RootBeanDefinition(ExceptionHandlerExceptionResolver.class)); <ide> if (useAutoProxy) { <ide> DefaultAdvisorAutoProxyCreator autoProxyCreator = new DefaultAdvisorAutoProxyCreator(); <ide> autoProxyCreator.setBeanFactory(context.getBeanFactory()); <ide> context.getBeanFactory().addBeanPostProcessor(autoProxyCreator); <del> context.getBeanFactory().registerSingleton("advisor", new DefaultPointcutAdvisor(new SimpleTraceInterceptor())); <add> context.registerBeanDefinition("controllerAdvice", new RootBeanDefinition(ControllerAdvice.class)); <ide> } <ide> context.refresh(); <ide> <del> handlerMapping.setApplicationContext(context); <del> handlerMapping.afterPropertiesSet(); <del> handlerAdapter.afterPropertiesSet(); <del> exceptionResolver.afterPropertiesSet(); <add> this.handlerMapping = context.getBean(RequestMappingHandlerMapping.class); <add> this.handlerAdapter = context.getBean(RequestMappingHandlerAdapter.class); <add> this.exceptionResolver = context.getBean(ExceptionHandlerExceptionResolver.class); <add> } <add> <add> class TestPointcut extends StaticMethodMatcherPointcut { <add> public boolean matches(Method method, Class<?> clazz) { <add> return method.getName().equals("hashCode"); <add> } <ide> } <ide> <ide> @Test <ide> static interface MappingInterface { <ide> /** <ide> * CONTROLLER 
WITH INTERFACE <ide> * <del> * No AOP: <del> * All annotations can be on interface methods except parameter annotations. <del> * <ide> * JDK Dynamic proxy: <ide> * All annotations must be on the interface. <add> * <add> * Without AOP: <add> * Annotations can be on interface methods except parameter annotations. <ide> */ <ide> static class InterfaceController implements MappingInterface { <ide> <ide> public String handleException(Exception exception) { <ide> static class SupportClassController extends MappingSupportClass { <ide> } <ide> <add> <add> static class ControllerAdvice extends DefaultPointcutAdvisor { <add> <add> public ControllerAdvice() { <add> super(getControllerPointcut(), new SimpleTraceInterceptor()); <add> } <add> <add> private static StaticMethodMatcherPointcut getControllerPointcut() { <add> return new StaticMethodMatcherPointcut() { <add> public boolean matches(Method method, Class<?> targetClass) { <add> return ((AnnotationUtils.findAnnotation(targetClass, Controller.class) != null) || <add> (AnnotationUtils.findAnnotation(targetClass, RequestMapping.class) != null)); <add> } <add> }; <add> } <add> } <add> <ide> }
1
Ruby
Ruby
convert `brew test` test to spec
0248b5e357de99654b6a2f4ee5fd20829c8481e6
<ide><path>Library/Homebrew/test/dev-cmd/test_spec.rb <add>describe "brew test", :integration_test do <add> it "fails when no argument is given" do <add> expect { brew "test" } <add> .to output(/This command requires a formula argument/).to_stderr <add> .and not_to_output.to_stdout <add> .and be_a_failure <add> end <add> <add> it "fails when a Formula is not installed" do <add> expect { brew "test", testball } <add> .to output(/Testing requires the latest version of testball/).to_stderr <add> .and not_to_output.to_stdout <add> .and be_a_failure <add> end <add> <add> it "fails when a Formula has no test" do <add> shutup do <add> expect { brew "install", testball }.to be_a_success <add> end <add> <add> expect { brew "test", testball } <add> .to output(/testball defines no test/).to_stderr <add> .and not_to_output.to_stdout <add> .and be_a_failure <add> end <add> <add> it "tests a given Formula" do <add> setup_test_formula "testball", <<-EOS.undent <add> head "https://github.com/example/testball2.git" <add> <add> devel do <add> url "file://#{TEST_FIXTURE_DIR}/tarballs/testball-0.1.tbz" <add> sha256 "#{TESTBALL_SHA256}" <add> end <add> <add> keg_only "just because" <add> <add> test do <add> end <add> EOS <add> <add> shutup do <add> expect { brew "install", "testball" }.to be_a_success <add> end <add> <add> expect { brew "test", "--HEAD", "testball" } <add> .to output(/Testing testball/).to_stdout <add> .and not_to_output.to_stderr <add> .and be_a_success <add> <add> expect { brew "test", "--devel", "testball" } <add> .to output(/Testing testball/).to_stdout <add> .and not_to_output.to_stderr <add> .and be_a_success <add> end <add>end <ide><path>Library/Homebrew/test/test_formula_test.rb <del>require "testing_env" <del> <del>class IntegrationCommandTestTestFormula < IntegrationCommandTestCase <del> def test_test_formula <del> assert_match "This command requires a formula argument", cmd_fail("test") <del> assert_match "Testing requires the latest version of testball", <del> cmd_fail("test", testball) <del> <del> cmd("install", testball) <del> assert_match "testball defines no test", cmd_fail("test", testball) <del> <del> setup_test_formula "testball_copy", <<-EOS.undent <del> head "https://github.com/example/testball2.git" <del> <del> devel do <del> url "file://#{TEST_FIXTURE_DIR}/tarballs/testball-0.1.tbz" <del> sha256 "#{TESTBALL_SHA256}" <del> end <del> <del> keg_only "just because" <del> <del> test do <del> end <del> EOS <del> <del> cmd("install", "testball_copy") <del> assert_match "Testing testball_copy", cmd("test", "--HEAD", "testball_copy") <del> assert_match "Testing testball_copy", cmd("test", "--devel", "testball_copy") <del> end <del>end
2
Text
Text
fix types in API docs for moves in parser and NER
60520d86693699c1221a4414a133f76ffb9601b0
<ide><path>website/docs/api/dependencyparser.md <ide> shortcut for this and instantiate the component using its string name and <ide> | `vocab` | The shared vocabulary. ~~Vocab~~ | <ide> | `model` | The [`Model`](https://thinc.ai/docs/api-model) powering the pipeline component. ~~Model[List[Doc], List[Floats2d]]~~ | <ide> | `name` | String name of the component instance. Used to add entries to the `losses` during training. ~~str~~ | <del>| `moves` | A list of transition names. Inferred from the data if not provided. ~~Optional[List[str]]~~ | <add>| `moves` | A list of transition names. Inferred from the data if not provided. ~~Optional[TransitionSystem]~~ | <ide> | _keyword-only_ | | <ide> | `update_with_oracle_cut_size` | During training, cut long sequences into shorter segments by creating intermediate states based on the gold-standard history. The model is not very sensitive to this parameter, so you usually won't need to change it. Defaults to `100`. ~~int~~ | <ide> | `learn_tokens` | Whether to learn to merge subtokens that are split relative to the gold standard. Experimental. Defaults to `False`. ~~bool~~ | <ide><path>website/docs/api/entityrecognizer.md <ide> architectures and their arguments and hyperparameters. <ide> <ide> | Setting | Description | <ide> | ----------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | <del>| `moves` | A list of transition names. Inferred from the data if not provided. Defaults to `None`. ~~Optional[List[str]]~~ | <add>| `moves` | A list of transition names. Inferred from the data if not provided. Defaults to `None`. ~~Optional[TransitionSystem]~~ | <ide> | `update_with_oracle_cut_size` | During training, cut long sequences into shorter segments by creating intermediate states based on the gold-standard history. The model is not very sensitive to this parameter, so you usually won't need to change it. Defaults to `100`. ~~int~~ | <ide> | `model` | The [`Model`](https://thinc.ai/docs/api-model) powering the pipeline component. Defaults to [TransitionBasedParser](/api/architectures#TransitionBasedParser). ~~Model[List[Doc], List[Floats2d]]~~ | <ide> | `incorrect_spans_key` | This key refers to a `SpanGroup` in `doc.spans` that specifies incorrect spans. The NER will learn not to predict (exactly) those spans. Defaults to `None`. ~~Optional[str]~~ | <ide> shortcut for this and instantiate the component using its string name and <ide> | `vocab` | The shared vocabulary. ~~Vocab~~ | <ide> | `model` | The [`Model`](https://thinc.ai/docs/api-model) powering the pipeline component. ~~Model[List[Doc], List[Floats2d]]~~ | <ide> | `name` | String name of the component instance. Used to add entries to the `losses` during training. ~~str~~ | <del>| `moves` | A list of transition names. Inferred from the data if set to `None`, which is the default. ~~Optional[List[str]]~~ | <add>| `moves` | A list of transition names. Inferred from the data if set to `None`, which is the default. ~~Optional[TransitionSystem]~~ | <ide> | _keyword-only_ | | <ide> | `update_with_oracle_cut_size` | During training, cut long sequences into shorter segments by creating intermediate states based on the gold-standard history. The model is not very sensitive to this parameter, so you usually won't need to change it. Defaults to `100`. 
~~int~~ | <ide> | `incorrect_spans_key` | Identifies spans that are known to be incorrect entity annotations. The incorrect entity annotations can be stored in the span group in [`Doc.spans`](/api/doc#spans), under this key. Defaults to `None`. ~~Optional[str]~~ |
2
Text
Text
fix Russian translation of Rust looping docs
1b3635f6fb90b3c470b6afc1919a5a9ebea2c5dc
<ide><path>guide/russian/rust/loops/index.md <ide> localeTitle: Loops <ide> --- <ide> # Loops <ide> <del>Внутри Rust существует три типа встроенных механизмов циклизации: `loop` , `while` и `for` . <add>Rust поддерживает три типа циклов: `loop` , `while` и `for` . <ide> <ide> ## Бесконечное повторение с `loop` <ide> <del>В Rust структура `loop` будет непрерывно выполнять блок кода ad infinitum (или пока вы явно не остановите его). <add>В Rust конструкция `loop` будет непрерывно выполнять блок кода до бесконечности (или пока вы явно не остановите его). <ide> <del>Вот пример программы, использующей `loop` для непрерывного вывода слова «снова» на терминал: <add>Вот пример программы, использующей `loop` для непрерывного вывода слова «again!» на терминал: <ide> <ide> ```rust <ide> fn main() { <ide> fn main() { <ide> <ide> ## Условный цикл с `while` <ide> <del>Вышеупомянутый механизм не очень полезен, если мы не вводим какое-то условие остановки для `loop` для которого нужно проверить. К счастью, Rust имеет встроенную циклическую структуру, называемую `while` , которую вы можете использовать для непрерывного выполнения блока кода, в то время как какое-то условие истинно. <add>Описанный выше механизм не очень полезен, если мы не добавим какое-то условие, по которому `loop` будет останавливаться. К счастью, в Rust есть встроенная конструкция цикла `while`. Её можно использовать для непрерывного выполнения блока до тех пор, пока какое-то условие истинно. <ide> <ide> Вот пример программы, использующей `while` для обратного отсчета от 5: <ide> <ide> fn main() { <ide> } <ide> ``` <ide> <del>Выполните [здесь](https://play.rust-lang.org/?gist=62677371a8590be27c84dcae7068de57&version=stable) код. <add>Запустите код [здесь](https://play.rust-lang.org/?gist=62677371a8590be27c84dcae7068de57&version=stable). <ide> <ide> ## Перебор коллекции с `for` <ide> <del>В некоторых случаях вам может потребоваться повторить и использовать элементы коллекции (например, массив). В то время как вы могли бы добиться этого с помощью `while` цикл и индексную переменную для доступа к каждому элементу, Rust предоставляет `for` цикла , чтобы сделать эту операцию гораздо проще. <add>В некоторых случаях вам может потребоваться пройтись по элементам коллекции (например, массива) и как-то обработать их. Это можно было бы сделать с помощью `while`, используя индексную переменную для доступа к каждому элементу. Чтобы сделать эту операцию гораздо проще, Rust предлагает цикл `for`. <ide> <ide> Вот пример программы, которая печатает каждое число в массиве на терминал, используя `for` : <ide> <ide> fn main() { <ide> } <ide> ``` <ide> <del>Выполните [здесь](https://play.rust-lang.org/?gist=0c2acf21b96a81ebd411e4a7dc5a19fd&version=stable) код. <add>Запустите код [здесь](https://play.rust-lang.org/?gist=0c2acf21b96a81ebd411e4a7dc5a19fd&version=stable). <ide> <del>Подобно итераторам в C ++, `.iter()` возвращает итератор в `collection` , который затем может быть зациклирован для доступа к каждому `element` . Для получения дополнительной информации перейдите к документации Rust по [потоку управления](https://doc.rust-lang.org/book/second-edition/ch03-05-control-flow.html) . <ide>\ No newline at end of file <add>Подобно итераторам в C++, `.iter()` возвращает итератор коллекции `collection`, который затем можно использовать в цикле для доступа к каждому элементу `element`. 
Для получения дополнительной информации перейдите к документации Rust по порядку исполнения ([control flow](https://doc.rust-lang.org/book/second-edition/ch03-05-control-flow.html)).
1
Javascript
Javascript
simulate synthetic events using ReactTestUtils
36e97bac21642f73c15d45176dbd304fff327c5a
<ide><path>src/browser/ReactEventEmitter.js <ide> var ReactEventEmitter = merge(ReactEventEmitterMixin, { <ide> } <ide> }, <ide> <add> eventNameDispatchConfigs: EventPluginHub.eventNameDispatchConfigs, <add> <ide> registrationNameModules: EventPluginHub.registrationNameModules, <ide> <ide> putListener: EventPluginHub.putListener, <ide><path>src/browser/__tests__/ReactEventEmitter-test.js <ide> describe('ReactEventEmitter', function() { <ide> it('should not invoke handlers if ReactEventEmitter is disabled', function() { <ide> registerSimpleTestHandler(); <ide> ReactEventEmitter.setEnabled(false); <del> ReactTestUtils.Simulate.click(CHILD); <add> ReactTestUtils.SimulateNative.click(CHILD); <ide> expect(LISTENER.mock.calls.length).toBe(0); <ide> ReactEventEmitter.setEnabled(true); <del> ReactTestUtils.Simulate.click(CHILD); <add> ReactTestUtils.SimulateNative.click(CHILD); <ide> expect(LISTENER.mock.calls.length).toBe(1); <ide> }); <ide> <ide> describe('ReactEventEmitter', function() { <ide> ON_TOUCH_TAP_KEY, <ide> recordID.bind(null, getID(CHILD)) <ide> ); <del> ReactTestUtils.Simulate.touchStart( <add> ReactTestUtils.SimulateNative.touchStart( <ide> CHILD, <ide> ReactTestUtils.nativeTouchData(0, 0) <ide> ); <del> ReactTestUtils.Simulate.touchEnd( <add> ReactTestUtils.SimulateNative.touchEnd( <ide> CHILD, <ide> ReactTestUtils.nativeTouchData(0, 0) <ide> ); <ide> describe('ReactEventEmitter', function() { <ide> ON_TOUCH_TAP_KEY, <ide> recordID.bind(null, getID(CHILD)) <ide> ); <del> ReactTestUtils.Simulate.touchStart( <add> ReactTestUtils.SimulateNative.touchStart( <ide> CHILD, <ide> ReactTestUtils.nativeTouchData(0, 0) <ide> ); <del> ReactTestUtils.Simulate.touchEnd( <add> ReactTestUtils.SimulateNative.touchEnd( <ide> CHILD, <ide> ReactTestUtils.nativeTouchData(0, tapMoveThreshold - 1) <ide> ); <ide> describe('ReactEventEmitter', function() { <ide> ON_TOUCH_TAP_KEY, <ide> recordID.bind(null, getID(CHILD)) <ide> ); <del> ReactTestUtils.Simulate.touchStart( <add> ReactTestUtils.SimulateNative.touchStart( <ide> CHILD, <ide> ReactTestUtils.nativeTouchData(0, 0) <ide> ); <del> ReactTestUtils.Simulate.touchEnd( <add> ReactTestUtils.SimulateNative.touchEnd( <ide> CHILD, <ide> ReactTestUtils.nativeTouchData(0, tapMoveThreshold + 1) <ide> ); <ide> describe('ReactEventEmitter', function() { <ide> ON_TOUCH_TAP_KEY, <ide> recordID.bind(null, getID(GRANDPARENT)) <ide> ); <del> ReactTestUtils.Simulate.touchStart( <add> ReactTestUtils.SimulateNative.touchStart( <ide> CHILD, <ide> ReactTestUtils.nativeTouchData(0, 0) <ide> ); <del> ReactTestUtils.Simulate.touchEnd( <add> ReactTestUtils.SimulateNative.touchEnd( <ide> CHILD, <ide> ReactTestUtils.nativeTouchData(0, 0) <ide> ); <ide><path>src/browser/dom/components/__tests__/ReactDOMInput-test.js <ide> describe('ReactDOMInput', function() { <ide> var node = renderTextInput(stub); <ide> <ide> node.value = 'giraffe'; <del> ReactTestUtils.Simulate.input(node); <add> ReactTestUtils.Simulate.change(node); <ide> expect(node.value).toBe('0'); <ide> }); <ide> <ide> describe('ReactDOMInput', function() { <ide> aNode.checked = false; <ide> expect(cNode.checked).toBe(true); <ide> <del> // Now let's run the actual ReactDOMInput change event handler (on radio <del> // inputs, ChangeEventPlugin listens for the `click` event so trigger that) <del> ReactTestUtils.Simulate.click(bNode); <add> // Now let's run the actual ReactDOMInput change event handler <add> ReactTestUtils.Simulate.change(bNode); <ide> <ide> // The original state should have been restored <ide> 
expect(aNode.checked).toBe(true); <ide> describe('ReactDOMInput', function() { <ide> expect(link.requestChange.mock.calls.length).toBe(0); <ide> <ide> instance.getDOMNode().value = 'test'; <del> ReactTestUtils.Simulate.input(instance.getDOMNode()); <add> ReactTestUtils.Simulate.change(instance.getDOMNode()); <ide> <ide> expect(link.requestChange.mock.calls.length).toBe(1); <ide> expect(link.requestChange.mock.calls[0][0]).toEqual('test'); <ide> describe('ReactDOMInput', function() { <ide> expect(link.requestChange.mock.calls.length).toBe(0); <ide> <ide> instance.getDOMNode().checked = false; <del> ReactTestUtils.Simulate.click(instance.getDOMNode()); <add> ReactTestUtils.Simulate.change(instance.getDOMNode()); <ide> <ide> expect(link.requestChange.mock.calls.length).toBe(1); <ide> expect(link.requestChange.mock.calls[0][0]).toEqual(false); <ide><path>src/browser/dom/components/__tests__/ReactDOMTextarea-test.js <ide> describe('ReactDOMTextarea', function() { <ide> var node = renderTextarea(stub); <ide> <ide> node.value = 'giraffe'; <del> ReactTestUtils.Simulate.input(node); <add> ReactTestUtils.Simulate.change(node); <ide> expect(node.value).toBe('0'); <ide> }); <ide> <ide> describe('ReactDOMTextarea', function() { <ide> expect(link.requestChange.mock.calls.length).toBe(0); <ide> <ide> instance.getDOMNode().value = 'test'; <del> ReactTestUtils.Simulate.input(instance.getDOMNode()); <add> ReactTestUtils.Simulate.change(instance.getDOMNode()); <ide> <ide> expect(link.requestChange.mock.calls.length).toBe(1); <ide> expect(link.requestChange.mock.calls[0][0]).toEqual('test'); <ide><path>src/browser/eventPlugins/__tests__/AnalyticsEventPlugin-test.js <ide> describe('AnalyticsEventPlugin', function() { <ide> <ide> // Simulate some clicks <ide> for (var i = 0; i < numClickEvents; i++) { <del> ReactTestUtils.Simulate.click(renderedComponent.refs.testDiv); <add> ReactTestUtils.SimulateNative.click(renderedComponent.refs.testDiv); <ide> } <ide> // Simulate some double clicks <ide> for (i = 0; i < numDoubleClickEvents; i++) { <del> ReactTestUtils.Simulate.doubleClick(renderedComponent.refs.testDiv); <add> ReactTestUtils.SimulateNative.doubleClick(renderedComponent.refs.testDiv); <ide> } <ide> // Simulate some other events not being tracked for analytics <del> ReactTestUtils.Simulate.focus(renderedComponent.refs.testDiv); <add> ReactTestUtils.SimulateNative.focus(renderedComponent.refs.testDiv); <ide> <ide> window.mockRunTimersOnce(); <ide> expect(cb).toBeCalled(); <ide> describe('AnalyticsEventPlugin', function() { <ide> <ide> var error = false; <ide> try { <del> ReactTestUtils.Simulate.click(renderedComponent.refs.testDiv); <add> ReactTestUtils.SimulateNative.click(renderedComponent.refs.testDiv); <ide> } catch(e) { <ide> error = true; <ide> } <ide><path>src/core/__tests__/ReactBind-test.js <ide> describe('autobinding', function() { <ide> render: function() { <ide> return ( <ide> <div <del> onMouseEnter={this.onMouseEnter.bind(this)} <del> onMouseLeave={this.onMouseLeave} <add> onMouseOver={this.onMouseEnter.bind(this)} <add> onMouseOut={this.onMouseLeave} <ide> onClick={this.onClick} <ide> /> <ide> ); <ide><path>src/event/EventPluginHub.js <ide> var EventPluginHub = { <ide> <ide> }, <ide> <add> eventNameDispatchConfigs: EventPluginRegistry.eventNameDispatchConfigs, <add> <ide> registrationNameModules: EventPluginRegistry.registrationNameModules, <ide> <ide> /** <ide><path>src/event/EventPluginRegistry.js <ide> function recomputePluginOrdering() { <ide> * @private <ide> */ <ide> function 
publishEventForPlugin(dispatchConfig, PluginModule, eventName) { <add> invariant( <add> !EventPluginRegistry.eventNameDispatchConfigs[eventName], <add> 'EventPluginHub: More than one plugin attempted to publish the same ' + <add> 'event name, `%s`.', <add> eventName <add> ); <add> EventPluginRegistry.eventNameDispatchConfigs[eventName] = dispatchConfig; <add> <ide> var phasedRegistrationNames = dispatchConfig.phasedRegistrationNames; <ide> if (phasedRegistrationNames) { <ide> for (var phaseName in phasedRegistrationNames) { <ide> var EventPluginRegistry = { <ide> plugins: [], <ide> <ide> /** <del> * Mapping from registration names to plugin modules. <add> * Mapping from event name to dispatch config <add> */ <add> eventNameDispatchConfigs: {}, <add> <add> /** <add> * Mapping from registration name to plugin module <ide> */ <ide> registrationNameModules: {}, <ide> <ide> var EventPluginRegistry = { <ide> } <ide> } <ide> EventPluginRegistry.plugins.length = 0; <add> <add> var eventNameDispatchConfigs = EventPluginRegistry.eventNameDispatchConfigs; <add> for (var eventName in eventNameDispatchConfigs) { <add> if (eventNameDispatchConfigs.hasOwnProperty(eventName)) { <add> delete eventNameDispatchConfigs[eventName]; <add> } <add> } <add> <ide> var registrationNameModules = EventPluginRegistry.registrationNameModules; <ide> for (var registrationName in registrationNameModules) { <ide> if (registrationNameModules.hasOwnProperty(registrationName)) { <ide><path>src/test/ReactTestUtils.js <ide> * @providesModule ReactTestUtils <ide> */ <ide> <add>"use strict"; <add> <ide> var EventConstants = require('EventConstants'); <add>var EventPluginHub = require('EventPluginHub'); <add>var EventPropagators = require('EventPropagators'); <ide> var React = require('React'); <ide> var ReactComponent = require('ReactComponent'); <ide> var ReactDOM = require('ReactDOM'); <ide> var ReactEventEmitter = require('ReactEventEmitter'); <add>var ReactMount = require('ReactMount'); <ide> var ReactTextComponent = require('ReactTextComponent'); <add>var ReactUpdates = require('ReactUpdates'); <add>var SyntheticEvent = require('SyntheticEvent'); <ide> <ide> var mergeInto = require('mergeInto'); <ide> var copyProperties = require('copyProperties'); <ide> var ReactTestUtils = { <ide> <ide> /** <ide> * Simulates a top level event being dispatched from a raw event that occured <del> * on and `Element` node. <add> * on an `Element` node. <ide> * @param topLevelType {Object} A type from `EventConstants.topLevelTypes` <ide> * @param {!Element} node The dom to simulate an event occurring on. <ide> * @param {?Event} fakeNativeEvent Fake native event to use in SyntheticEvent. <ide> */ <del> simulateEventOnNode: function(topLevelType, node, fakeNativeEvent) { <add> simulateNativeEventOnNode: function(topLevelType, node, fakeNativeEvent) { <ide> var virtualHandler = <ide> ReactEventEmitter.TopLevelCallbackCreator.createTopLevelCallback( <ide> topLevelType <ide> var ReactTestUtils = { <ide> * @param comp {!ReactDOMComponent} <ide> * @param {?Event} fakeNativeEvent Fake native event to use in SyntheticEvent. 
<ide> */ <del> simulateEventOnDOMComponent: function(topLevelType, comp, fakeNativeEvent) { <del> ReactTestUtils.simulateEventOnNode( <add> simulateNativeEventOnDOMComponent: function( <add> topLevelType, <add> comp, <add> fakeNativeEvent) { <add> ReactTestUtils.simulateNativeEventOnNode( <ide> topLevelType, <ide> comp.getDOMNode(), <ide> fakeNativeEvent <ide> var ReactTestUtils = { <ide> }; <ide> }, <ide> <del> Simulate: null // Will populate <add> Simulate: {}, <add> SimulateNative: {} <ide> }; <ide> <ide> /** <ide> * Exports: <ide> * <ide> * - `ReactTestUtils.Simulate.click(Element/ReactDOMComponent)` <ide> * - `ReactTestUtils.Simulate.mouseMove(Element/ReactDOMComponent)` <del> * - `ReactTestUtils.Simulate.mouseIn/ReactDOMComponent)` <del> * - `ReactTestUtils.Simulate.mouseOut(Element/ReactDOMComponent)` <add> * - `ReactTestUtils.Simulate.change(Element/ReactDOMComponent)` <add> * - ... (All keys from event plugin `eventTypes` objects) <add> */ <add>function makeSimulator(eventType) { <add> return function(domComponentOrNode, eventData) { <add> var node; <add> if (ReactTestUtils.isDOMComponent(domComponentOrNode)) { <add> node = domComponentOrNode.getDOMNode(); <add> } else if (domComponentOrNode.tagName) { <add> node = domComponentOrNode; <add> } <add> <add> var fakeNativeEvent = new Event(); <add> fakeNativeEvent.target = node; <add> // We don't use SyntheticEvent.getPooled in order to not have to worry about <add> // properly destroying any properties assigned from `eventData` upon release <add> var event = new SyntheticEvent( <add> ReactEventEmitter.eventNameDispatchConfigs[eventType], <add> ReactMount.getID(node), <add> fakeNativeEvent <add> ); <add> mergeInto(event, eventData); <add> EventPropagators.accumulateTwoPhaseDispatches(event); <add> <add> ReactUpdates.batchedUpdates(function() { <add> EventPluginHub.enqueueEvents(event); <add> EventPluginHub.processEventQueue(); <add> }); <add> }; <add>} <add> <add>var eventType; <add>for (eventType in ReactEventEmitter.eventNameDispatchConfigs) { <add> /** <add> * @param {!Element || ReactDOMComponent} domComponentOrNode <add> * @param {?object} eventData Fake event data to use in SyntheticEvent. <add> */ <add> ReactTestUtils.Simulate[eventType] = makeSimulator(eventType); <add>} <add> <add>/** <add> * Exports: <add> * <add> * - `ReactTestUtils.SimulateNative.click(Element/ReactDOMComponent)` <add> * - `ReactTestUtils.SimulateNative.mouseMove(Element/ReactDOMComponent)` <add> * - `ReactTestUtils.SimulateNative.mouseIn/ReactDOMComponent)` <add> * - `ReactTestUtils.SimulateNative.mouseOut(Element/ReactDOMComponent)` <ide> * - ... (All keys from `EventConstants.topLevelTypes`) <ide> * <ide> * Note: Top level event types are a subset of the entire set of handler types <ide> * (which include a broader set of "synthetic" events). For example, onDragDone <del> * is a synthetic event. You certainly may write test cases for these event <del> * types, but it doesn't make sense to simulate them at this low of a level. In <del> * this case, the way you test an `onDragDone` event is by simulating a series <del> * of `mouseMove`/ `mouseDown`/`mouseUp` events - Then, a synthetic event of <del> * type `onDragDone` will be constructed and dispached through your system <del> * automatically. <add> * is a synthetic event. Except when testing an event plugin or React's event <add> * handling code specifically, you probably want to use ReactTestUtils.Simulate <add> * to dispatch synthetic events. 
<ide> */ <ide> <del>function makeSimulator(eventType) { <add>function makeNativeSimulator(eventType) { <ide> return function(domComponentOrNode, nativeEventData) { <ide> var fakeNativeEvent = new Event(eventType); <ide> mergeInto(fakeNativeEvent, nativeEventData); <ide> if (ReactTestUtils.isDOMComponent(domComponentOrNode)) { <del> ReactTestUtils.simulateEventOnDOMComponent( <add> ReactTestUtils.simulateNativeEventOnDOMComponent( <ide> eventType, <ide> domComponentOrNode, <ide> fakeNativeEvent <ide> ); <ide> } else if (!!domComponentOrNode.tagName) { <ide> // Will allow on actual dom nodes. <del> ReactTestUtils.simulateEventOnNode( <add> ReactTestUtils.simulateNativeEventOnNode( <ide> eventType, <ide> domComponentOrNode, <ide> fakeNativeEvent <ide> function makeSimulator(eventType) { <ide> }; <ide> } <ide> <del>ReactTestUtils.Simulate = {}; <ide> var eventType; <ide> for (eventType in topLevelTypes) { <ide> // Event type is stored as 'topClick' - we transform that to 'click' <ide> for (eventType in topLevelTypes) { <ide> * @param {!Element || ReactDOMComponent} domComponentOrNode <ide> * @param {?Event} nativeEventData Fake native event to use in SyntheticEvent. <ide> */ <del> ReactTestUtils.Simulate[convenienceName] = makeSimulator(eventType); <add> ReactTestUtils.SimulateNative[convenienceName] = <add> makeNativeSimulator(eventType); <ide> } <ide> <ide> module.exports = ReactTestUtils;
9
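The practical split after this change, as the rewritten tests show: component tests dispatch React's synthetic event types via `Simulate`, while event-system tests fire raw top-level events via `SimulateNative`. A usage sketch (setup condensed; the era-appropriate `React.DOM`/`getDOMNode` API is assumed):

```javascript
var values = [];
var input = ReactTestUtils.renderIntoDocument(
  React.DOM.input({onChange: function(event) { values.push(event.target.value); }})
);

// Synthetic path: dispatches React's own `change` event type.
input.getDOMNode().value = 'giraffe';
ReactTestUtils.Simulate.change(input.getDOMNode());

// Native path: simulates the underlying top-level browser event instead.
ReactTestUtils.SimulateNative.click(input.getDOMNode());
```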
Javascript
Javascript
add support for cause in AbortError
36c0ac05e6d82786c58bbbff46715bb24efe90b4
<ide><path>lib/internal/errors.js <ide> function hideInternalStackFrames(error) { <ide> // to make usage of the error in userland and readable-stream easier. <ide> // It is a regular error with `.code` and `.name`. <ide> class AbortError extends Error { <del> constructor() { <del> super('The operation was aborted'); <add> constructor(message = 'The operation was aborted', options = undefined) { <add> if (options !== undefined && typeof options !== 'object') { <add> throw new codes.ERR_INVALID_ARG_TYPE('options', 'Object', options); <add> } <add> super(message, options); <ide> this.code = 'ABORT_ERR'; <ide> this.name = 'AbortError'; <ide> } <ide><path>test/parallel/test-errors-aborterror.js <add>// Flags: --expose-internals <add>'use strict'; <add> <add>require('../common'); <add>const { <add> strictEqual, <add> throws, <add>} = require('assert'); <add>const { AbortError } = require('internal/errors'); <add> <add>{ <add> const err = new AbortError(); <add> strictEqual(err.message, 'The operation was aborted'); <add> strictEqual(err.cause, undefined); <add>} <add> <add>{ <add> const cause = new Error('boom'); <add> const err = new AbortError('bang', { cause }); <add> strictEqual(err.message, 'bang'); <add> strictEqual(err.cause, cause); <add>} <add> <add>{ <add> throws(() => new AbortError('', false), { <add> code: 'ERR_INVALID_ARG_TYPE' <add> }); <add> throws(() => new AbortError('', ''), { <add> code: 'ERR_INVALID_ARG_TYPE' <add> }); <add>}
2
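Usage mirrors the new test exactly; `AbortError` is internal to Node, so the sketch assumes `--expose-internals`:

```javascript
// Flags: --expose-internals
'use strict';
const { AbortError } = require('internal/errors');

const cause = new Error('boom');
const err = new AbortError('bang', { cause });

console.log(err.name);            // 'AbortError'
console.log(err.code);            // 'ABORT_ERR'
console.log(err.message);         // 'bang'
console.log(err.cause === cause); // true
```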
Text
Text
reword several passages of the text.
c7f24919b2db89568a041072296e599b52d50f78
<ide><path>curriculum/challenges/russian/02-javascript-algorithms-and-data-structures/basic-javascript/comment-your-javascript-code.russian.md <ide> localeTitle: Комментарий <ide> --- <ide> <ide> ## Description <del><section id="description"> Комментарии - это строки кода, которые JavaScript намеренно игнорирует. Комментарии - это отличный способ оставить заметки для себя и для других людей, которым позже нужно будет понять, что делает этот код. Существует два способа написания комментариев в JavaScript: Использование <code>//</code> говорит JavaScript игнорировать оставшуюся часть текста в текущей строке: <blockquote> // Это встроенный комментарий. </blockquote> Вы можете сделать многострочный комментарий, начинающийся с <code>/*</code> и заканчивающийся на <code>*/</code> : <blockquote> /* Это <br> многострочный комментарий * / </blockquote> <strong>Лучшая практика</strong> <br> Когда вы пишете код, вы должны регулярно добавлять комментарии, чтобы уточнить функцию частей вашего кода. Хороший комментарий может помочь сообщить о том для чего используется ваш код - как другим, так <em>и</em> вам в будущем. </section> <add><section id="description"> Комментарии представляют собой строки кода, которые JavaScript игнорирует. Использование комментариев является отличным способом оставить заметки себе или другим, кому придется разбираться в работе кода. <add> Комментарии в JavaScript бывают двух видов : <add> Два идущих подряд слэша <code>//</code> являются указанием игнорировать весь идущий за ними текст на этой строке: <add> <blockquote> // Это комментарий в строке. </blockquote> <add> Также можно сделать многострочный комментарий: он должен начинаться с <code>/*</code> и заканчиваться на <code>*/</code> : <add> <blockquote> /* Это <br> многострочный комментарий * / </blockquote> <add> <strong>Лучшая практика</strong> <br> В процессе написания кода имеет смысл регулярно добавлять комментарии, в которых будет описываться функциональность различных частей кода. Хороший комментарий может помочь разобраться в сути вашего кода - как другим людям, так <em>и</em> вам в будущем. </section> <ide> <ide> ## Instructions <del><section id="instructions"> Попробуйте создать один из комментариев каждого типа. </section> <add><section id="instructions"> Попробуйте создать один из комментариев каждого вида. </section> <ide> <ide> ## Tests <ide> <section id='tests'> <ide> <ide> ```yml <ide> tests: <del> - text: 'Создайте комментарий стиля <code>//</code> , содержащий не менее пяти букв.' <add> - text: 'Создайте комментарий вида <code>//</code>, содержащий не менее пяти букв.' <ide> testString: 'assert(code.match(/(\/\/)...../g), "Create a <code>//</code> style comment that contains at least five letters.");' <del> - text: 'Создайте комментарий <code>/* */</code> style, содержащий не менее пяти букв.' <add> - text: 'Создайте комментарий вида <code>/* */</code>, содержащий не менее пяти букв.' <ide> testString: 'assert(code.match(/(\/\*)([^\/]{5,})(?=\*\/)/gm), "Create a <code>/* */</code> style comment that contains at least five letters.");' <ide> <ide> ```
1
PHP
PHP
add tests for new urlhelper methods
35dc0c6dc6966bcc0f1b8df81df26415283ed42b
<ide><path>tests/TestCase/View/Helper/UrlHelperTest.php <ide> public function testAssetTimestampPluginsAndThemes() <ide> $this->assertRegExp('#/test_theme/js/non_existant.js\?$#', $result, 'No error on missing file'); <ide> } <ide> <add> /** <add> * test script() <add> * <add> * @return void <add> */ <add> public function testScript() <add> { <add> Router::connect('/:controller/:action/*'); <add> <add> $this->Helper->webroot = ''; <add> $result = $this->Helper->script( <add> [ <add> 'controller' => 'js', <add> 'action' => 'post', <add> '_ext' => 'js' <add> ], <add> ['fullBase' => true] <add> ); <add> $this->assertEquals(Router::fullBaseUrl() . '/js/post.js', $result); <add> } <add> <add> /** <add> * test image() <add> * <add> * @return void <add> */ <add> public function testImage() <add> { <add> $result = $this->Helper->image('foo.jpg'); <add> $this->assertEquals('img/foo.jpg', $result); <add> <add> $result = $this->Helper->image('foo.jpg', ['fullBase' => true]); <add> $this->assertEquals(Router::fullBaseUrl() . '/img/foo.jpg', $result); <add> <add> $result = $this->Helper->image('dir/sub dir/my image.jpg'); <add> $this->assertEquals('img/dir/sub%20dir/my%20image.jpg', $result); <add> <add> $result = $this->Helper->image('foo.jpg?one=two&three=four'); <add> $this->assertEquals('img/foo.jpg?one=two&amp;three=four', $result); <add> <add> $result = $this->Helper->image('dir/big+tall/image.jpg'); <add> $this->assertEquals('img/dir/big%2Btall/image.jpg', $result); <add> } <add> <add> /** <add> * test css <add> * <add> * @return void <add> */ <add> public function testCss() <add> { <add> $result = $this->Helper->css('style'); <add> $this->assertEquals('css/style.css', $result); <add> } <add> <ide> /** <ide> * Test generating paths with webroot(). <ide> *
1
Ruby
Ruby
pass benchmark.ms block through to realtime
a8ab5ff499080a4a4c125df949dfcee6e84e4c99
<ide><path>activesupport/lib/active_support/core_ext/benchmark.rb <ide> class << Benchmark <ide> # <ide> # Benchmark.ms { User.all } <ide> # # => 0.074 <del> def ms <del> 1000 * realtime { yield } <add> def ms(&block) <add> 1000 * realtime(&block) <ide> end <ide> end
1
Ruby
Ruby
improve brew pull to close issues
dc3623f5a61216af9d0a811cb6c5618b666e2f98
<ide><path>Library/Contributions/examples/brew-pull.rb <ide> HOMEBREW_REPOSITORY.cd do <ide> ARGV.each do|arg| <ide> # This regex should work, if it's too precise, feel free to fix it. <del> if !arg.match 'https:\/\/github.com\/\w+\/homebrew\/(pull\/\d+|commit\/\w{4,40})' <add> urlmatch = arg.match 'https:\/\/github.com\/\w+\/homebrew\/(pull\/(\d+)|commit\/\w{4,40})' <add> if !urlmatch <ide> ohai 'Ignoring URL:', "Not a GitHub pull request or commit: #{arg}" <ide> next <ide> end <del> <add> <ide> # GitHub provides commits'/pull-requests' raw patches using this URL. <ide> url = arg + '.patch' <del> <add> <ide> # The cache directory seems like a good place to put patches. <ide> patchpath = (HOMEBREW_CACHE+File.basename(url)) <ide> curl url, '-o', patchpath <del> <add> <ide> # Makes sense to squash whitespace errors, we don't want them. <ide> ohai 'Applying patch' <ide> safe_system 'git', 'am', '--signoff', '--whitespace=fix', patchpath <del> <add> <add> issue = urlmatch[2] <add> if issue <add> ohai "Patch closes issue ##{issue}" <add> message = `git log HEAD^..HEAD --format=%B` <add> <add> # If this is a pull request, append a close message. <add> if !message.include? 'Closes #' <add> issueline = "Closes ##{issue}." <add> signed = 'Signed-off-by:' <add> message = message.gsub signed, issueline + "\n\n" + signed <add> safe_system 'git', 'commit', '--amend', '-q', '-m', message <add> end <add> end <add> <ide> ohai 'Patch changed:' <ide> safe_system 'git', 'diff', 'HEAD^..HEAD', '--stat' <del> <add> <ide> if install <ide> status, filename = `git diff HEAD^..HEAD --name-status`.split() <ide> # Don't try and do anything to removed files.
1
Javascript
Javascript
call pushstate when state null and at initial url
6a863c6cbef95f232d2933486c69830e381bcc1c
<ide><path>packages/ember-application/lib/system/history_location.js <ide> Ember.HistoryLocation = Ember.Object.extend({ <ide> <ide> path = this.formatPath(path); <ide> <del> if ((initialURL && initialURL !== path) || (state && state.path !== path)) { <del> set(this, '_initialURL', null); <add> if ((initialURL !== path && !state) || (state && state.path !== path)) { <ide> window.history.pushState({ path: path }, null, path); <ide> } <ide> },
1
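The behavioral change above is easiest to read as a predicate. This is a hand-written sketch of the new condition, not code from the commit:

```js
// Push a new state when no state exists yet and we have left the initial
// URL, or when the stored state points at a different path.
function shouldPushState(initialURL, state, path) {
  return (initialURL !== path && !state) || (state && state.path !== path);
}

shouldPushState('/a', null, '/b');           // true  - no state, URL changed
shouldPushState('/a', null, '/a');           // false - still at the initial URL
shouldPushState('/a', { path: '/a' }, '/b'); // true  - stored state is stale
```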
Text
Text
add policy for landing new npm releases
7570ad58e91f0cbcf81daf3cdb7e4928b224249e
<ide><path>doc/guides/maintaining-npm.md <ide> # Maintaining npm in Node.js <ide> <add>New pull requests should be opened when a "next" version of npm has <add>been released. Once the "next" version has been promoted to "latest" <add>the PR should be updated as necessary. <add> <add>Two weeks after the "latest" release has been promoted it can land on master <add>assuming no major regressions are found. There are no additional constraints <add>for Semver-Major releases. <add> <add>The specific Node.js release streams the new version will be able to land into <add>are at the discretion of the release and LTS teams. <add> <add>This process only covers full updates to new versions of npm. Cherry-picked <add>changes can be reviewed and landed via the normal consensus seeking process. <add> <ide> ## Step 1: Clone npm <ide> <ide> ```console
1
Text
Text
remove another duplicate changelog entry
4d5d0f62c64abb8d47a975035ccfd62cf1b1c86a
<ide><path>CHANGELOG.md <ide> * [BUGFIX] Bubble `loading` action above pivot route <ide> * [BUGFIX] reduceComputed ignore changes during reset. <ide> * [BUGFIX] reduceComputed handle out-of-range index. <del>* [BUGFIX] Allow Ember.Object.create to accept an Ember.Object. <ide> * [FEATURE] Add support for nested loading/error substates. A loading substate will be entered when a slow-to-resolve promise is returned from one of the Route#model hooks during a transition and an appropriately-named loading template/route can be found. An error substate will be entered when one of the Route#model hooks returns a rejecting promise and an appropriately-named error template/route can be found. <ide> * [FEATURE] Components and helpers registered on the container can be rendered in templates via their dasherized names. E.g. {{helper-name}} or {{component-name}} <ide> * [FEATURE] Add a `didTransition` hook to the router.
1
Java
Java
fix error message typo in shallowetagheaderfilter
85a80e2a946898d7258228092c8aab96b6b25f3f
<ide><path>spring-web/src/main/java/org/springframework/web/filter/ShallowEtagHeaderFilter.java <ide> protected void doFilterInternal(HttpServletRequest request, HttpServletResponse <ide> private void updateResponse(HttpServletRequest request, HttpServletResponse response) throws IOException { <ide> ContentCachingResponseWrapper responseWrapper = <ide> WebUtils.getNativeResponse(response, ContentCachingResponseWrapper.class); <del> Assert.notNull(responseWrapper, "ShallowEtagResponseWrapper not found"); <add> Assert.notNull(responseWrapper, "ContentCachingResponseWrapper not found"); <ide> HttpServletResponse rawResponse = (HttpServletResponse) responseWrapper.getResponse(); <ide> int statusCode = responseWrapper.getStatusCode(); <ide>
1
PHP
PHP
fix usage of 'escape' => false
74792f634b44e0a995c0f20187b7b65fe3d3ac87
<ide><path>src/View/Helper/HtmlHelper.php <ide> public function div($class = null, $text = null, array $options = []) <ide> */ <ide> public function para($class, $text, array $options = []) <ide> { <del> if (isset($options['escape'])) { <add> if (!empty($options['escape'])) { <ide> $text = h($text); <ide> } <ide> if ($class && !empty($class)) { <ide><path>tests/TestCase/View/Helper/HtmlHelperTest.php <ide> public function testPara() <ide> $result = $this->Html->para('class-name', '<text>', ['escape' => true]); <ide> $expected = ['p' => ['class' => 'class-name'], '&lt;text&gt;', '/p']; <ide> $this->assertHtml($expected, $result); <add> <add> $result = $this->Html->para('class-name', 'text"', ['escape' => false]); <add> $expected = ['p' => ['class' => 'class-name'], 'text"', '/p']; <add> $this->assertHtml($expected, $result); <ide> } <ide> <ide> /**
2
Mixed
Python
fix debug data [ci skip]
9bb958fd0a117342dfc73b5b784489cc14803168
<ide><path>spacy/cli/debug_data.py
<ide>
<ide> from ._util import app, Arg, Opt, show_validation_error, parse_config_overrides
<ide> from ._util import import_code, debug_cli
<del>from ..training import Corpus, Example
<add>from ..training import Example
<ide> from ..training.initialize import get_sourced_components
<ide> from ..schemas import ConfigSchemaTraining
<ide> from ..pipeline._parser_internals import nonproj
<ide> from ..language import Language
<del>from ..util import registry
<add>from ..util import registry, resolve_dot_names
<ide> from .. import util
<ide>
<ide>
<ide> def debug_data_cli(
<ide> # fmt: off
<ide> ctx: typer.Context, # This is only used to read additional arguments
<del> train_path: Path = Arg(..., help="Location of JSON-formatted training data", exists=True),
<del> dev_path: Path = Arg(..., help="Location of JSON-formatted development data", exists=True),
<ide> config_path: Path = Arg(..., help="Path to config file", exists=True),
<ide> code_path: Optional[Path] = Opt(None, "--code-path", "-c", help="Path to Python file with additional code (registered functions) to be imported"),
<ide> ignore_warnings: bool = Opt(False, "--ignore-warnings", "-IW", help="Ignore warnings, only show stats and errors"),
<ide> def debug_data_cli(
<ide> overrides = parse_config_overrides(ctx.args)
<ide> import_code(code_path)
<ide> debug_data(
<del> train_path,
<del> dev_path,
<ide> config_path,
<ide> config_overrides=overrides,
<ide> ignore_warnings=ignore_warnings,
<ide> def debug_data_cli(
<ide>
<ide>
<ide> def debug_data(
<del> train_path: Path,
<del> dev_path: Path,
<ide> config_path: Path,
<ide> *,
<ide> config_overrides: Dict[str, Any] = {},
<ide> def debug_data(
<ide> no_print=silent, pretty=not no_format, ignore_warnings=ignore_warnings
<ide> )
<ide> # Make sure all files and paths exists if they are needed
<del> if not train_path.exists():
<del> msg.fail("Training data not found", train_path, exits=1)
<del> if not dev_path.exists():
<del> msg.fail("Development data not found", dev_path, exits=1)
<del> if not config_path.exists():
<del> msg.fail("Config file not found", config_path, exists=1)
<ide> with show_validation_error(config_path):
<ide> cfg = util.load_config(config_path, overrides=config_overrides)
<ide> nlp = util.load_model_from_config(cfg)
<del> T = registry.resolve(
<del> nlp.config.interpolate()["training"], schema=ConfigSchemaTraining
<del> )
<add> config = nlp.config.interpolate()
<add> T = registry.resolve(config["training"], schema=ConfigSchemaTraining)
<ide> # Use original config here, not resolved version
<ide> sourced_components = get_sourced_components(cfg)
<ide> frozen_components = T["frozen_components"]
<ide> def debug_data(
<ide> msg.divider("Data file validation")
<ide>
<ide> # Create the gold corpus to be able to better analyze data
<del> loading_train_error_message = ""
<del> loading_dev_error_message = ""
<del> with msg.loading("Loading corpus..."):
<del> try:
<del> train_dataset = list(Corpus(train_path)(nlp))
<del> except ValueError as e:
<del> loading_train_error_message = f"Training data cannot be loaded: {e}"
<del> try:
<del> dev_dataset = list(Corpus(dev_path)(nlp))
<del> except ValueError as e:
<del> loading_dev_error_message = f"Development data cannot be loaded: {e}"
<del> if loading_train_error_message or loading_dev_error_message:
<del> if loading_train_error_message:
<del> msg.fail(loading_train_error_message)
<del> if loading_dev_error_message:
<del> msg.fail(loading_dev_error_message)
<del> sys.exit(1)
<add> dot_names = [T["train_corpus"], T["dev_corpus"]]
<add> train_corpus, dev_corpus = resolve_dot_names(config, dot_names)
<add> train_dataset = list(train_corpus(nlp))
<add> dev_dataset = list(dev_corpus(nlp))
<ide> msg.good("Corpus is loadable")
<ide>
<add> nlp.initialize(lambda: train_dataset)
<add> msg.good("Pipeline can be initialized with data")
<add>
<ide> # Create all gold data here to avoid iterating over the train_dataset constantly
<ide> gold_train_data = _compile_gold(train_dataset, factory_names, nlp, make_proj=True)
<ide> gold_train_unpreprocessed_data = _compile_gold(
<ide> def debug_data(
<ide> msg.divider("Part-of-speech Tagging")
<ide> labels = [label for label in gold_train_data["tags"]]
<ide> # TODO: does this need to be updated?
<del> tag_map = nlp.vocab.morphology.tag_map
<del> msg.info(f"{len(labels)} label(s) in data ({len(tag_map)} label(s) in tag map)")
<add> msg.info(f"{len(labels)} label(s) in data")
<ide> labels_with_counts = _format_labels(
<ide> gold_train_data["tags"].most_common(), counts=True
<ide> )
<ide> msg.text(labels_with_counts, show=verbose)
<del> non_tagmap = [l for l in labels if l not in tag_map]
<del> if not non_tagmap:
<del> msg.good(f"All labels present in tag map for language '{nlp.lang}'")
<del> for label in non_tagmap:
<del> msg.fail(f"Label '{label}' not found in tag map for language '{nlp.lang}'")
<ide>
<ide> if "parser" in factory_names:
<ide> has_low_data_warning = False
<ide><path>website/docs/api/cli.md
<ide> $ python -m spacy debug data [config_path] [--code] [--ignore-warnings] [--verbo
<ide> ```
<ide> =========================== Data format validation ===========================
<ide> ✔ Corpus is loadable
<add>✔ Pipeline can be initialized with data
<ide>
<ide> =============================== Training stats ===============================
<ide> Training pipeline: tagger, parser, ner
<ide> New: 'ORG' (23860), 'PERSON' (21395), 'GPE' (21193), 'DATE' (18080), 'CARDINAL'
<ide> ✔ No entities consisting of or starting/ending with whitespace
<ide>
<ide> =========================== Part-of-speech Tagging ===========================
<del>ℹ 49 labels in data (57 labels in tag map)
<add>ℹ 49 labels in data
<ide> 'NN' (266331), 'IN' (227365), 'DT' (185600), 'NNP' (164404), 'JJ' (119830),
<ide> 'NNS' (110957), '.' (101482), ',' (92476), 'RB' (90090), 'PRP' (90081), 'VB'
<ide> (74538), 'VBD' (68199), 'CC' (62862), 'VBZ' (50712), 'VBP' (43420), 'VBN'
<ide> New: 'ORG' (23860), 'PERSON' (21395), 'GPE' (21193), 'DATE' (18080), 'CARDINAL'
<ide> '-RRB-' (2825), '-LRB-' (2788), 'PDT' (2078), 'XX' (1316), 'RBS' (1142), 'FW'
<ide> (794), 'NFP' (557), 'SYM' (440), 'WP$' (294), 'LS' (293), 'ADD' (191), 'AFX'
<ide> (24)
<del>✔ All labels present in tag map for language 'en'
<ide>
<ide> ============================= Dependency Parsing =============================
<ide> ℹ Found 111703 sentences with an average length of 18.6 words.
2
Javascript
Javascript
write specs for filerecoveryservice
57195d7ba632df65cdbb2362006a2ffb1b01b455
<ide><path>spec/browser/file-recovery-service-spec.js
<add>'use babel'
<add>
<add>import FileRecoveryService from '../../src/browser/file-recovery-service'
<add>import temp from 'temp'
<add>import fs from 'fs-plus'
<add>import path from 'path'
<add>import os from 'os'
<add>import crypto from 'crypto'
<add>import {Emitter} from 'event-kit'
<add>
<add>describe("FileRecoveryService", () => {
<add> let mockWindow, recoveryService, recoveryDirectory
<add>
<add> beforeEach(() => {
<add> mockWindow = new Emitter
<add> recoveryDirectory = path.join(os.tmpdir(), crypto.randomBytes(5).toString('hex'))
<add> recoveryService = new FileRecoveryService(recoveryDirectory)
<add> })
<add>
<add> describe("when no crash happens during a save", () => {
<add> it("creates a recovery file and deletes it after saving", () => {
<add> let filePath = temp.path()
<add>
<add> fs.writeFileSync(filePath, "some content")
<add> recoveryService.willSavePath({sender: mockWindow}, filePath)
<add> assert.equal(fs.listTreeSync(recoveryDirectory).length, 1)
<add>
<add> fs.writeFileSync(filePath, "changed")
<add> recoveryService.didSavePath({sender: mockWindow}, filePath)
<add> assert.equal(fs.listTreeSync(recoveryDirectory).length, 0)
<add> assert.equal(fs.readFileSync(filePath, 'utf8'), "changed")
<add> })
<add>
<add> it("creates many recovery files and deletes them when many windows attempt to save the same file", () => {
<add> const anotherMockWindow = new Emitter
<add> let filePath = temp.path()
<add>
<add> fs.writeFileSync(filePath, "some content")
<add> recoveryService.willSavePath({sender: mockWindow}, filePath)
<add> recoveryService.willSavePath({sender: anotherMockWindow}, filePath)
<add> assert.equal(fs.listTreeSync(recoveryDirectory).length, 2)
<add>
<add> fs.writeFileSync(filePath, "changed")
<add> recoveryService.didSavePath({sender: mockWindow}, filePath)
<add> recoveryService.didSavePath({sender: anotherMockWindow}, filePath)
<add> assert.equal(fs.listTreeSync(recoveryDirectory).length, 0)
<add> assert.equal(fs.readFileSync(filePath, 'utf8'), "changed")
<add> })
<add> })
<add>
<add> describe("when a crash happens during a save", () => {
<add> it("restores the created recovery file and deletes it", () => {
<add> let filePath = temp.path()
<add>
<add> fs.writeFileSync(filePath, "some content")
<add> recoveryService.willSavePath({sender: mockWindow}, filePath)
<add> assert.equal(fs.listTreeSync(recoveryDirectory).length, 1)
<add>
<add> fs.writeFileSync(filePath, "changed")
<add> mockWindow.emit("crashed")
<add> assert.equal(fs.listTreeSync(recoveryDirectory).length, 0)
<add> assert.equal(fs.readFileSync(filePath, 'utf8'), "some content")
<add> })
<add>
<add> it("restores the created recovery files and deletes them in the order in which windows crash", () => {
<add> const anotherMockWindow = new Emitter
<add> let filePath = temp.path()
<add>
<add> fs.writeFileSync(filePath, "window 1")
<add> recoveryService.willSavePath({sender: mockWindow}, filePath)
<add> fs.writeFileSync(filePath, "window 2")
<add> recoveryService.willSavePath({sender: anotherMockWindow}, filePath)
<add> assert.equal(fs.listTreeSync(recoveryDirectory).length, 2)
<add>
<add> fs.writeFileSync(filePath, "changed")
<add>
<add> mockWindow.emit("crashed")
<add> assert.equal(fs.readFileSync(filePath, 'utf8'), "window 1")
<add> assert.equal(fs.listTreeSync(recoveryDirectory).length, 1)
<add>
<add> anotherMockWindow.emit("crashed")
<add> assert.equal(fs.readFileSync(filePath, 'utf8'), "window 2")
<add> assert.equal(fs.listTreeSync(recoveryDirectory).length, 0)
<add> })
<add> })
<add>
<add> it("doesn't create a recovery file when the file that's being saved doesn't exist yet", () => {
<add> recoveryService.willSavePath({sender: mockWindow}, "a-file-that-doesnt-exist")
<add> assert.equal(fs.listTreeSync(recoveryDirectory).length, 0)
<add>
<add> recoveryService.didSavePath({sender: mockWindow}, "a-file-that-doesnt-exist")
<add> assert.equal(fs.listTreeSync(recoveryDirectory).length, 0)
<add> })
<add>})
<ide><path>src/browser/file-recovery-service.js
<ide> export default class FileRecoveryService {
<ide> }
<ide>
<ide> const window = event.sender
<del> const recoveryFileName = crypto.createHash('sha1').update(path + Date.now().toString(), 'utf8').digest('hex').substring(0, 10)
<add> const recoveryFileName = crypto.randomBytes(5).toString('hex')
<ide> const recoveryPath = Path.join(this.recoveryDirectory, recoveryFileName)
<ide> fs.writeFileSync(recoveryPath, fs.readFileSync(path))
<ide>
<ide> export default class FileRecoveryService {
<ide> didSavePath (event, path) {
<ide> const window = event.sender
<ide> const recoveryPathsByFilePath = this.recoveryPathsByWindowAndFilePath.get(window)
<del> if (recoveryPathsByFilePath.has(path)) {
<add> if (recoveryPathsByFilePath != null && recoveryPathsByFilePath.has(path)) {
<ide> const recoveryPath = recoveryPathsByFilePath.get(path)
<ide> fs.unlinkSync(recoveryPath)
<ide> recoveryPathsByFilePath.delete(path)
2
Javascript
Javascript
throw error when output.filename is missing
b31a8110b31ce2611a17061ed0ab03fc40d4fa90
<ide><path>bin/convert-argv.js <ide> module.exports = function(optimist, argv, convertOptions) { <ide> argv["optimize-minimize"] = true; <ide> } <ide> <add> var configFileLoaded = false; <ide> if(argv.config) { <ide> options = require(path.resolve(argv.config)); <add> configFileLoaded = true; <ide> } else { <ide> var configPath = path.resolve("webpack.config.js"); <ide> if(fs.existsSync(configPath)) { <ide> options = require(configPath); <add> configFileLoaded = true; <ide> } <ide> } <ide> if(typeof options !== "object" || options === null) { <ide> module.exports = function(optimist, argv, convertOptions) { <ide> options.output.filename = argv._.pop(); <ide> options.output.path = path.dirname(options.output.filename); <ide> options.output.filename = path.basename(options.output.filename); <add> } else if(configFileLoaded) { <add> throw new Error("'output.filename' is required, either in config file or as --output-file"); <ide> } else { <ide> optimist.showHelp(); <ide> process.exit(-1);
1
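For context, a hypothetical minimal config that satisfies the new check; the entry path and file names here are illustrative, not taken from the commit:

```js
// webpack.config.js - with this change, omitting output.filename (and not
// passing an output file on the command line) throws instead of silently
// falling back to the help text.
var path = require("path");

module.exports = {
  entry: "./src/index.js",
  output: {
    path: path.join(__dirname, "dist"),
    filename: "bundle.js" // required: here or via --output-file
  }
};
```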
PHP
PHP
add missing casts and improper mocks
66e94d5f9e883f9d3109cefe598252070900cf62
<ide><path>src/Console/Shell.php <ide> public function runCommand(array $argv, bool $autoMethod = false, array $extra = <ide> } <ide> <ide> $subcommands = $this->OptionParser->subcommands(); <del> $method = Inflector::camelize($command); <add> $method = Inflector::camelize((string)$command); <ide> $isMethod = $this->hasMethod($method); <ide> <ide> if ($isMethod && $autoMethod && count($subcommands) === 0) { <ide><path>src/Shell/Task/CommandTask.php <ide> public function getShell($commandName) <ide> } <ide> <ide> $name = Inflector::camelize($name); <del> $pluginDot = Inflector::camelize($pluginDot); <add> $pluginDot = Inflector::camelize((string)$pluginDot); <ide> $class = App::className($pluginDot . $name, 'Shell', 'Shell'); <ide> if (!$class) { <ide> return false; <ide><path>src/View/View.php <ide> public function renderLayout($content, $layout = null) <ide> <ide> $title = $this->Blocks->get('title'); <ide> if ($title === '') { <del> $title = Inflector::humanize($this->templatePath); <add> $title = Inflector::humanize((string)$this->templatePath); <ide> $this->Blocks->set('title', $title); <ide> } <ide> <ide><path>tests/TestCase/ORM/Behavior/TimestampBehaviorTest.php <ide> class TimestampBehaviorTest extends TestCase <ide> */ <ide> public function testImplementedEventsDefault() <ide> { <del> $table = $this->getMockBuilder('Cake\ORM\Table')->getMock(); <add> $table = $this->getTable(); <ide> $this->Behavior = new TimestampBehavior($table); <ide> <ide> $expected = [ <ide> public function testImplementedEventsDefault() <ide> */ <ide> public function testImplementedEventsCustom() <ide> { <del> $table = $this->getMockBuilder('Cake\ORM\Table')->getMock(); <add> $table = $this->getTable(); <ide> $settings = ['events' => ['Something.special' => ['date_specialed' => 'always']]]; <ide> $this->Behavior = new TimestampBehavior($table, $settings); <ide> <ide> protected function getTable() <ide> 'date_specialed' => ['type' => 'datetime'], <ide> 'timestamp_str' => ['type' => 'string'], <ide> ]; <del> $table = new Table(['schema' => $schema]); <ide> <del> return $table; <add> return new Table([ <add> 'alias' => 'Articles', <add> 'schema' => $schema <add> ]); <ide> } <ide> }
4
Mixed
Text
fix tidy_bytes for jruby
ae28e4beb3d9b395ee269999111b6598802da63f
<ide><path>activesupport/CHANGELOG.md <add>* Fix the implementation of Multibyte::Unicode.tidy_bytes for JRuby <add> <add> The existing implementation caused JRuby to raise the error: <add> `Encoding::ConverterNotFoundError: code converter not found (UTF-8 to UTF8-MAC)` <add> <add> *Justin Coyne* <add> <ide> * Fix `to_param` behavior when there are nested empty hashes. <ide> <ide> Before: <ide><path>activesupport/lib/active_support/multibyte/unicode.rb <ide> def tidy_bytes(string, force = false) <ide> # We're going to 'transcode' bytes from UTF-8 when possible, then fall back to <ide> # CP1252 when we get errors. The final string will be 'converted' back to UTF-8 <ide> # before returning. <del> reader = Encoding::Converter.new(Encoding::UTF_8, Encoding::UTF_8_MAC) <add> reader = Encoding::Converter.new(Encoding::UTF_8, Encoding::UTF_16LE) <ide> <ide> source = string.dup <del> out = ''.force_encoding(Encoding::UTF_8_MAC) <add> out = ''.force_encoding(Encoding::UTF_16LE) <ide> <ide> loop do <ide> reader.primitive_convert(source, out) <ide> _, _, _, error_bytes, _ = reader.primitive_errinfo <ide> break if error_bytes.nil? <del> out << error_bytes.encode(Encoding::UTF_8_MAC, Encoding::Windows_1252, invalid: :replace, undef: :replace) <add> out << error_bytes.encode(Encoding::UTF_16LE, Encoding::Windows_1252, invalid: :replace, undef: :replace) <ide> end <ide> <ide> reader.finish
2
Javascript
Javascript
remove extra require
182e97a62ca0677ae4940ac5f88199ec73ce6295
<ide><path>local-cli/cli.js <ide> var childProcess = require('child_process'); <ide> var Config = require('./util/Config'); <ide> var defaultConfig = require('./default.config'); <ide> var dependencies = require('./dependencies/dependencies'); <del>var fs = require('fs'); <ide> var generate = require('./generate/generate'); <ide> var library = require('./library/library'); <ide> var link = require('./library/link');
1
Text
Text
fix some broken sourceforge.net links
0e7a1079be5e87aae2abcda7c27a2b0e67270a50
<ide><path>docs/admin/b2d_volume_resize.md <ide> The `boot2docker` command reads its configuration from the `$BOOT2DOCKER_PROFILE <ide> <ide> This solution increases the volume size by first cloning it, then resizing it <ide> using a disk partitioning tool. We recommend <del>[GParted](http://gparted.sourceforge.net/download.php/index.php). The tool comes <add>[GParted](https://sourceforge.net/projects/gparted/files/). The tool comes <ide> as a bootable ISO, is a free download, and works well with VirtualBox. <ide> <ide> 1. Stop Boot2Docker <ide> as a bootable ISO, is a free download, and works well with VirtualBox. <ide> <ide> 5. Download a disk partitioning tool ISO <ide> <del> To resize the volume, we'll use [GParted](http://gparted.sourceforge.net/download.php/). <add> To resize the volume, we'll use [GParted](https://sourceforge.net/projects/gparted/files/). <ide> Once you've downloaded the tool, add the ISO to the Boot2Docker VM IDE bus. <ide> You might need to create the bus before you can add the ISO. <ide> <ide><path>docs/reference/builder.md <ide> The cache for `RUN` instructions can be invalidated by `ADD` instructions. See <ide> For systems that have recent aufs version (i.e., `dirperm1` mount option can <ide> be set), docker will attempt to fix the issue automatically by mounting <ide> the layers with `dirperm1` option. More details on `dirperm1` option can be <del> found at [`aufs` man page](http://aufs.sourceforge.net/aufs3/man.html) <add> found at [`aufs` man page](https://github.com/sfjro/aufs3-linux/tree/aufs3.18/Documentation/filesystems/aufs) <ide> <ide> If your system doesn't have support for `dirperm1`, the issue describes a workaround. <ide> <ide><path>docs/security/security.md <ide> common Ethernet switch; no more, no less. <ide> How mature is the code providing kernel namespaces and private <ide> networking? Kernel namespaces were introduced [between kernel version <ide> 2.6.15 and <del>2.6.26](http://lxc.sourceforge.net/index.php/about/kernel-namespaces/). <add>2.6.26](http://man7.org/linux/man-pages/man7/namespaces.7.html). <ide> This means that since July 2008 (date of the 2.6.26 release <ide> ), namespace code has been exercised and scrutinized on a large <ide> number of production systems. And there is more: the design and
3
Javascript
Javascript
fix preventdefault for all browsers
83b0e17f3fc1b0ed9993827d2308c344392926d4
<ide><path>src/jquery/jquery.js <ide> jQuery.extend({ <ide> if(jQuery.browser.msie) { <ide> // get real event from window.event <ide> event = window.event; <del> event.preventDefault = function() { <del> this.returnValue = false; <del> }; <del> event.stopPropagation = function() { <del> this.cancelBubble = true; <del> }; <ide> // fix target property <ide> event.target = event.srcElement; <ide> // check safari and if target is a textnode <ide> jQuery.extend({ <ide> // get parentnode from textnode <ide> event.target = event.target.parentNode; <ide> } <add> // fix preventDefault and stopPropagation <add> event.preventDefault = function() { <add> this.returnValue = false; <add> }; <add> event.stopPropagation = function() { <add> this.cancelBubble = true; <add> }; <ide> return event; <ide> } <ide>
1
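A hypothetical handler showing what the normalized event object enables in old IE; the selector and binding call are era-appropriate jQuery usage invented for illustration, not taken from the commit:

```js
// With the shims attached after the target fix-up, handlers can rely on
// the W3C-style methods regardless of browser.
$("a.disabled").bind("click", function (event) {
  event.preventDefault();  // IE: sets window.event.returnValue = false
  event.stopPropagation(); // IE: sets window.event.cancelBubble = true
});
```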
Javascript
Javascript
send hmr updates only for files in the bundle
b5081abae37f8599ed8902eb8a9770d31bf168ee
<ide><path>local-cli/server/util/attachHMRServer.js
<ide> function attachHMRServer({httpServer, path, packagerServer}) {
<ide> client = null;
<ide> }
<ide>
<add> // Returns a promise with the full list of dependencies and the shallow
<add> // dependencies each file on the dependency list has for the give platform
<add> // and entry file.
<add> function getDependencies(platform, bundleEntry) {
<add> return packagerServer.getDependencies({
<add> platform: platform,
<add> dev: true,
<add> entryFile: bundleEntry,
<add> }).then(response => {
<add> // for each dependency builds the object:
<add> // `{path: '/a/b/c.js', deps: ['modA', 'modB', ...]}`
<add> return Promise.all(Object.values(response.dependencies).map(dep => {
<add> if (dep.isAsset() || dep.isAsset_DEPRECATED() || dep.isJSON()) {
<add> return Promise.resolve({path: dep.path, deps: []});
<add> }
<add> return packagerServer.getShallowDependencies(dep.path)
<add> .then(deps => {
<add> return {
<add> path: dep.path,
<add> deps,
<add> };
<add> });
<add> }))
<add> .then(deps => {
<add> // list with all the dependencies the bundle entry has
<add> const dependenciesCache = response.dependencies.map(dep => dep.path);
<add>
<add> // map that indicates the shallow dependency each file included on the
<add> // bundle has
<add> const shallowDependencies = {};
<add> deps.forEach(dep => shallowDependencies[dep.path] = dep.deps);
<add>
<add> return {dependenciesCache, shallowDependencies};
<add> });
<add> });
<add> }
<add>
<ide> packagerServer.addFileChangeListener(filename => {
<ide> if (!client) {
<ide> return;
<ide> }
<ide>
<del> packagerServer.buildBundleForHMR({
<del> entryFile: filename,
<del> platform: client.platform,
<del> })
<del> .then(bundle => client.ws.send(bundle));
<add> packagerServer.getShallowDependencies(filename)
<add> .then(deps => {
<add> // if the file dependencies have change we need to invalidate the
<add> // dependencies caches because the list of files we need to send to the
<add> // client may have changed
<add> if (arrayEquals(deps, client.shallowDependencies[filename])) {
<add> return Promise.resolve();
<add> }
<add> return getDependencies(client.platform, client.bundleEntry)
<add> .then(({dependenciesCache, shallowDependencies}) => {
<add> // invalidate caches
<add> client.dependenciesCache = dependenciesCache;
<add> client.shallowDependencies = shallowDependencies;
<add> });
<add> })
<add> .then(() => {
<add> // make sure the file was modified is part of the bundle
<add> if (!client.shallowDependencies[filename]) {
<add> return;
<add> }
<add>
<add> return packagerServer.buildBundleForHMR({
<add> platform: client.platform,
<add> entryFile: filename,
<add> })
<add> .then(bundle => client.ws.send(bundle));
<add> })
<add> .done();
<ide> });
<ide>
<ide> const WebSocketServer = require('ws').Server;
<ide> function attachHMRServer({httpServer, path, packagerServer}) {
<ide> wss.on('connection', ws => {
<ide> console.log('[Hot Module Replacement] Client connected');
<ide> const params = querystring.parse(url.parse(ws.upgradeReq.url).query);
<del> client = {
<del> ws,
<del> platform: params.platform,
<del> bundleEntry: params.bundleEntry,
<del> };
<del>
<del> client.ws.on('error', e => {
<del> console.error('[Hot Module Replacement] Unexpected error', e);
<del> disconnect();
<del> });
<del>
<del> client.ws.on('close', () => disconnect());
<add> getDependencies(params.platform, params.bundleEntry)
<add> .then(({dependenciesCache, shallowDependencies}) => {
<add> client = {
<add> ws,
<add> platform: params.platform,
<add> bundleEntry: params.bundleEntry,
<add> dependenciesCache,
<add> shallowDependencies,
<add> };
<add>
<add> client.ws.on('error', e => {
<add> console.error('[Hot Module Replacement] Unexpected error', e);
<add> disconnect();
<add> });
<add>
<add> client.ws.on('close', () => disconnect());
<add> })
<add> .done();
<ide> });
<ide> }
<ide>
<add>function arrayEquals(arrayA, arrayB) {
<add> arrayA = arrayA || [];
<add> arrayB = arrayB || [];
<add> return (
<add> arrayA.length === arrayB.length &&
<add> arrayA.every((element, index) => {
<add> return element === arrayB[index];
<add> })
<add> );
<add>}
<add>
<ide> module.exports = attachHMRServer;
<ide><path>packager/react-packager/src/Bundler/index.js
<ide> class Bundler {
<ide> this._transformer.invalidateFile(filePath);
<ide> }
<ide>
<add> getShallowDependencies(entryFile) {
<add> return this._resolver.getShallowDependencies(entryFile);
<add> }
<add>
<ide> getDependencies(main, isDev, platform) {
<ide> return this._resolver.getDependencies(main, { dev: isDev, platform });
<ide> }
<ide><path>packager/react-packager/src/DependencyResolver/DependencyGraph/index.js
<ide> class DependencyGraph {
<ide> return this._loading;
<ide> }
<ide>
<add> /**
<add> * Returns a promise with the direct dependencies the module associated to
<add> * the given entryPath has.
<add> */
<add> getShallowDependencies(entryPath) {
<add> return this._moduleCache.getModule(entryPath).getDependencies();
<add> }
<add>
<ide> getDependencies(entryPath, platform) {
<ide> return this.load().then(() => {
<ide> platform = this._getRequestPlatform(entryPath, platform);
<ide><path>packager/react-packager/src/Resolver/index.js
<ide> class Resolver {
<ide> this._polyfillModuleNames = opts.polyfillModuleNames || [];
<ide> }
<ide>
<add> getShallowDependencies(entryFile) {
<add> return this._depGraph.getShallowDependencies(entryFile);
<add> }
<add>
<ide> getDependencies(main, options) {
<ide> const opts = getDependenciesValidateOpts(options);
<ide>
<ide><path>packager/react-packager/src/Server/index.js
<ide> class Server {
<ide> });
<ide> }
<ide>
<add> getShallowDependencies(entryFile) {
<add> return this._bundler.getShallowDependencies(entryFile);
<add> }
<add>
<ide> getDependencies(options) {
<ide> return Promise.resolve().then(() => {
<ide> if (!options.platform) {
5
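The gatekeeper for cache invalidation in the patch above is its `arrayEquals` helper; this standalone sketch restates the helper and shows the comparisons it performs:

```js
// Order-sensitive, shallow comparison: two dependency lists only match if
// they hold the same modules in the same positions; missing lists count as
// empty.
function arrayEquals(arrayA, arrayB) {
  arrayA = arrayA || [];
  arrayB = arrayB || [];
  return (
    arrayA.length === arrayB.length &&
    arrayA.every((element, index) => element === arrayB[index])
  );
}

arrayEquals(['modA', 'modB'], ['modA', 'modB']); // true  - caches kept
arrayEquals(['modA', 'modB'], ['modB', 'modA']); // false - caches rebuilt
arrayEquals(undefined, []);                      // true
```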
PHP
PHP
add tests for new constructor behavior
495702fd7074aabfe0c3a73d40e78787890aa5b7
<ide><path>tests/TestCase/Controller/Component/RequestHandlerComponentTest.php <ide> public function testCheckNotModifiedNoInfo() <ide> $RequestHandler->response->expects($this->never())->method('notModified'); <ide> $this->assertNull($RequestHandler->beforeRender($event, '', $RequestHandler->response)); <ide> } <add> <add> /** <add> * Test default options in construction <add> * <add> * @return void <add> */ <add> public function testConstructDefaultOptions() <add> { <add> $requestHandler = new RequestHandlerComponent($this->Controller->components()); <add> $viewClass = $requestHandler->config('viewClassMap'); <add> $expected = [ <add> 'json' => 'Json', <add> 'xml' => 'Xml', <add> 'ajax' => 'Ajax', <add> ]; <add> $this->assertEquals($expected, $viewClass); <add> <add> $inputs = $requestHandler->config('inputTypeMap'); <add> $this->assertArrayHasKey('json', $inputs); <add> $this->assertArrayHasKey('xml', $inputs); <add> } <add> <add> /** <add> * Test options in constructor replace defaults <add> * <add> * @return void <add> */ <add> public function testConstructReplaceOptions() <add> { <add> $requestHandler = new RequestHandlerComponent( <add> $this->Controller->components(), <add> [ <add> 'viewClassMap' => ['json' => 'Json'], <add> 'inputTypeMap' => ['json' => ['json_decode', true]] <add> ] <add> ); <add> $viewClass = $requestHandler->config('viewClassMap'); <add> $expected = [ <add> 'json' => 'Json', <add> ]; <add> $this->assertEquals($expected, $viewClass); <add> <add> $inputs = $requestHandler->config('inputTypeMap'); <add> $this->assertArrayHasKey('json', $inputs); <add> $this->assertCount(1, $inputs); <add> } <ide> }
1
Java
Java
add marble diagram to the single.never method
e29b57b666954d37129da17430e592631ff17a79
<ide><path>src/main/java/io/reactivex/Single.java <ide> public static <T> Flowable<T> mergeDelayError( <ide> <ide> /** <ide> * Returns a singleton instance of a never-signalling Single (only calls onSubscribe). <add> * <p> <add> * <img width="640" height="244" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Single.never.png" alt=""> <ide> * <dl> <ide> * <dt><b>Scheduler:</b></dt> <ide> * <dd>{@code never} does not operate by default on a particular {@link Scheduler}.</dd> <ide> public final T blockingGet() { <ide> * Example: <ide> * <pre><code> <ide> * // Step 1: Create the consumer type that will be returned by the SingleOperator.apply(): <del> * <add> * <ide> * public final class CustomSingleObserver&lt;T&gt; implements SingleObserver&lt;T&gt;, Disposable { <ide> * <ide> * // The downstream's SingleObserver that will receive the onXXX events
1
PHP
PHP
return queue calls
26a13b52396860c695f05d826b044dea3b8d109c
<ide><path>src/Illuminate/Mail/Mailer.php <ide> public function send($view, array $data, $callback) <ide> * @param array $data <ide> * @param \Closure|string $callback <ide> * @param string $queue <del> * @return void <add> * @return mixed <ide> */ <ide> public function queue($view, array $data, $callback, $queue = null) <ide> { <ide> $callback = $this->buildQueueCallable($callback); <ide> <del> $this->queue->push('mailer@handleQueuedMessage', compact('view', 'data', 'callback'), $queue); <add> return $this->queue->push('mailer@handleQueuedMessage', compact('view', 'data', 'callback'), $queue); <ide> } <ide> <ide> /** <ide> public function queue($view, array $data, $callback, $queue = null) <ide> * @param string|array $view <ide> * @param array $data <ide> * @param \Closure|string $callback <del> * @return void <add> * @return mixed <ide> */ <ide> public function queueOn($queue, $view, array $data, $callback) <ide> { <del> $this->queue($view, $data, $callback, $queue); <add> return $this->queue($view, $data, $callback, $queue); <ide> } <ide> <ide> /** <ide> public function queueOn($queue, $view, array $data, $callback) <ide> * @param array $data <ide> * @param \Closure|string $callback <ide> * @param string $queue <del> * @return void <add> * @return mixed <ide> */ <ide> public function later($delay, $view, array $data, $callback, $queue = null) <ide> { <ide> $callback = $this->buildQueueCallable($callback); <ide> <del> $this->queue->later($delay, 'mailer@handleQueuedMessage', compact('view', 'data', 'callback'), $queue); <add> return $this->queue->later($delay, 'mailer@handleQueuedMessage', compact('view', 'data', 'callback'), $queue); <ide> } <ide> <ide> /** <ide> public function later($delay, $view, array $data, $callback, $queue = null) <ide> * @param string|array $view <ide> * @param array $data <ide> * @param \Closure|string $callback <del> * @return void <add> * @return mixed <ide> */ <ide> public function laterOn($queue, $delay, $view, array $data, $callback) <ide> { <del> $this->later($delay, $view, $data, $callback, $queue); <add> return $this->later($delay, $view, $data, $callback, $queue); <ide> } <ide> <ide> /**
1
Javascript
Javascript
remove experimental from module type name
955d5689a93d34375368ca1b821f554dc6e905cc
<ide><path>examples/wasm-complex/webpack.config.js
<ide> module.exports = {
<ide> {
<ide> test: /\.wat$/,
<ide> use: "wast-loader",
<del> type: "webassembly/async-experimental"
<add> type: "webassembly/async"
<ide> }
<ide> ]
<ide> },
<ide><path>examples/wasm-simple/webpack.config.js
<ide> module.exports = {
<ide> rules: [
<ide> {
<ide> test: /\.wasm$/,
<del> type: "webassembly/async-experimental"
<add> type: "webassembly/async"
<ide> }
<ide> ]
<ide> },
<ide><path>lib/WebpackOptionsDefaulter.js
<ide> class WebpackOptionsDefaulter extends OptionsDefaulter {
<ide> },
<ide> options.experiments.syncWebAssembly && {
<ide> test: /\.wasm$/i,
<del> type: "webassembly/experimental"
<add> type: "webassembly/sync"
<ide> },
<ide> options.experiments.asyncWebAssembly && {
<ide> test: /\.wasm$/i,
<del> type: "webassembly/async-experimental"
<add> type: "webassembly/async"
<ide> }
<ide> ].filter(Boolean)
<ide> );
<ide><path>lib/debug/ProfilingPlugin.js
<ide> const interceptAllParserHooks = (moduleFactory, tracer) => {
<ide> "javascript/dynamic",
<ide> "javascript/esm",
<ide> "json",
<del> "webassembly/async-experimental",
<del> "webassembly/experimental"
<add> "webassembly/async",
<add> "webassembly/sync"
<ide> ];
<ide>
<ide> moduleTypes.forEach(moduleType => {
<ide><path>lib/node/ReadFileCompileAsyncWasmPlugin.js
<ide> class ReadFileCompileAsyncWasmPlugin {
<ide> if (
<ide> !chunkGraph.hasModuleInGraph(
<ide> chunk,
<del> m => m.type === "webassembly/async-experimental"
<add> m => m.type === "webassembly/async"
<ide> )
<ide> ) {
<ide> return;
<ide><path>lib/node/ReadFileCompileWasmPlugin.js
<ide> class ReadFileCompileWasmPlugin {
<ide> if (
<ide> !chunkGraph.hasModuleInGraph(
<ide> chunk,
<del> m => m.type === "webassembly/experimental"
<add> m => m.type === "webassembly/sync"
<ide> )
<ide> ) {
<ide> return;
<ide><path>lib/wasm-async/AsyncWebAssemblyModulesPlugin.js
<ide> class AsyncWebAssemblyModulesPlugin {
<ide> );
<ide>
<ide> normalModuleFactory.hooks.createParser
<del> .for("webassembly/async-experimental")
<add> .for("webassembly/async")
<ide> .tap("AsyncWebAssemblyModulesPlugin", () => {
<ide> return new AsyncWebAssemblyParser();
<ide> });
<ide> normalModuleFactory.hooks.createGenerator
<del> .for("webassembly/async-experimental")
<add> .for("webassembly/async")
<ide> .tap("AsyncWebAssemblyModulesPlugin", () => {
<ide> return Generator.byType({
<ide> javascript: new AsyncWebAssemblyJavascriptGenerator(
<ide> class AsyncWebAssemblyModulesPlugin {
<ide> chunk,
<ide> compareModulesById(chunkGraph)
<ide> )) {
<del> if (module.type === "webassembly/async-experimental") {
<add> if (module.type === "webassembly/async") {
<ide> const filenameTemplate = outputOptions.webassemblyModuleFilename;
<ide>
<ide> result.push({
<ide><path>lib/wasm/WebAssemblyModulesPlugin.js
<ide> class WebAssemblyModulesPlugin {
<ide> );
<ide>
<ide> normalModuleFactory.hooks.createParser
<del> .for("webassembly/experimental")
<add> .for("webassembly/sync")
<ide> .tap("WebAssemblyModulesPlugin", () => {
<ide> return new WebAssemblyParser();
<ide> });
<ide>
<ide> normalModuleFactory.hooks.createGenerator
<del> .for("webassembly/experimental")
<add> .for("webassembly/sync")
<ide> .tap("WebAssemblyModulesPlugin", () => {
<ide> return Generator.byType({
<ide> javascript: new WebAssemblyJavascriptGenerator(),
<ide> class WebAssemblyModulesPlugin {
<ide> chunk,
<ide> compareModulesById(chunkGraph)
<ide> )) {
<del> if (module.type === "webassembly/experimental") {
<add> if (module.type === "webassembly/sync") {
<ide> const filenameTemplate =
<ide> outputOptions.webassemblyModuleFilename;
<ide>
<ide> class WebAssemblyModulesPlugin {
<ide> for (const chunk of compilation.chunks) {
<ide> if (chunk.canBeInitial()) {
<ide> for (const module of chunkGraph.getChunkModulesIterable(chunk)) {
<del> if (module.type === "webassembly/experimental") {
<add> if (module.type === "webassembly/sync") {
<ide> initialWasmModules.add(module);
<ide> }
<ide> }
<ide><path>lib/web/FetchCompileAsyncWasmPlugin.js
<ide> class FetchCompileAsyncWasmPlugin {
<ide> if (
<ide> !chunkGraph.hasModuleInGraph(
<ide> chunk,
<del> m => m.type === "webassembly/async-experimental"
<add> m => m.type === "webassembly/async"
<ide> )
<ide> ) {
<ide> return;
<ide><path>lib/web/FetchCompileWasmPlugin.js
<ide> class FetchCompileWasmPlugin {
<ide> if (
<ide> !chunkGraph.hasModuleInGraph(
<ide> chunk,
<del> m => m.type === "webassembly/experimental"
<add> m => m.type === "webassembly/sync"
<ide> )
<ide> ) {
<ide> return;
<ide><path>test/TestCases.template.js
<ide> const describeCases = config => {
<ide> {
<ide> test: /\.wat$/i,
<ide> loader: "wast-loader",
<del> type: "webassembly/async-experimental"
<add> type: "webassembly/async"
<ide> }
<ide> ]
<ide> },
<ide><path>test/configCases/wasm/identical/webpack.config.js
<ide> module.exports = {
<ide> {
<ide> test: /\.wat$/,
<ide> loader: "wast-loader",
<del> type: "webassembly/async-experimental"
<add> type: "webassembly/async"
<ide> }
<ide> ]
<ide> },
<ide><path>test/configCases/wasm/wasm-in-initial-chunk-error/webpack.config.js
<ide> module.exports = {
<ide> {
<ide> test: /\.wat$/,
<ide> loader: "wast-loader",
<del> type: "webassembly/experimental"
<add> type: "webassembly/sync"
<ide> }
<ide> ]
<ide> },
13
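A config sketch using the renamed module types, assembled from the rules that appear in the patch's own example and test configs; "webassembly/async" replaces "webassembly/async-experimental" and "webassembly/sync" replaces "webassembly/experimental":

```js
// webpack.config.js - opt in to the experiment, then tag matching modules
// with the new (non-"experimental") type name.
module.exports = {
  experiments: {
    asyncWebAssembly: true
  },
  module: {
    rules: [
      {
        test: /\.wat$/i,
        loader: "wast-loader",
        type: "webassembly/async"
      }
    ]
  }
};
```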
PHP
PHP
set default value for argument
e1213d5449c27c3b0a16c073b7ab96f37e54508a
<ide><path>src/Datasource/EntityInterface.php <ide> public function getVirtual(): array; <ide> * <ide> * @param string $field the field to set or check status for <ide> * @param bool $isDirty true means the field was changed, false means <del> * it was not changed <add> * it was not changed. Default true. <ide> * @return $this <ide> */ <del> public function setDirty(string $field, bool $isDirty); <add> public function setDirty(string $field, bool $isDirty = true); <ide> <ide> /** <ide> * Checks if the entity is dirty or if a single field of it is dirty.
1
Ruby
Ruby
add resolved_formulae method
60383be03292c6996f16b24b46699741382e8db4
<ide><path>Library/Homebrew/extend/ARGV.rb <ide> def formulae <ide> @formulae ||= (downcased_unique_named - casks).map { |name| Formulary.factory(name, spec) } <ide> end <ide> <add> def resolved_formulae <add> require "formula" <add> @resolved_formulae ||= (downcased_unique_named - casks).map do |name| <add> if name.include?("/") <add> Formulary.factory(name, spec) <add> else <add> Formulary.from_rack(HOMEBREW_CELLAR/name, spec) <add> end <add> end <add> end <add> <ide> def casks <ide> @casks ||= downcased_unique_named.grep HOMEBREW_CASK_TAP_FORMULA_REGEX <ide> end
1