content_type
stringclasses
8 values
main_lang
stringclasses
7 values
message
stringlengths
1
50
sha
stringlengths
40
40
patch
stringlengths
52
962k
file_count
int64
1
300
Text
Text
add summary on sshd drives
e90710ad4640668fa69aa41165ff9da926b3e0c8
<ide><path>client/src/pages/guide/english/computer-hardware/hard-drives/index.md <ide> SSDs are commonly used as boot drives or storage for the most used applications <ide> <ide> While most Hard Drives use either a SATA or SAS connector Solid State Drives often use other connections that can handle higher bandwidth and lower latencies, with the most notable being PCI Express (PCI-e) where form factors such as M.2 or U.2 dominate. Though other form factors are available, such as Intel's 'Yard stick' form factor or PCI-e cards that look quite similar to a low end graphics card. <ide> <add>## Solid State Hard Drives (SSHD) a.k.a Hybrid Drives <add>Solid State Hard Drives fill a specific gap inbetween Solid State Drives and traditional hard drives. It combines the relative affordable cost of cheap magnetic storage in traditional drives and pairs it with a smaller capacity Solid State Drive with the intent of using the SSD portion to cache frequently used data to increase performance over a plain traditional hard drive at a marginal cost. Hence the combination of the two technologies creates a "hybrid device" that is cost effectively but still is able to benenfit from the high performance of SSD drives primarily for low intensity workloads that mostly utilize read requests from the drive. <add> <ide> #### More Information: <ide> <ide> * [Wikipedia - Hard Disk Drive](https://en.wikipedia.org/wiki/Hard_disk_drive)
1
Text
Text
add missing space
ceca4109fac5367f7c354b675cadf086dc1e09c3
<ide><path>docs/docker-hub/builds.md <ide> The following parameters are recognized in callback data: <ide> ## Repository links <ide> <ide> Repository links are a way to associate one Automated Build with <del>another. If one gets updated,the linking system triggers a rebuild <add>another. If one gets updated, the linking system triggers a rebuild <ide> for the other Automated Build. This makes it easy to keep all your <ide> Automated Builds up to date. <ide>
1
Python
Python
fix attributeerror regression in
1901ea8594185c015d1518d89f3b90180275c0b9
<ide><path>celery/beat.py <ide> def _evaluate_entry_args(entry_args): <ide> return [] <ide> return [ <ide> v() if isinstance(v, BeatLazyFunc) else v <del> for v in entry_args.args <add> for v in entry_args <ide> ] <ide> <ide> <ide><path>t/unit/app/test_beat.py <ide> def foo(): <ide> scheduler.apply_async(entry, advance=False) <ide> foo.apply_async.assert_called() <ide> <add> def test_apply_async_without_null_args(self): <add> <add> @self.app.task(shared=False) <add> def foo(moo: int): <add> return moo <add> foo.apply_async = Mock(name='foo.apply_async') <add> <add> scheduler = mScheduler(app=self.app) <add> entry = scheduler.Entry(task=foo.name, app=self.app, args=None, <add> kwargs=None) <add> entry.args = (101,) <add> entry.kwargs = None <add> <add> scheduler.apply_async(entry, advance=False) <add> foo.apply_async.assert_called() <add> assert foo.apply_async.call_args[0][0] == [101] <add> <ide> def test_should_sync(self): <ide> <ide> @self.app.task(shared=False)
2
Java
Java
add requestpath to http/server/reactive
2d17411ec4b3f0b973264ab270689b8f578d3e52
<ide><path>spring-web/src/main/java/org/springframework/http/server/reactive/DefaultRequestPath.java <add>/* <add> * Copyright 2002-2017 the original author or authors. <add> * <add> * Licensed under the Apache License, Version 2.0 (the "License"); <add> * you may not use this file except in compliance with the License. <add> * You may obtain a copy of the License at <add> * <add> * http://www.apache.org/licenses/LICENSE-2.0 <add> * <add> * Unless required by applicable law or agreed to in writing, software <add> * distributed under the License is distributed on an "AS IS" BASIS, <add> * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. <add> * See the License for the specific language governing permissions and <add> * limitations under the License. <add> */ <add>package org.springframework.http.server.reactive; <add> <add>import java.net.URI; <add>import java.nio.charset.Charset; <add>import java.util.ArrayList; <add>import java.util.Collections; <add>import java.util.List; <add> <add>import org.springframework.util.Assert; <add>import org.springframework.util.CollectionUtils; <add>import org.springframework.util.LinkedMultiValueMap; <add>import org.springframework.util.MultiValueMap; <add>import org.springframework.util.StringUtils; <add> <add>/** <add> * <add> * @author Rossen Stoyanchev <add> * @since 5.0 <add> */ <add>class DefaultRequestPath implements RequestPath { <add> <add> private static final MultiValueMap<String, String> EMPTY_MAP = new LinkedMultiValueMap<>(0); <add> <add> private static final PathSegment EMPTY_PATH_SEGMENT = new DefaultPathSegment("", "", "", EMPTY_MAP); <add> <add> private static final PathSegmentContainer EMPTY_PATH = <add> new DefaultPathSegmentContainer("", Collections.emptyList()); <add> <add> private static final PathSegmentContainer ROOT_PATH = <add> new DefaultPathSegmentContainer("/", Collections.singletonList(EMPTY_PATH_SEGMENT)); <add> <add> <add> private final PathSegmentContainer fullPath; <add> 
<add> private final PathSegmentContainer contextPath; <add> <add> private final PathSegmentContainer pathWithinApplication; <add> <add> <add> DefaultRequestPath(URI uri, String contextPath, Charset charset) { <add> this.fullPath = parsePath(uri.getRawPath(), charset); <add> this.contextPath = initContextPath(this.fullPath, contextPath); <add> this.pathWithinApplication = initPathWithinApplication(this.fullPath, this.contextPath); <add> } <add> <add> <add> private static PathSegmentContainer parsePath(String path, Charset charset) { <add> path = StringUtils.hasText(path) ? path : ""; <add> if ("".equals(path)) { <add> return EMPTY_PATH; <add> } <add> if ("/".equals(path)) { <add> return ROOT_PATH; <add> } <add> List<PathSegment> result = new ArrayList<>(); <add> int begin = 1; <add> while (true) { <add> int end = path.indexOf('/', begin); <add> String segment = (end != -1 ? path.substring(begin, end) : path.substring(begin)); <add> result.add(parsePathSegment(segment, charset)); <add> if (end == -1) { <add> break; <add> } <add> begin = end + 1; <add> if (begin == path.length()) { <add> // trailing slash <add> result.add(EMPTY_PATH_SEGMENT); <add> break; <add> } <add> } <add> return new DefaultPathSegmentContainer(path, result); <add> } <add> <add> private static PathSegment parsePathSegment(String input, Charset charset) { <add> if ("".equals(input)) { <add> return EMPTY_PATH_SEGMENT; <add> } <add> int index = input.indexOf(';'); <add> if (index == -1) { <add> return new DefaultPathSegment(input, StringUtils.uriDecode(input, charset), "", EMPTY_MAP); <add> } <add> String value = input.substring(0, index); <add> String valueDecoded = StringUtils.uriDecode(value, charset); <add> String semicolonContent = input.substring(index); <add> MultiValueMap<String, String> parameters = parseParams(semicolonContent, charset); <add> return new DefaultPathSegment(value, valueDecoded, semicolonContent, parameters); <add> } <add> <add> private static MultiValueMap<String, String> 
parseParams(String input, Charset charset) { <add> MultiValueMap<String, String> result = new LinkedMultiValueMap<>(); <add> int begin = 1; <add> while (begin < input.length()) { <add> int end = input.indexOf(';', begin); <add> String param = (end != -1 ? input.substring(begin, end) : input.substring(begin)); <add> parseParamValues(param, charset, result); <add> if (end == -1) { <add> break; <add> } <add> begin = end + 1; <add> } <add> return result; <add> } <add> <add> private static void parseParamValues(String input, Charset charset, MultiValueMap<String, String> output) { <add> if (StringUtils.hasText(input)) { <add> int index = input.indexOf("="); <add> if (index != -1) { <add> String name = input.substring(0, index); <add> String value = input.substring(index + 1); <add> for (String v : StringUtils.commaDelimitedListToStringArray(value)) { <add> name = StringUtils.uriDecode(name, charset); <add> if (StringUtils.hasText(name)) { <add> output.add(name, StringUtils.uriDecode(v, charset)); <add> } <add> } <add> } <add> else { <add> String name = StringUtils.uriDecode(input, charset); <add> if (StringUtils.hasText(name)) { <add> output.add(input, ""); <add> } <add> } <add> } <add> } <add> <add> private static PathSegmentContainer initContextPath(PathSegmentContainer path, String contextPath) { <add> if (!StringUtils.hasText(contextPath) || "/".equals(contextPath)) { <add> return EMPTY_PATH; <add> } <add> <add> Assert.isTrue(contextPath.startsWith("/") && !contextPath.endsWith("/") && <add> path.value().startsWith(contextPath), "Invalid contextPath: " + contextPath); <add> <add> int length = contextPath.length(); <add> int counter = 0; <add> <add> List<PathSegment> result = new ArrayList<>(); <add> for (PathSegment pathSegment : path.pathSegments()) { <add> result.add(pathSegment); <add> counter += 1; // for '/' separators <add> counter += pathSegment.value().length(); <add> counter += pathSegment.semicolonContent().length(); <add> if (length == counter) { <add> 
return new DefaultPathSegmentContainer(contextPath, result); <add> } <add> } <add> <add> // Should not happen.. <add> throw new IllegalStateException("Failed to initialize contextPath='" + contextPath + "'" + <add> " given path='" + path.value() + "'"); <add> } <add> <add> private static PathSegmentContainer initPathWithinApplication(PathSegmentContainer path, <add> PathSegmentContainer contextPath) { <add> <add> String value = path.value().substring(contextPath.value().length()); <add> List<PathSegment> pathSegments = new ArrayList<>(path.pathSegments()); <add> pathSegments.removeAll(contextPath.pathSegments()); <add> return new DefaultPathSegmentContainer(value, pathSegments); <add> } <add> <add> <add> @Override <add> public String value() { <add> return this.fullPath.value(); <add> } <add> <add> @Override <add> public List<PathSegment> pathSegments() { <add> return this.fullPath.pathSegments(); <add> } <add> <add> @Override <add> public PathSegmentContainer contextPath() { <add> return this.contextPath; <add> } <add> <add> @Override <add> public PathSegmentContainer pathWithinApplication() { <add> return this.pathWithinApplication; <add> } <add> <add> <add> private static class DefaultPathSegmentContainer implements PathSegmentContainer { <add> <add> private final String path; <add> <add> private final List<PathSegment> pathSegments; <add> <add> <add> DefaultPathSegmentContainer(String path, List<PathSegment> pathSegments) { <add> this.path = path; <add> this.pathSegments = Collections.unmodifiableList(pathSegments); <add> } <add> <add> <add> @Override <add> public String value() { <add> return this.path; <add> } <add> <add> @Override <add> public List<PathSegment> pathSegments() { <add> return this.pathSegments; <add> } <add> <add> <add> @Override <add> public boolean equals(Object other) { <add> if (this == other) { <add> return true; <add> } <add> if (other == null || getClass() != other.getClass()) { <add> return false; <add> } <add> return 
this.path.equals(((DefaultPathSegmentContainer) other).path); <add> } <add> <add> @Override <add> public int hashCode() { <add> return this.path.hashCode(); <add> } <add> <add> @Override <add> public String toString() { <add> return "[path='" + this.path + "\']"; <add> } <add> } <add> <add> <add> private static class DefaultPathSegment implements PathSegment { <add> <add> private final String value; <add> <add> private final String valueDecoded; <add> <add> private final String semicolonContent; <add> <add> private final MultiValueMap<String, String> parameters; <add> <add> <add> DefaultPathSegment(String value, String valueDecoded, String semicolonContent, <add> MultiValueMap<String, String> params) { <add> <add> this.value = value; <add> this.valueDecoded = valueDecoded; <add> this.semicolonContent = semicolonContent; <add> this.parameters = CollectionUtils.unmodifiableMultiValueMap(params); <add> } <add> <add> <add> @Override <add> public String value() { <add> return this.value; <add> } <add> <add> @Override <add> public String valueDecoded() { <add> return this.valueDecoded; <add> } <add> <add> @Override <add> public String semicolonContent() { <add> return this.semicolonContent; <add> } <add> <add> @Override <add> public MultiValueMap<String, String> parameters() { <add> return this.parameters; <add> } <add> <add> <add> @Override <add> public boolean equals(Object other) { <add> if (this == other) { <add> return true; <add> } <add> if (other == null || getClass() != other.getClass()) { <add> return false; <add> } <add> <add> DefaultPathSegment segment = (DefaultPathSegment) other; <add> return (this.value.equals(segment.value) && <add> this.semicolonContent.equals(segment.semicolonContent) && <add> this.parameters.equals(segment.parameters)); <add> } <add> <add> @Override <add> public int hashCode() { <add> int result = this.value.hashCode(); <add> result = 31 * result + this.semicolonContent.hashCode(); <add> result = 31 * result + 
this.parameters.hashCode(); <add> return result; <add> } <add> <add> public String toString() { <add> return "[value='" + this.value + "\', " + <add> "semicolonContent='" + this.semicolonContent + "\', " + <add> "parameters=" + this.parameters + "']"; <add> } <add> } <add> <add>} <ide><path>spring-web/src/main/java/org/springframework/http/server/reactive/PathSegment.java <add>/* <add> * Copyright 2002-2017 the original author or authors. <add> * <add> * Licensed under the Apache License, Version 2.0 (the "License"); <add> * you may not use this file except in compliance with the License. <add> * You may obtain a copy of the License at <add> * <add> * http://www.apache.org/licenses/LICENSE-2.0 <add> * <add> * Unless required by applicable law or agreed to in writing, software <add> * distributed under the License is distributed on an "AS IS" BASIS, <add> * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. <add> * See the License for the specific language governing permissions and <add> * limitations under the License. <add> */ <add>package org.springframework.http.server.reactive; <add> <add>import org.springframework.util.MultiValueMap; <add> <add>/** <add> * Represents the content of one path segment. <add> * <add> * @author Rossen Stoyanchev <add> * @since 5.0 <add> */ <add>public interface PathSegment { <add> <add> /** <add> * Return the original, raw (encoded) path segment value not including <add> * path parameters. <add> */ <add> String value(); <add> <add> /** <add> * The path {@link #value()} decoded. <add> */ <add> String valueDecoded(); <add> <add> /** <add> * Return the portion of the path segment after and including the first <add> * ";" (semicolon) representing path parameters. The actual parsed <add> * parameters if any can be obtained via {@link #parameters()}. <add> */ <add> String semicolonContent(); <add> <add> /** <add> * Path parameters parsed from the path segment. 
<add> */ <add> MultiValueMap<String, String> parameters(); <add> <add>} <ide><path>spring-web/src/main/java/org/springframework/http/server/reactive/PathSegmentContainer.java <add>/* <add> * Copyright 2002-2017 the original author or authors. <add> * <add> * Licensed under the Apache License, Version 2.0 (the "License"); <add> * you may not use this file except in compliance with the License. <add> * You may obtain a copy of the License at <add> * <add> * http://www.apache.org/licenses/LICENSE-2.0 <add> * <add> * Unless required by applicable law or agreed to in writing, software <add> * distributed under the License is distributed on an "AS IS" BASIS, <add> * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. <add> * See the License for the specific language governing permissions and <add> * limitations under the License. <add> */ <add>package org.springframework.http.server.reactive; <add> <add>import java.util.List; <add> <add>/** <add> * Container for 0..N path segments. <add> * <add> * @author Rossen Stoyanchev <add> * @since 5.0 <add> */ <add>public interface PathSegmentContainer { <add> <add> /** <add> * The original, raw (encoded) path value including path parameters. <add> */ <add> String value(); <add> <add> /** <add> * The list of path segments contained. <add> */ <add> List<PathSegment> pathSegments(); <add> <add>} <ide><path>spring-web/src/main/java/org/springframework/http/server/reactive/RequestPath.java <add>/* <add> * Copyright 2002-2017 the original author or authors. <add> * <add> * Licensed under the Apache License, Version 2.0 (the "License"); <add> * you may not use this file except in compliance with the License. 
<add> * You may obtain a copy of the License at <add> * <add> * http://www.apache.org/licenses/LICENSE-2.0 <add> * <add> * Unless required by applicable law or agreed to in writing, software <add> * distributed under the License is distributed on an "AS IS" BASIS, <add> * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. <add> * See the License for the specific language governing permissions and <add> * limitations under the License. <add> */ <add>package org.springframework.http.server.reactive; <add> <add>/** <add> * Represents the complete path for a request. <add> * <add> * @author Rossen Stoyanchev <add> * @since 5.0 <add> */ <add>public interface RequestPath extends PathSegmentContainer { <add> <add> /** <add> * The contextPath portion of the request if any. <add> */ <add> PathSegmentContainer contextPath(); <add> <add> /** <add> * The portion of the request path after the context path. <add> */ <add> PathSegmentContainer pathWithinApplication(); <add> <add>} <ide><path>spring-web/src/test/java/org/springframework/http/server/reactive/DefaultRequestPathTests.java <add>/* <add> * Copyright 2002-2017 the original author or authors. <add> * <add> * Licensed under the Apache License, Version 2.0 (the "License"); <add> * you may not use this file except in compliance with the License. <add> * You may obtain a copy of the License at <add> * <add> * http://www.apache.org/licenses/LICENSE-2.0 <add> * <add> * Unless required by applicable law or agreed to in writing, software <add> * distributed under the License is distributed on an "AS IS" BASIS, <add> * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. <add> * See the License for the specific language governing permissions and <add> * limitations under the License. 
<add> */ <add>package org.springframework.http.server.reactive; <add> <add>import java.net.URI; <add>import java.nio.charset.StandardCharsets; <add>import java.util.Arrays; <add>import java.util.Collections; <add>import java.util.List; <add>import java.util.stream.Collectors; <add> <add>import org.junit.Test; <add> <add>import org.springframework.util.LinkedMultiValueMap; <add>import org.springframework.util.MultiValueMap; <add> <add>import static org.junit.Assert.assertEquals; <add> <add>/** <add> * Unit tests for {@link DefaultRequestPath}. <add> * @author Rossen Stoyanchev <add> */ <add>public class DefaultRequestPathTests { <add> <add> @Test <add> public void pathSegment() throws Exception { <add> // basic <add> testPathSegment("cars", "", "cars", "cars", new LinkedMultiValueMap<>()); <add> <add> // empty <add> testPathSegment("", "", "", "", new LinkedMultiValueMap<>()); <add> <add> // spaces <add> testPathSegment("%20", "", "%20", " ", new LinkedMultiValueMap<>()); <add> testPathSegment("%20a%20", "", "%20a%20", " a ", new LinkedMultiValueMap<>()); <add> } <add> <add> @Test <add> public void pathSegmentWithParams() throws Exception { <add> // basic <add> LinkedMultiValueMap<String, String> params = new LinkedMultiValueMap<>(); <add> params.add("colors", "red"); <add> params.add("colors", "blue"); <add> params.add("colors", "green"); <add> params.add("year", "2012"); <add> testPathSegment("cars", ";colors=red,blue,green;year=2012", "cars", "cars", params); <add> <add> // trailing semicolon <add> params = new LinkedMultiValueMap<>(); <add> params.add("p", "1"); <add> testPathSegment("path", ";p=1;", "path", "path", params); <add> <add> // params with spaces <add> params = new LinkedMultiValueMap<>(); <add> params.add("param name", "param value"); <add> testPathSegment("path", ";param%20name=param%20value;%20", "path", "path", params); <add> <add> // empty params <add> params = new LinkedMultiValueMap<>(); <add> params.add("p", "1"); <add> 
testPathSegment("path", ";;;%20;%20;p=1;%20", "path", "path", params); <add> } <add> <add> @Test <add> public void path() throws Exception { <add> // basic <add> testPath("/a/b/c", "/a/b/c", Arrays.asList("a", "b", "c")); <add> <add> // root path <add> testPath("/%20", "/%20", Collections.singletonList("%20")); <add> testPath("", "", Collections.emptyList()); <add> testPath("%20", "", Collections.emptyList()); <add> <add> // trailing slash <add> testPath("/a/b/", "/a/b/", Arrays.asList("a", "b", "")); <add> testPath("/a/b//", "/a/b//", Arrays.asList("a", "b", "", "")); <add> <add> // extra slashes ande spaces <add> testPath("//%20/%20", "//%20/%20", Arrays.asList("", "%20", "%20")); <add> } <add> <add> @Test <add> public void contextPath() throws Exception { <add> URI uri = URI.create("http://localhost:8080/app/a/b/c"); <add> RequestPath path = new DefaultRequestPath(uri, "/app", StandardCharsets.UTF_8); <add> <add> PathSegmentContainer contextPath = path.contextPath(); <add> assertEquals("/app", contextPath.value()); <add> assertEquals(Collections.singletonList("app"), pathSegmentValues(contextPath)); <add> <add> PathSegmentContainer pathWithinApplication = path.pathWithinApplication(); <add> assertEquals("/a/b/c", pathWithinApplication.value()); <add> assertEquals(Arrays.asList("a", "b", "c"), pathSegmentValues(pathWithinApplication)); <add> } <add> <add> <add> private void testPathSegment(String pathSegment, String semicolonContent, <add> String value, String valueDecoded, MultiValueMap<String, String> parameters) { <add> <add> URI uri = URI.create("http://localhost:8080/" + pathSegment + semicolonContent); <add> PathSegment segment = new DefaultRequestPath(uri, "", StandardCharsets.UTF_8).pathSegments().get(0); <add> <add> assertEquals(value, segment.value()); <add> assertEquals(valueDecoded, segment.valueDecoded()); <add> assertEquals(semicolonContent, segment.semicolonContent()); <add> assertEquals(parameters, segment.parameters()); <add> } <add> <add> 
private void testPath(String input, String value, List<String> segments) { <add> URI uri = URI.create("http://localhost:8080" + input); <add> RequestPath path = new DefaultRequestPath(uri, "", StandardCharsets.UTF_8); <add> <add> assertEquals(value, path.value()); <add> assertEquals(segments, pathSegmentValues(path)); <add> } <add> <add> private static List<String> pathSegmentValues(PathSegmentContainer path) { <add> return path.pathSegments().stream().map(PathSegment::value).collect(Collectors.toList()); <add> } <add> <add>}
5
Python
Python
add tokenizer exceptions for ing verbs
c0691b2ab4a95d88c760a389c64fa76605cb2a59
<ide><path>spacy/lang/en/tokenizer_exceptions.py <ide> {ORTH: "O'clock", LEMMA: "o'clock", NORM: "o'clock"}, <ide> {ORTH: "lovin'", LEMMA: "love", NORM: "loving"}, <ide> {ORTH: "Lovin'", LEMMA: "love", NORM: "loving"}, <add> {ORTH: "lovin", LEMMA: "love", NORM: "loving"}, <add> {ORTH: "Lovin", LEMMA: "love", NORM: "loving"}, <add> {ORTH: "havin'", LEMMA: "have", NORM: "having"}, <add> {ORTH: "Havin'", LEMMA: "have", NORM: "having"}, <add> {ORTH: "havin", LEMMA: "have", NORM: "having"}, <add> {ORTH: "Havin", LEMMA: "have", NORM: "having"}, <add> {ORTH: "doin'", LEMMA: "do", NORM: "doing"}, <add> {ORTH: "Doin'", LEMMA: "do", NORM: "doing"}, <add> {ORTH: "doin", LEMMA: "do", NORM: "doing"}, <add> {ORTH: "Doin", LEMMA: "do", NORM: "doing"}, <add> {ORTH: "goin'", LEMMA: "go", NORM: "going"}, <add> {ORTH: "Goin'", LEMMA: "go", NORM: "going"}, <add> {ORTH: "goin", LEMMA: "go", NORM: "going"}, <add> {ORTH: "Goin", LEMMA: "go", NORM: "going"}, <add> <ide> <ide> {ORTH: "Mt.", LEMMA: "Mount", NORM: "Mount"}, <ide> {ORTH: "Ak.", LEMMA: "Alaska", NORM: "Alaska"},
1
Ruby
Ruby
avoid expensive pathname concatenation
49682e854d10e99e799e7aeefd1ec08e2ebcf56b
<ide><path>Library/Homebrew/download_strategy.rb <ide> def initialize name, package <ide> super <ide> <ide> if name.to_s.empty? || name == '__UNKNOWN__' <del> @tarball_path = HOMEBREW_CACHE + File.basename(@url) <add> @tarball_path = Pathname.new("#{HOMEBREW_CACHE}/#{File.basename(@url)}") <ide> else <del> @tarball_path = HOMEBREW_CACHE + "#{name}-#{package.version}#{ext}" <add> @tarball_path = Pathname.new("#{HOMEBREW_CACHE}/#{name}-#{package.version}#{ext}") <ide> end <ide> <ide> @mirrors = package.mirrors <del> @temporary_path = Pathname("#@tarball_path.incomplete") <add> @temporary_path = Pathname.new("#@tarball_path.incomplete") <ide> @local_bottle_path = nil <ide> end <ide> <ide> def initialize name, package <ide> if name.to_s.empty? || name == '__UNKNOWN__' <ide> raise NotImplementedError, "strategy requires a name parameter" <ide> else <del> @co = HOMEBREW_CACHE + "#{name}--svn" <add> @co = Pathname.new("#{HOMEBREW_CACHE}/#{name}--svn") <ide> end <ide> <ide> @co += "-HEAD" if ARGV.build_head? <ide> def initialize name, package <ide> if name.to_s.empty? || name == '__UNKNOWN__' <ide> raise NotImplementedError, "strategy requires a name parameter" <ide> else <del> @clone = HOMEBREW_CACHE + "#{name}--git" <add> @clone = Pathname.new("#{HOMEBREW_CACHE}/#{name}--git") <ide> end <ide> end <ide> <ide><path>Library/Homebrew/formula.rb <ide> def unpin <ide> end <ide> <ide> def linked_keg <del> HOMEBREW_REPOSITORY/'Library/LinkedKegs'/name <add> Pathname.new("#{HOMEBREW_LIBRARY}/LinkedKegs/#{name}") <ide> end <ide> <ide> def installed_prefix <ide> devel_prefix = unless devel.nil? <del> HOMEBREW_CELLAR/name/devel.version <add> Pathname.new("#{HOMEBREW_CELLAR}/#{name}/#{devel.version}") <ide> end <ide> <ide> head_prefix = unless head.nil? <del> HOMEBREW_CELLAR/name/head.version <add> Pathname.new("#{HOMEBREW_CELLAR}/#{name}/#{head.version}") <ide> end <ide> <ide> if active_spec == head || head and head_prefix.directory? 
<ide> def installed_version <ide> end <ide> <ide> def prefix <del> HOMEBREW_CELLAR+name+version <add> Pathname.new("#{HOMEBREW_CELLAR}/#{name}/#{version}") <ide> end <ide> def rack; prefix.parent end <ide> <ide> def plist_startup; self.class.plist_startup end <ide> # Defined and active build-time options. <ide> def build; self.class.build; end <ide> <del> def opt_prefix; HOMEBREW_PREFIX/:opt/name end <add> def opt_prefix <add> Pathname.new("#{HOMEBREW_PREFIX}/opt/#{name}") <add> end <ide> <ide> def download_strategy <ide> active_spec.download_strategy <ide> def self.aliases <ide> def self.canonical_name name <ide> name = name.to_s if name.kind_of? Pathname <ide> <del> formula_with_that_name = HOMEBREW_REPOSITORY+"Library/Formula/#{name}.rb" <del> possible_alias = HOMEBREW_REPOSITORY+"Library/Aliases/#{name}" <del> possible_cached_formula = HOMEBREW_CACHE_FORMULA+"#{name}.rb" <add> formula_with_that_name = Pathname.new("#{HOMEBREW_REPOSITORY}/Library/Formula/#{name}.rb") <add> possible_alias = Pathname.new("#{HOMEBREW_REPOSITORY}/Library/Aliases/#{name}") <add> possible_cached_formula = Pathname.new("#{HOMEBREW_CACHE_FORMULA}/#{name}.rb") <ide> <ide> if name.include? "/" <ide> if name =~ %r{(.+)/(.+)/(.+)} <del> tapd = HOMEBREW_REPOSITORY/"Library/Taps"/"#$1-#$2".downcase <add> tap_name = "#$1-#$2".downcase <add> tapd = Pathname.new("#{HOMEBREW_REPOSITORY}/Library/Taps/#{tap_name}") <ide> tapd.find_formula do |relative_pathname| <ide> return "#{tapd}/#{relative_pathname}" if relative_pathname.stem.to_s == $3 <ide> end if tapd.directory? <ide> def tap <ide> end <ide> <ide> def self.path name <del> HOMEBREW_REPOSITORY+"Library/Formula/#{name.downcase}.rb" <add> Pathname.new("#{HOMEBREW_REPOSITORY}/Library/Formula/#{name.downcase}.rb") <ide> end <ide> <ide> def deps; self.class.dependencies.deps; end <ide><path>Library/Homebrew/os/mac/xcode.rb <ide> def latest_version <ide> def prefix <ide> @prefix ||= begin <ide> path = Pathname.new(folder) <del> if path.absolute? 
and (path/'usr/bin/make').executable? <add> if path.absolute? and File.executable? "#{path}/usr/bin/make" <ide> path <ide> elsif File.executable? '/Developer/usr/bin/make' <ide> # we do this to support cowboys who insist on installing <ide> # only a subset of Xcode <del> Pathname.new '/Developer' <del> elsif (V4_BUNDLE_PATH/'Contents/Developer/usr/bin/make').executable? <add> Pathname.new('/Developer') <add> elsif File.executable? "#{V4_BUNDLE_PATH}/Contents/Developer/usr/bin/make" <ide> # fallback for broken Xcode 4.3 installs <del> V4_BUNDLE_PATH/'Contents/Developer' <add> Pathname.new("#{V4_BUNDLE_PATH}/Contents/Developer") <ide> else <ide> # Ask Spotlight where Xcode is. If the user didn't install the <ide> # helper tools and installed Xcode in a non-conventional place, this <ide> def prefix <ide> <ide> unless path.nil? <ide> path += "Contents/Developer" <del> path if (path/'usr/bin/make').executable? <add> path if File.executable? "#{path}/usr/bin/make" <ide> end <ide> end <ide> end <ide> def uncached_version <ide> <ide> # this shortcut makes version work for people who don't realise you <ide> # need to install the CLI tools <del> xcode43build = prefix/'usr/bin/xcodebuild' <add> xcode43build = Pathname.new("#{prefix}/usr/bin/xcodebuild") <ide> if xcode43build.file? <ide> `#{xcode43build} -version 2>/dev/null` =~ /Xcode (\d(\.\d)*)/ <ide> return $1 if $1 <ide><path>Library/Homebrew/os/mac/xquartz.rb <ide> def installed? <ide> # Confusingly, executables (e.g. config scripts) are only found under <ide> # /opt/X11/bin or /usr/X11/bin in all cases. <ide> def bin <del> prefix/'bin' <add> Pathname.new("#{prefix}/bin") <ide> end <ide> <ide> def include <ide> @include ||= if use_sdk? <del> MacOS.sdk_path/'usr/X11/include' <add> Pathname.new("#{MacOS.sdk_path}/usr/X11/include") <ide> else <del> prefix/'include' <add> Pathname.new("#{prefix}/include") <ide> end <ide> end <ide> <ide> def lib <ide> @lib ||= if use_sdk? 
<del> MacOS.sdk_path/'usr/X11/lib' <add> Pathname.new("#{MacOS.sdk_path}/usr/X11/lib") <ide> else <del> prefix/'lib' <add> Pathname.new("#{prefix}/lib") <ide> end <ide> end <ide> <ide> def share <del> prefix/'share' <add> Pathname.new("#{prefix}/share") <ide> end <ide> <ide> private
4
PHP
PHP
remove defunct property
f001d18218e397a78e8a05130086f877084eec1c
<ide><path>src/Controller/ErrorController.php <ide> */ <ide> class ErrorController extends Controller { <ide> <del>/** <del> * Uses Property <del> * <del> * @var array <del> */ <del> public $uses = array(); <del> <ide> /** <ide> * Constructor <ide> *
1
Text
Text
add readme with experimental warning for plugins
ec89e7387f4dce77406fec0c00ee2774dd02fa04
<ide><path>packages/next-plugin-google-analytics/readme.md <add># Unstable @next/plugin-google-analytics <add> <add>This package is still very experimental and should not be used at this point <ide><path>packages/next-plugin-material-ui/readme.md <add># Unstable @next/plugin-material-ui <add> <add>This package is still very experimental and should not be used at this point <ide><path>packages/next-plugin-sentry/readme.md <add># Unstable @next/plugin-sentry <add> <add>This package is still very experimental and should not be used at this point
3
Javascript
Javascript
use delegate for autolocation
54d8d3959ec89e229ac3aef5dcd4a8ab424b2b96
<ide><path>packages/ember-routing/lib/location/api.js <ide> import Ember from "ember-metal/core"; // deprecate, assert <ide> import environment from "ember-metal/environment"; <add>import { getHash } from "ember-routing/location/util"; <ide> <ide> /** <ide> @module ember <ide> export default { <ide> @since 1.4.0 <ide> */ <ide> _getHash: function () { <del> // AutoLocation has it at _location, HashLocation at .location. <del> // Being nice and not changing <del> var href = (this._location || this.location).href; <del> var hashIndex = href.indexOf('#'); <del> <del> if (hashIndex === -1) { <del> return ''; <del> } else { <del> return href.substr(hashIndex); <del> } <add> return getHash(this._location || this.location); <ide> } <ide> }; <ide><path>packages/ember-routing/lib/location/auto_location.js <ide> import Ember from "ember-metal/core"; // FEATURES <add>import { get } from "ember-metal/property_get"; <ide> import { set } from "ember-metal/property_set"; <add>import { computed } from "ember-metal/computed"; <ide> <del>import EmberLocation from "ember-routing/location/api"; <del>import HistoryLocation from "ember-routing/location/history_location"; <del>import HashLocation from "ember-routing/location/hash_location"; <del>import NoneLocation from "ember-routing/location/none_location"; <del> <add>import EmberObject from "ember-runtime/system/object"; <ide> import environment from "ember-metal/environment"; <del>import { supportsHashChange, supportsHistory } from "ember-routing/location/feature_detect"; <add>import { supportsHashChange, supportsHistory } from "ember-routing/location/util"; <add>import { getPath, getOrigin, getHash, getFullPath } from "ember-routing/location/util"; <ide> <ide> /** <ide> @module ember <ide> import { supportsHashChange, supportsHistory } from "ember-routing/location/feat <ide> @namespace Ember <ide> @static <ide> */ <del>export default { <add>export default EmberObject.extend({ <ide> /** <ide> @private <ide> <del> Attached for mocking 
in tests <add> The browser's `location` object. This is typically equivalent to <add> `window.location`, but may be overridden for testing. <ide> <ide> @property location <ide> @default environment.location <ide> */ <del> _location: environment.location, <add> location: environment.location, <ide> <ide> /** <ide> @private <ide> <del> Attached for mocking in tests <add> The browser's `history` object. This is typically equivalent to <add> `window.history`, but may be overridden for testing. <ide> <ide> @since 1.5.1 <ide> @property _history <ide> @default environment.history <ide> */ <del> _history: environment.history, <add> history: environment.history, <ide> <ide> /** <ide> @private <ide> <del> This property is used by router:main to know whether to cancel the routing <add> The browser's `userAgent`. This is typically equivalent to <add> `navigator.userAgent`, but may be overridden for testing. <add> <add> @since 1.5.1 <add> @property userAgent <add> @default environment.history <add> */ <add> userAgent: environment.userAgent, <add> <add> /** <add> @private <add> <add> This property is used by the router to know whether to cancel the routing <ide> setup process, which is needed while we redirect the browser. 
<ide> <ide> @since 1.5.1 <ide> export default { <ide> */ <ide> rootURL: '/', <ide> <del> /** <del> @private <del> <del> Attached for mocking in tests <del> <del> @since 1.5.1 <del> @property _HistoryLocation <del> @default Ember.HistoryLocation <del> */ <del> _HistoryLocation: HistoryLocation, <del> <del> /** <del> @private <del> <del> Attached for mocking in tests <del> <del> @since 1.5.1 <del> @property _HashLocation <del> @default Ember.HashLocation <del> */ <del> _HashLocation: HashLocation, <del> <del> /** <del> @private <del> <del> Attached for mocking in tests <del> <del> @since 1.5.1 <del> @property _NoneLocation <del> @default Ember.NoneLocation <del> */ <del> _NoneLocation: NoneLocation, <del> <del> /** <del> @private <del> <del> Returns location.origin or builds it if device doesn't support it. <del> <del> @method _getOrigin <del> */ <del> _getOrigin: function () { <del> var location = this._location; <del> var origin = location.origin; <del> <del> // Older browsers, especially IE, don't have origin <del> if (!origin) { <del> origin = location.protocol + '//' + location.hostname; <del> <del> if (location.port) { <del> origin += ':' + location.port; <del> } <add> concreteImplementation: computed(function() { <add> var implementation = detectImplementation({ <add> location: this.location, <add> history: this.history, <add> userAgent: this.userAgent, <add> rootURL: this.rootURL <add> }); <add> <add> if (implementation === false) { <add> set(this, 'cancelRouterSetup', true); <add> implementation = 'none'; <ide> } <ide> <del> return origin; <del> }, <del> <del> _userAgent: environment.userAgent, <del> <del> /** <del> @private <del> <del> @method _getSupportsHistory <del> */ <del> _getSupportsHistory: function () { <del> return supportsHistory(environment.userAgent, environment.history); <del> }, <del> <del> /** <del> @private <del> <del> @method _getSupportsHashChange <del> */ <del> _getSupportsHashChange: function () { <del> return 
supportsHashChange(document.documentMode, window); <del> }, <del> <del> /** <del> @private <del> <del> Redirects the browser using location.replace, prepending the location.origin <del> to prevent phishing attempts <del> <del> @method _replacePath <del> */ <del> _replacePath: function (path) { <del> this._location.replace(this._getOrigin() + path); <del> }, <del> <del> /** <del> @since 1.5.1 <del> @private <del> @method _getRootURL <del> */ <del> _getRootURL: function () { <del> return this.rootURL; <del> }, <add> return this.container.lookup('location:' + implementation); <add> }), <ide> <del> /** <del> @private <add> initState: delegateToConcreteImplementation('initState'), <add> getURL: delegateToConcreteImplementation('getURL'), <add> setURL: delegateToConcreteImplementation('setURL'), <add> replaceURL: delegateToConcreteImplementation('replaceURL'), <add> onUpdateURL: delegateToConcreteImplementation('onUpdateURL'), <add> formatURL: delegateToConcreteImplementation('formatURL'), <ide> <del> Returns the current `location.pathname`, normalized for IE inconsistencies. 
<add> willDestroy: function() { <add> var concreteImplementation = get(this, 'concreteImplementation'); <ide> <del> @method _getPath <del> */ <del> _getPath: function () { <del> var pathname = this._location.pathname; <del> // Various versions of IE/Opera don't always return a leading slash <del> if (pathname.charAt(0) !== '/') { <del> pathname = '/' + pathname; <add> if (concreteImplementation) { <add> concreteImplementation.destroy(); <ide> } <add> } <add>}); <ide> <del> return pathname; <del> }, <del> <del> /** <del> @private <del> <del> Returns normalized location.hash as an alias to Ember.Location._getHash <del> <del> @since 1.5.1 <del> @method _getHash <del> */ <del> _getHash: EmberLocation._getHash, <del> <del> /** <del> @private <del> <del> Returns location.search <del> <del> @since 1.5.1 <del> @method _getQuery <del> */ <del> _getQuery: function () { <del> return this._location.search; <del> }, <del> <del> /** <del> @private <add>function delegateToConcreteImplementation(methodName) { <add> return function() { <add> var concreteImplementation = get(this, 'concreteImplementation'); <add> concreteImplementation[methodName].apply(concreteImplementation, arguments); <add> }; <add>} <ide> <del> Returns the full pathname including query and hash <add>/** <add> Given the browser's `location`, `history` and `userAgent`, and a configured <add> root URL, this function detects whether the browser supports the [History <add> API](https://developer.mozilla.org/en-US/docs/Web/API/History) and returns a <add> string representing the Location object to use based on its determination. <ide> <del> @method _getFullPath <del> */ <del> _getFullPath: function () { <del> return this._getPath() + this._getQuery() + this._getHash(); <del> }, <add> For example, if the page loads in an evergreen browser, this function would <add> return the string "history", meaning the history API and thus HistoryLocation <add> should be used. 
If the page is loaded in IE8, it will return the string <add> "hash," indicating that the History API should be simulated by manipulating the <add> hash portion of the location. <ide> <del> /** <del> @private <add>*/ <ide> <del> Returns the current path as it should appear for HistoryLocation supported <del> browsers. This may very well differ from the real current path (e.g. if it <del> starts off as a hashed URL) <add>function detectImplementation(options) { <add> var location = options.location, <add> userAgent = options.userAgent, <add> history = options.history, <add> rootURL = options.rootURL; <ide> <del> @method _getHistoryPath <del> */ <del> _getHistoryPath: function () { <del> var rootURL = this._getRootURL(); <del> var path = this._getPath(); <del> var hash = this._getHash(); <del> var query = this._getQuery(); <del> var rootURLIndex = path.indexOf(rootURL); <del> var routeHash, hashParts; <del> <del> Ember.assert('Path ' + path + ' does not start with the provided rootURL ' + rootURL, rootURLIndex === 0); <del> <del> // By convention, Ember.js routes using HashLocation are required to start <del> // with `#/`. Anything else should NOT be considered a route and should <del> // be passed straight through, without transformation. <del> if (hash.substr(0, 2) === '#/') { <del> // There could be extra hash segments after the route <del> hashParts = hash.substr(1).split('#'); <del> // The first one is always the route url <del> routeHash = hashParts.shift(); <del> <del> // If the path already has a trailing slash, remove the one <del> // from the hashed route so we don't double up. 
<del> if (path.slice(-1) === '/') { <del> routeHash = routeHash.substr(1); <del> } <add> var implementation = 'none'; <add> var cancelRouterSetup = false; <add> var currentPath = getFullPath(location); <ide> <del> // This is the "expected" final order <del> path += routeHash; <del> path += query; <add> if (supportsHistory(userAgent, history)) { <add> var historyPath = getHistoryPath(rootURL, location); <ide> <del> if (hashParts.length) { <del> path += '#' + hashParts.join('#'); <add> // If the browser supports history and we have a history path, we can use <add> // the history location with no redirects. <add> if (currentPath === historyPath) { <add> return 'history'; <add> } else { <add> if (currentPath.substr(0, 2) === '/#') { <add> history.replaceState({ path: historyPath }, null, historyPath); <add> implementation = 'history'; <add> } else { <add> cancelRouterSetup = true; <add> replacePath(location, historyPath); <ide> } <add> } <add> } else if (supportsHashChange(document.documentMode, window)) { <add> var hashPath = getHashPath(location); <add> <add> // Be sure we're using a hashed path, otherwise let's switch over it to so <add> // we start off clean and consistent. We'll count an index path with no <add> // hash as "good enough" as well. <add> if (currentPath === hashPath || (currentPath === '/' && hashPath === '/#/')) { <add> implementation = 'hash'; <ide> } else { <del> path += query; <del> path += hash; <add> // Our URL isn't in the expected hash-supported format, so we want to <add> // cancel the router setup and replace the URL to start off clean <add> cancelRouterSetup = true; <add> replacePath(location, hashPath); <ide> } <add> } <ide> <del> return path; <del> }, <del> <del> /** <del> @private <add> if (cancelRouterSetup) { <add> return false; <add> } <ide> <del> Returns the current path as it should appear for HashLocation supported <del> browsers. This may very well differ from the real current path. 
<add> return implementation; <add>} <ide> <del> @method _getHashPath <del> */ <del> _getHashPath: function () { <del> var rootURL = this._getRootURL(); <del> var path = rootURL; <del> var historyPath = this._getHistoryPath(); <del> var routePath = historyPath.substr(rootURL.length); <del> <del> if (routePath !== '') { <del> if (routePath.charAt(0) !== '/') { <del> routePath = '/' + routePath; <del> } <add>/** <add> @private <ide> <del> path += '#' + routePath; <add> Returns the current path as it should appear for HistoryLocation supported <add> browsers. This may very well differ from the real current path (e.g. if it <add> starts off as a hashed URL) <add>*/ <add>function getHistoryPath(rootURL, location) { <add> var path = getPath(location); <add> var hash = getHash(location); <add> var query = this._getQuery(); <add> var rootURLIndex = path.indexOf(rootURL); <add> var routeHash, hashParts; <add> <add> Ember.assert('Path ' + path + ' does not start with the provided rootURL ' + rootURL, rootURLIndex === 0); <add> <add> // By convention, Ember.js routes using HashLocation are required to start <add> // with `#/`. Anything else should NOT be considered a route and should <add> // be passed straight through, without transformation. <add> if (hash.substr(0, 2) === '#/') { <add> // There could be extra hash segments after the route <add> hashParts = hash.substr(1).split('#'); <add> // The first one is always the route url <add> routeHash = hashParts.shift(); <add> <add> // If the path already has a trailing slash, remove the one <add> // from the hashed route so we don't double up. <add> if (path.slice(-1) === '/') { <add> routeHash = routeHash.substr(1); <ide> } <ide> <del> return path; <del> }, <add> // This is the "expected" final order <add> path += routeHash; <add> path += query; <ide> <del> /** <del> Selects the best location option based off browser support and returns an <del> instance of that Location class. 
<del> <del> @see Ember.AutoLocation <del> @method create <del> */ <del> create: function (options) { <del> if (options && options.rootURL) { <del> Ember.assert('rootURL must end with a trailing forward slash e.g. "/app/"', <del> options.rootURL.charAt(options.rootURL.length-1) === '/'); <del> this.rootURL = options.rootURL; <add> if (hashParts.length) { <add> path += '#' + hashParts.join('#'); <ide> } <add> } else { <add> path += query; <add> path += hash; <add> } <ide> <del> var historyPath, hashPath; <del> var cancelRouterSetup = false; <del> var implementationClass = this._NoneLocation; <del> var currentPath = this._getFullPath(); <del> <del> if (this._getSupportsHistory()) { <del> historyPath = this._getHistoryPath(); <add> return path; <add>} <ide> <del> // Since we support history paths, let's be sure we're using them else <del> // switch the location over to it. <del> if (currentPath === historyPath) { <del> implementationClass = this._HistoryLocation; <del> } else { <del> if (currentPath.substr(0, 2) === '/#') { <del> this._history.replaceState({ path: historyPath }, null, historyPath); <del> implementationClass = this._HistoryLocation; <del> } else { <del> cancelRouterSetup = true; <del> this._replacePath(historyPath); <del> } <del> } <add>/** <add> @private <ide> <del> } else if (this._getSupportsHashChange()) { <del> hashPath = this._getHashPath(); <add> Returns the current path as it should appear for HashLocation supported <add> browsers. This may very well differ from the real current path. <ide> <del> // Be sure we're using a hashed path, otherwise let's switch over it to so <del> // we start off clean and consistent. We'll count an index path with no <del> // hash as "good enough" as well. 
<del> if (currentPath === hashPath || (currentPath === '/' && hashPath === '/#/')) { <del> implementationClass = this._HashLocation; <del> } else { <del> // Our URL isn't in the expected hash-supported format, so we want to <del> // cancel the router setup and replace the URL to start off clean <del> cancelRouterSetup = true; <del> this._replacePath(hashPath); <del> } <add> @method _getHashPath <add>*/ <add>function getHashPath(rootURL, location) { <add> var path = rootURL; <add> var historyPath = getHistoryPath(rootURL, location); <add> var routePath = historyPath.substr(rootURL.length); <add> <add> if (routePath !== '') { <add> if (routePath.charAt(0) !== '/') { <add> routePath = '/' + routePath; <ide> } <ide> <del> var implementation = implementationClass.create.apply(implementationClass, arguments); <add> path += '#' + routePath; <add> } <ide> <del> if (cancelRouterSetup) { <del> set(implementation, 'cancelRouterSetup', true); <del> } <add> return path; <add>} <ide> <del> return implementation; <del> } <del>}; <add>function replacePath(location, path) { <add> location.replace(getOrigin(location) + path); <add>} <add><path>packages/ember-routing/lib/location/util.js <del><path>packages/ember-routing/lib/location/feature_detect.js <add>/** <add> @private <add> <add> Returns the current `location.pathname`, normalized for IE inconsistencies. <add>*/ <add>export function getPath(location) { <add> var pathname = location.pathname; <add> // Various versions of IE/Opera don't always return a leading slash <add> if (pathname.charAt(0) !== '/') { <add> pathname = '/' + pathname; <add> } <add> <add> return pathname; <add>} <add> <add>/** <add> @private <add> <add> Returns the current `location.search`. <add>*/ <add>export function getQuery(location) { <add> return location.search; <add>} <add> <add>/** <add> @private <add> <add> Returns the current `location.hash` by parsing location.href since browsers <add> inconsistently URL-decode `location.hash`. 
<add> <add> Should be passed the browser's `location` object as the first argument. <add> <add> https://bugzilla.mozilla.org/show_bug.cgi?id=483304 <add>*/ <add>export function getHash(location) { <add> var href = location.href; <add> var hashIndex = href.indexOf('#'); <add> <add> if (hashIndex === -1) { <add> return ''; <add> } else { <add> return href.substr(hashIndex); <add> } <add>} <add> <add>export function getFullPath(location) { <add> return getPath(location) + getQuery(location) + getHash(location); <add>} <add> <add>export function getOrigin(location) { <add> var origin = location.origin; <add> <add> // Older browsers, especially IE, don't have origin <add> if (!origin) { <add> origin = location.protocol + '//' + location.hostname; <add> <add> if (location.port) { <add> origin += ':' + location.port; <add> } <add> } <add> <add> return origin; <add>} <add> <ide> /* <ide> `documentMode` only exist in Internet Explorer, and it's tested because IE8 running in <ide> IE7 compatibility mode claims to support `onhashchange` but actually does not. <ide> export function supportsHashChange(documentMode, global) { <ide> @private <ide> @function supportsHistory <ide> */ <del> <ide> export function supportsHistory(userAgent, history) { <ide> // Boosted from Modernizr: https://github.com/Modernizr/Modernizr/blob/master/feature-detects/history.js <ide> // The stock browser on Android 2.2 & 2.3 returns positive on history support <ide><path>packages/ember-routing/tests/location/auto_location_test.js <ide> import copy from "ember-runtime/copy"; <ide> import EmberObject from "ember-runtime/system/object"; <ide> import AutoLocation from "ember-routing/location/auto_location"; <ide> import EmberLocation from "ember-routing/location/api"; <del>import { supportsHistory, supportsHashChange } from "ember-routing/location/feature_detect"; <add>import { supportsHistory, supportsHashChange } from "ember-routing/location/util"; <ide> <ide> var AutoTestLocation, location; <ide>
4
Javascript
Javascript
add coverage for invalid rsa-pss digests
27ea62a1712df2445db79f84d0cc498551fa0f1d
<ide><path>test/parallel/test-crypto-keygen.js <ide> const sec1EncExp = (cipher) => getRegExpForPEM('EC PRIVATE KEY', cipher); <ide> } <ide> ); <ide> } <add> <add> assert.throws(() => generateKeyPair('rsa-pss', { <add> modulusLength: 512, <add> hashAlgorithm: 'sha2', <add> }, common.mustNotCall()), { <add> name: 'TypeError', <add> code: 'ERR_CRYPTO_INVALID_DIGEST', <add> message: 'md specifies an invalid digest' <add> }); <add> <add> assert.throws(() => generateKeyPair('rsa-pss', { <add> modulusLength: 512, <add> mgf1HashAlgorithm: 'sha2', <add> }, common.mustNotCall()), { <add> name: 'TypeError', <add> code: 'ERR_CRYPTO_INVALID_DIGEST', <add> message: 'mgf1_md specifies an invalid digest' <add> }); <ide> } <ide> <ide> // Passing an empty passphrase string should not cause OpenSSL's default
1
Javascript
Javascript
remove rsaprivate and rename rsapublic
9f97f1044b7d111c3c8b8f5db0c8dd0e9bc11027
<ide><path>lib/internal/crypto/cipher.js <ide> const { StringDecoder } = require('string_decoder'); <ide> const { inherits } = require('util'); <ide> const { normalizeEncoding } = require('internal/util'); <ide> <del>function rsaPublic(method, defaultPadding) { <add>function rsaFunctionFor(method, defaultPadding) { <ide> return function(options, buffer) { <ide> const key = options.key || options; <ide> const padding = options.padding || defaultPadding; <ide> function rsaPublic(method, defaultPadding) { <ide> }; <ide> } <ide> <del>function rsaPrivate(method, defaultPadding) { <del> return function(options, buffer) { <del> const key = options.key || options; <del> const passphrase = options.passphrase || null; <del> const padding = options.padding || defaultPadding; <del> return method(toBuf(key), buffer, padding, passphrase); <del> }; <del>} <del> <del>const publicEncrypt = rsaPublic(_publicEncrypt, RSA_PKCS1_OAEP_PADDING); <del>const publicDecrypt = rsaPublic(_publicDecrypt, RSA_PKCS1_PADDING); <del>const privateEncrypt = rsaPrivate(_privateEncrypt, RSA_PKCS1_PADDING); <del>const privateDecrypt = rsaPrivate(_privateDecrypt, RSA_PKCS1_OAEP_PADDING); <add>const publicEncrypt = rsaFunctionFor(_publicEncrypt, RSA_PKCS1_OAEP_PADDING); <add>const publicDecrypt = rsaFunctionFor(_publicDecrypt, RSA_PKCS1_PADDING); <add>const privateEncrypt = rsaFunctionFor(_privateEncrypt, RSA_PKCS1_PADDING); <add>const privateDecrypt = rsaFunctionFor(_privateDecrypt, RSA_PKCS1_OAEP_PADDING); <ide> <ide> function getDecoder(decoder, encoding) { <ide> encoding = normalizeEncoding(encoding);
1
Java
Java
add test case on nested routerfunction attributes
5b1bda5c7cc1b28a559ead633121f67eea149acd
<ide><path>spring-webflux/src/test/java/org/springframework/web/reactive/function/server/AttributesTestVisitor.java <ide> /* <del> * Copyright 2002-2020 the original author or authors. <add> * Copyright 2002-2022 the original author or authors. <ide> * <ide> * Licensed under the Apache License, Version 2.0 (the "License"); <ide> * you may not use this file except in compliance with the License. <ide> <ide> package org.springframework.web.reactive.function.server; <ide> <add>import java.util.Deque; <add>import java.util.LinkedList; <add>import java.util.List; <ide> import java.util.Map; <add>import java.util.Objects; <add>import java.util.Optional; <ide> import java.util.function.Function; <add>import java.util.stream.Collectors; <add>import java.util.stream.Stream; <ide> <ide> import reactor.core.publisher.Mono; <ide> <ide> import org.springframework.core.io.Resource; <ide> import org.springframework.lang.Nullable; <ide> <del>import static org.assertj.core.api.Assertions.assertThat; <del>import static org.assertj.core.api.Assertions.entry; <del> <ide> /** <ide> * @author Arjen Poutsma <ide> */ <ide> class AttributesTestVisitor implements RouterFunctions.Visitor { <ide> <add> private Deque<Map<String, Object>> nestedAttributes = new LinkedList<>(); <add> <ide> @Nullable <ide> private Map<String, Object> attributes; <ide> <add> private List<List<Map<String, Object>>> routerFunctionsAttributes = new LinkedList<>(); <add> <ide> private int visitCount; <ide> <add> public List<List<Map<String, Object>>> routerFunctionsAttributes() { <add> return this.routerFunctionsAttributes; <add> } <add> <ide> public int visitCount() { <ide> return this.visitCount; <ide> } <ide> <ide> @Override <ide> public void startNested(RequestPredicate predicate) { <add> nestedAttributes.addFirst(attributes); <add> attributes = null; <ide> } <ide> <ide> @Override <ide> public void endNested(RequestPredicate predicate) { <add> attributes = nestedAttributes.removeFirst(); <ide> } <ide> <ide> 
@Override <ide> public void route(RequestPredicate predicate, HandlerFunction<?> handlerFunction) { <del> assertThat(this.attributes).isNotNull(); <del> this.attributes = null; <add> Stream<Map<String, Object>> current = Optional.ofNullable(attributes).stream(); <add> Stream<Map<String, Object>> nested = nestedAttributes.stream().filter(Objects::nonNull); <add> routerFunctionsAttributes.add(Stream.concat(current, nested).collect(Collectors.toUnmodifiableList())); <add> attributes = null; <ide> } <ide> <ide> @Override <ide> public void resources(Function<ServerRequest, Mono<Resource>> lookupFunction) { <ide> <ide> @Override <ide> public void attributes(Map<String, Object> attributes) { <del> assertThat(attributes).containsExactly(entry("foo", "bar"), entry("baz", "qux")); <ide> this.attributes = attributes; <ide> this.visitCount++; <ide> } <ide><path>spring-webflux/src/test/java/org/springframework/web/reactive/function/server/RouterFunctionBuilderTests.java <ide> /* <del> * Copyright 2002-2020 the original author or authors. <add> * Copyright 2002-2022 the original author or authors. <ide> * <ide> * Licensed under the Apache License, Version 2.0 (the "License"); <ide> * you may not use this file except in compliance with the License. 
<ide> <ide> import java.io.IOException; <ide> import java.util.Collections; <add>import java.util.List; <add>import java.util.Map; <ide> import java.util.concurrent.atomic.AtomicInteger; <ide> <ide> import org.junit.jupiter.api.Test; <ide> public void attributes() { <ide> atts.put("foo", "bar"); <ide> atts.put("baz", "qux"); <ide> }) <add> .path("/atts", b1 -> b1 <add> .GET("/3", request -> ServerResponse.ok().build()) <add> .withAttribute("foo", "bar") <add> .GET("/4", request -> ServerResponse.ok().build()) <add> .withAttribute("baz", "qux") <add> .path("/5", b2 -> b2 <add> .GET(request -> ServerResponse.ok().build()) <add> .withAttribute("foo", "n3")) <add> .withAttribute("foo", "n2") <add> ) <add> .withAttribute("foo", "n1") <ide> .build(); <ide> <ide> AttributesTestVisitor visitor = new AttributesTestVisitor(); <ide> route.accept(visitor); <del> assertThat(visitor.visitCount()).isEqualTo(2); <add> assertThat(visitor.routerFunctionsAttributes()).containsExactly( <add> List.of(Map.of("foo", "bar", "baz", "qux")), <add> List.of(Map.of("foo", "bar", "baz", "qux")), <add> List.of(Map.of("foo", "bar"), Map.of("foo", "n1")), <add> List.of(Map.of("baz", "qux"), Map.of("foo", "n1")), <add> List.of(Map.of("foo", "n3"), Map.of("foo", "n2"), Map.of("foo", "n1")) <add> ); <add> assertThat(visitor.visitCount()).isEqualTo(7); <ide> } <del> <del> <ide> } <ide><path>spring-webflux/src/test/java/org/springframework/web/reactive/function/server/RouterFunctionTests.java <ide> /* <del> * Copyright 2002-2020 the original author or authors. <add> * Copyright 2002-2022 the original author or authors. <ide> * <ide> * Licensed under the Apache License, Version 2.0 (the "License"); <ide> * you may not use this file except in compliance with the License. 
<ide> package org.springframework.web.reactive.function.server; <ide> <ide> import java.util.Collections; <add>import java.util.List; <add>import java.util.Map; <ide> <ide> import org.junit.jupiter.api.Test; <ide> import reactor.core.publisher.Mono; <ide> import org.springframework.web.testfixture.server.MockServerWebExchange; <ide> <ide> import static org.assertj.core.api.Assertions.assertThat; <add>import static org.springframework.http.HttpMethod.GET; <ide> import static org.springframework.web.reactive.function.server.RequestPredicates.GET; <add>import static org.springframework.web.reactive.function.server.RequestPredicates.method; <add>import static org.springframework.web.reactive.function.server.RequestPredicates.path; <ide> <ide> /** <ide> * @author Arjen Poutsma <ide> public void attributes() { <ide> .withAttributes(atts -> { <ide> atts.put("foo", "bar"); <ide> atts.put("baz", "qux"); <del> })); <add> })) <add> .and(RouterFunctions.nest(path("/atts"), <add> RouterFunctions.route(GET("/3"), request -> ServerResponse.ok().build()) <add> .withAttribute("foo", "bar") <add> .and(RouterFunctions.route(GET("/4"), request -> ServerResponse.ok().build()) <add> .withAttribute("baz", "qux")) <add> .and(RouterFunctions.nest(path("/5"), <add> RouterFunctions.route(method(GET), request -> ServerResponse.ok().build()) <add> .withAttribute("foo", "n3")) <add> .withAttribute("foo", "n2"))) <add> .withAttribute("foo", "n1")); <ide> <ide> AttributesTestVisitor visitor = new AttributesTestVisitor(); <ide> route.accept(visitor); <del> assertThat(visitor.visitCount()).isEqualTo(2); <add> assertThat(visitor.routerFunctionsAttributes()).containsExactly( <add> List.of(Map.of("foo", "bar", "baz", "qux")), <add> List.of(Map.of("foo", "bar", "baz", "qux")), <add> List.of(Map.of("foo", "bar"), Map.of("foo", "n1")), <add> List.of(Map.of("baz", "qux"), Map.of("foo", "n1")), <add> List.of(Map.of("foo", "n3"), Map.of("foo", "n2"), Map.of("foo", "n1")) <add> ); <add> 
assertThat(visitor.visitCount()).isEqualTo(7); <ide> } <ide> <ide> <ide><path>spring-webmvc/src/test/java/org/springframework/web/servlet/function/AttributesTestVisitor.java <ide> /* <del> * Copyright 2002-2020 the original author or authors. <add> * Copyright 2002-2022 the original author or authors. <ide> * <ide> * Licensed under the Apache License, Version 2.0 (the "License"); <ide> * you may not use this file except in compliance with the License. <ide> <ide> package org.springframework.web.servlet.function; <ide> <add>import java.util.Deque; <add>import java.util.LinkedList; <add>import java.util.List; <ide> import java.util.Map; <add>import java.util.Objects; <ide> import java.util.Optional; <ide> import java.util.function.Function; <add>import java.util.stream.Collectors; <add>import java.util.stream.Stream; <ide> <ide> import org.springframework.core.io.Resource; <ide> import org.springframework.lang.Nullable; <ide> <del>import static org.assertj.core.api.Assertions.assertThat; <del>import static org.assertj.core.api.Assertions.entry; <del> <ide> /** <ide> * @author Arjen Poutsma <ide> */ <ide> class AttributesTestVisitor implements RouterFunctions.Visitor { <ide> <add> private Deque<Map<String, Object>> nestedAttributes = new LinkedList<>(); <add> <ide> @Nullable <ide> private Map<String, Object> attributes; <ide> <add> private List<List<Map<String, Object>>> routerFunctionsAttributes = new LinkedList<>(); <add> <ide> private int visitCount; <ide> <add> public List<List<Map<String, Object>>> routerFunctionsAttributes() { <add> return this.routerFunctionsAttributes; <add> } <add> <ide> public int visitCount() { <ide> return this.visitCount; <ide> } <ide> <ide> @Override <ide> public void startNested(RequestPredicate predicate) { <add> nestedAttributes.addFirst(attributes); <add> attributes = null; <ide> } <ide> <ide> @Override <ide> public void endNested(RequestPredicate predicate) { <add> attributes = nestedAttributes.removeFirst(); <ide> } <ide> <ide> 
@Override <ide> public void route(RequestPredicate predicate, HandlerFunction<?> handlerFunction) { <del> assertThat(this.attributes).isNotNull(); <del> this.attributes = null; <add> Stream<Map<String, Object>> current = Optional.ofNullable(attributes).stream(); <add> Stream<Map<String, Object>> nested = nestedAttributes.stream().filter(Objects::nonNull); <add> routerFunctionsAttributes.add(Stream.concat(current, nested).collect(Collectors.toUnmodifiableList())); <add> attributes = null; <ide> } <ide> <ide> @Override <ide> public void resources(Function<ServerRequest, Optional<Resource>> lookupFunction <ide> <ide> @Override <ide> public void attributes(Map<String, Object> attributes) { <del> assertThat(attributes).containsExactly(entry("foo", "bar"), entry("baz", "qux")); <ide> this.attributes = attributes; <ide> this.visitCount++; <ide> } <ide><path>spring-webmvc/src/test/java/org/springframework/web/servlet/function/RouterFunctionBuilderTests.java <ide> /* <del> * Copyright 2002-2020 the original author or authors. <add> * Copyright 2002-2022 the original author or authors. <ide> * <ide> * Licensed under the Apache License, Version 2.0 (the "License"); <ide> * you may not use this file except in compliance with the License. 
<ide> package org.springframework.web.servlet.function; <ide> <ide> import java.io.IOException; <add>import java.util.List; <add>import java.util.Map; <ide> import java.util.Optional; <ide> import java.util.concurrent.atomic.AtomicInteger; <ide> import java.util.function.Consumer; <ide> public void attributes() { <ide> atts.put("foo", "bar"); <ide> atts.put("baz", "qux"); <ide> }) <add> .path("/atts", b1 -> b1 <add> .GET("/3", request -> ServerResponse.ok().build()) <add> .withAttribute("foo", "bar") <add> .GET("/4", request -> ServerResponse.ok().build()) <add> .withAttribute("baz", "qux") <add> .path("/5", b2 -> b2 <add> .GET(request -> ServerResponse.ok().build()) <add> .withAttribute("foo", "n3")) <add> .withAttribute("foo", "n2") <add> ) <add> .withAttribute("foo", "n1") <ide> .build(); <ide> <ide> AttributesTestVisitor visitor = new AttributesTestVisitor(); <ide> route.accept(visitor); <del> assertThat(visitor.visitCount()).isEqualTo(2); <add> assertThat(visitor.routerFunctionsAttributes()).containsExactly( <add> List.of(Map.of("foo", "bar", "baz", "qux")), <add> List.of(Map.of("foo", "bar", "baz", "qux")), <add> List.of(Map.of("foo", "bar"), Map.of("foo", "n1")), <add> List.of(Map.of("baz", "qux"), Map.of("foo", "n1")), <add> List.of(Map.of("foo", "n3"), Map.of("foo", "n2"), Map.of("foo", "n1")) <add> ); <add> assertThat(visitor.visitCount()).isEqualTo(7); <ide> } <del> <del> <ide> } <ide><path>spring-webmvc/src/test/java/org/springframework/web/servlet/function/RouterFunctionTests.java <ide> /* <del> * Copyright 2002-2020 the original author or authors. <add> * Copyright 2002-2022 the original author or authors. <ide> * <ide> * Licensed under the Apache License, Version 2.0 (the "License"); <ide> * you may not use this file except in compliance with the License. 
<ide> package org.springframework.web.servlet.function; <ide> <ide> import java.util.Collections; <add>import java.util.List; <add>import java.util.Map; <ide> import java.util.Optional; <ide> <ide> import org.junit.jupiter.api.Test; <ide> <ide> import org.springframework.web.servlet.handler.PathPatternsTestUtils; <ide> <ide> import static org.assertj.core.api.Assertions.assertThat; <add>import static org.springframework.http.HttpMethod.GET; <ide> import static org.springframework.web.servlet.function.RequestPredicates.GET; <add>import static org.springframework.web.servlet.function.RequestPredicates.method; <add>import static org.springframework.web.servlet.function.RequestPredicates.path; <ide> <ide> /** <ide> * @author Arjen Poutsma <ide> public void attributes() { <ide> .withAttributes(atts -> { <ide> atts.put("foo", "bar"); <ide> atts.put("baz", "qux"); <del> })); <add> })) <add> .and(RouterFunctions.nest(path("/atts"), <add> RouterFunctions.route(GET("/3"), request -> ServerResponse.ok().build()) <add> .withAttribute("foo", "bar") <add> .and(RouterFunctions.route(GET("/4"), request -> ServerResponse.ok().build()) <add> .withAttribute("baz", "qux")) <add> .and(RouterFunctions.nest(path("/5"), <add> RouterFunctions.route(method(GET), request -> ServerResponse.ok().build()) <add> .withAttribute("foo", "n3")) <add> .withAttribute("foo", "n2"))) <add> .withAttribute("foo", "n1")); <ide> <ide> AttributesTestVisitor visitor = new AttributesTestVisitor(); <ide> route.accept(visitor); <del> assertThat(visitor.visitCount()).isEqualTo(2); <add> assertThat(visitor.routerFunctionsAttributes()).containsExactly( <add> List.of(Map.of("foo", "bar", "baz", "qux")), <add> List.of(Map.of("foo", "bar", "baz", "qux")), <add> List.of(Map.of("foo", "bar"), Map.of("foo", "n1")), <add> List.of(Map.of("baz", "qux"), Map.of("foo", "n1")), <add> List.of(Map.of("foo", "n3"), Map.of("foo", "n2"), Map.of("foo", "n1")) <add> ); <add> assertThat(visitor.visitCount()).isEqualTo(7); <ide> } 
<ide> <ide>
6
Javascript
Javascript
fix incomplete animation test
f61ff695192df2123ea086ddd5fe65e27e9b4e18
<ide><path>test/ngAnimate/animateSpec.js <ide> describe("ngAnimate", function() { <ide> expect(element.hasClass('hiding')).toBe(false); <ide> <ide> $animate.addClass(element, 'ng-hide'); <del> return <ide> <ide> if($sniffer.transitions) { <ide> expect(element).toBeShown(); //still showing <ide> $timeout.flush(); <ide> expect(element).toBeShown(); <add> $timeout.flushNext(5555); <ide> } <del> $timeout.flushNext(555); <del> if($sniffer.transitions) { <del> expect(element).toBeShown(); <del> $timeout.flushNext(5000); <del> } <add> $timeout.flush(); <ide> expect(element).toBeHidden(); <ide> <ide> expect(element.hasClass('showing')).toBe(false); <ide> describe("ngAnimate", function() { <ide> expect(element).toBeHidden(); <ide> $timeout.flush(); <ide> expect(element).toBeHidden(); <add> $timeout.flushNext(5580); <ide> } <del> $timeout.flushNext(25); <del> if($sniffer.transitions) { <del> expect(element).toBeHidden(); <del> $timeout.flushNext(5000); <del> } <add> $timeout.flush(); <ide> expect(element).toBeShown(); <ide> <ide> expect(element.hasClass('showing')).toBe(true);
1
Text
Text
update onboarding task
061e09891cd5115641b5c3199a5f457b519ec77b
<ide><path>doc/onboarding.md <ide> needs to be pointed out separately during the onboarding. <ide> -1` <ide> * Collaborators are in alphabetical order by GitHub username. <ide> * Optionally, include your personal pronouns. <del>* Label your pull request with the `doc` and `notable-change` labels. <add>* Label your pull request with the `doc`, `notable-change`, and `fast-track` <add> labels. <ide> * Run CI on the PR. Because the PR does not affect any code, use the <ide> `node-test-pull-request-lite-pipeline` CI task. <del>* After one or two approvals, land the PR (PRs of this type do not need to wait <del> for 48 hours to land). <add>* After two Collaborator approvals for the change and two Collaborator approvals <add> for fast-tracking, land the PR. <ide> * Be sure to add the `PR-URL: <full-pr-url>` and appropriate `Reviewed-By:` <ide> metadata. <ide> * [`node-core-utils`][] automates the generation of metadata and the landing
1
Python
Python
update doc for xlm and xlnet
44c985facdf562d6cf3d7cd72f2900e3a0d85d6e
<ide><path>pytorch_transformers/modeling_bert.py <ide> def init_weights(self, module): <ide> (see `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details). <ide> **attention_mask**: (`optional`) ``torch.Tensor`` of shape ``(batch_size, sequence_length)``: <ide> Mask to avoid performing attention on padding token indices. <del> Mask indices selected in ``[0, 1]``: <add> Mask values selected in ``[0, 1]``: <ide> ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. <ide> **head_mask**: (`optional`) ``torch.Tensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``: <ide> Mask to nullify selected heads of the self-attention modules. <del> Mask indices selected in ``[0, 1]``: <add> Mask values selected in ``[0, 1]``: <ide> ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. <ide> """ <ide> <ide> def forward(self, input_ids, position_ids=None, token_type_ids=None, attention_m <ide> return outputs # sequence_output, pooled_output, (hidden_states), (attentions) <ide> <ide> <del>@add_start_docstrings("""Bert Model transformer BERT model with two heads on top as done during the pre-training: <add>@add_start_docstrings("""Bert Model with two heads on top as done during the pre-training: <ide> a `masked language modeling` head and a `next sentence prediction (classification)` head. """, <ide> BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING) <ide> class BertForPreTraining(BertPreTrainedModel): <ide> def forward(self, input_ids, position_ids=None, token_type_ids=None, attention_m <ide> return outputs # (loss), prediction_scores, seq_relationship_score, (hidden_states), (attentions) <ide> <ide> <del>@add_start_docstrings("""Bert Model transformer BERT model with a `language modeling` head on top. """, <add>@add_start_docstrings("""Bert Model with a `language modeling` head on top. 
""", <ide> BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING) <ide> class BertForMaskedLM(BertPreTrainedModel): <ide> r""" <ide> def forward(self, input_ids, position_ids=None, token_type_ids=None, attention_m <ide> return outputs # (masked_lm_loss), prediction_scores, (hidden_states), (attentions) <ide> <ide> <del>@add_start_docstrings("""Bert Model transformer BERT model with a `next sentence prediction (classification)` head on top. """, <add>@add_start_docstrings("""Bert Model with a `next sentence prediction (classification)` head on top. """, <ide> BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING) <ide> class BertForNextSentencePrediction(BertPreTrainedModel): <ide> r""" <ide> def forward(self, input_ids, position_ids=None, token_type_ids=None, attention_m <ide> return outputs # (next_sentence_loss), seq_relationship_score, (hidden_states), (attentions) <ide> <ide> <del>@add_start_docstrings("""Bert Model transformer BERT model with a sequence classification/regression head on top (a linear layer on top of <add>@add_start_docstrings("""Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of <ide> the pooled output) e.g. for GLUE tasks. """, <ide> BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING) <ide> class BertForSequenceClassification(BertPreTrainedModel): <ide> def forward(self, input_ids, position_ids=None, token_type_ids=None, attention_m <ide> return outputs # (loss), logits, (hidden_states), (attentions) <ide> <ide> <del>@add_start_docstrings("""Bert Model transformer BERT model with a multiple choice classification head on top (a linear layer on top of <add>@add_start_docstrings("""Bert Model with a multiple choice classification head on top (a linear layer on top of <ide> the pooled output and a softmax) e.g. for RocStories/SWAG tasks. 
""", <ide> BERT_START_DOCSTRING) <ide> class BertForMultipleChoice(BertPreTrainedModel): <ide> class BertForMultipleChoice(BertPreTrainedModel): <ide> **attention_mask**: (`optional`) ``torch.Tensor`` of shape ``(batch_size, num_choices, sequence_length)``: <ide> Mask to avoid performing attention on padding token indices. <ide> The second dimension of the input (`num_choices`) indicates the number of choices to score. <del> Mask indices selected in ``[0, 1]``: <add> Mask values selected in ``[0, 1]``: <ide> ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. <ide> **head_mask**: (`optional`) ``torch.Tensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``: <ide> Mask to nullify selected heads of the self-attention modules. <del> Mask indices selected in ``[0, 1]``: <add> Mask values selected in ``[0, 1]``: <ide> ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. <ide> **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: <ide> Labels for computing the multiple choice classification loss. <ide> def forward(self, input_ids, position_ids=None, token_type_ids=None, attention_m <ide> return outputs # (loss), reshaped_logits, (hidden_states), (attentions) <ide> <ide> <del>@add_start_docstrings("""Bert Model transformer BERT model with a token classification head on top (a linear layer on top of <add>@add_start_docstrings("""Bert Model with a token classification head on top (a linear layer on top of <ide> the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. 
""", <ide> BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING) <ide> class BertForTokenClassification(BertPreTrainedModel): <ide> def forward(self, input_ids, position_ids=None, token_type_ids=None, attention_m <ide> return outputs # (loss), scores, (hidden_states), (attentions) <ide> <ide> <del>@add_start_docstrings("""Bert Model transformer BERT model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of <add>@add_start_docstrings("""Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of <ide> the hidden-states output to compute `span start logits` and `span end logits`). """, <ide> BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING) <ide> class BertForQuestionAnswering(BertPreTrainedModel): <ide> r""" <ide> **start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: <del> Position (index) of the start of the labelled span for computing the token classification loss. <add> Labels for position (index) of the start of the labelled span for computing the token classification loss. <ide> Positions are clamped to the length of the sequence (`sequence_length`). <ide> Position outside of the sequence are not taken into account for computing the loss. <ide> **end_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: <del> Position (index) of the end of the labelled span for computing the token classification loss. <add> Labels for position (index) of the end of the labelled span for computing the token classification loss. <ide> Positions are clamped to the length of the sequence (`sequence_length`). <ide> Position outside of the sequence are not taken into account for computing the loss. <ide> <ide><path>pytorch_transformers/modeling_gpt2.py <ide> def init_weights(self, module): <ide> (see `past` output below). Can be used to speed up sequential decoding. 
<ide> **attention_mask**: (`optional`) ``torch.Tensor`` of shape ``(batch_size, sequence_length)``: <ide> Mask to avoid performing attention on padding token indices. <del> Mask indices selected in ``[0, 1]``: <add> Mask values selected in ``[0, 1]``: <ide> ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. <ide> **head_mask**: (`optional`) ``torch.Tensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``: <ide> Mask to nullify selected heads of the self-attention modules. <del> Mask indices selected in ``[0, 1]``: <add> Mask values selected in ``[0, 1]``: <ide> ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. <ide> """ <ide> <ide> def forward(self, input_ids, position_ids=None, token_type_ids=None, past=None, <ide> (linear layer with weights tied to the input embeddings). """, GPT2_START_DOCSTRING, GPT2_INPUTS_DOCSTRING) <ide> class GPT2LMHeadModel(GPT2PreTrainedModel): <ide> r""" <del> **lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: <add> **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: <ide> Labels for language modeling. <ide> Note that the labels **are shifted** inside the model, i.e. you can set ``lm_labels = input_ids`` <ide> Indices are selected in ``[-1, 0, ..., config.vocab_size]`` <ide> All labels set to ``-1`` are ignored (masked), the loss is only <ide> computed for labels in ``[0, ..., config.vocab_size]`` <ide> <ide> Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: <del> **loss**: (`optional`, returned when ``lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: <add> **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: <ide> Language modeling loss. 
<ide> **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)`` <ide> Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). <ide> class GPT2LMHeadModel(GPT2PreTrainedModel): <ide> >>> tokenizer = GPT2Tokenizer.from_pretrained('gpt2') <ide> >>> model = GPT2LMHeadModel(config) <ide> >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 <del> >>> outputs = model(input_ids, lm_labels=input_ids) <add> >>> outputs = model(input_ids, labels=input_ids) <ide> >>> loss, logits = outputs[:2] <ide> <ide> """ <ide> def tie_weights(self): <ide> self._tie_or_clone_weights(self.lm_head, <ide> self.transformer.wte) <ide> <del> def forward(self, input_ids, position_ids=None, token_type_ids=None, lm_labels=None, past=None, head_mask=None): <add> def forward(self, input_ids, position_ids=None, token_type_ids=None, labels=None, past=None, head_mask=None): <ide> transformer_outputs = self.transformer(input_ids, position_ids, token_type_ids, past, head_mask) <ide> hidden_states = transformer_outputs[0] <ide> <ide> lm_logits = self.lm_head(hidden_states) <ide> <ide> outputs = (lm_logits,) + transformer_outputs[1:] <del> if lm_labels is not None: <add> if labels is not None: <ide> # Shift so that tokens < n predict n <ide> shift_logits = lm_logits[..., :-1, :].contiguous() <del> shift_labels = lm_labels[..., 1:].contiguous() <add> shift_labels = labels[..., 1:].contiguous() <ide> # Flatten the tokens <ide> loss_fct = CrossEntropyLoss(ignore_index=-1) <ide> loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), <ide> class GPT2DoubleHeadsModel(GPT2PreTrainedModel): <ide> (see `past` output below). Can be used to speed up sequential decoding. <ide> **attention_mask**: (`optional`) ``torch.Tensor`` of shape ``(batch_size, num_choices, sequence_length)``: <ide> Mask to avoid performing attention on padding token indices. 
<del> Mask indices selected in ``[0, 1]``: <add> Mask values selected in ``[0, 1]``: <ide> ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. <ide> **head_mask**: (`optional`) ``torch.Tensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``: <ide> Mask to nullify selected heads of the self-attention modules. <del> Mask indices selected in ``[0, 1]``: <add> Mask values selected in ``[0, 1]``: <ide> ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. <ide> **lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: <ide> Labels for language modeling. <ide><path>pytorch_transformers/modeling_openai.py <ide> def init_weights(self, module): <ide> Indices are selected in the vocabulary (unlike BERT which has a specific vocabulary for segment indices). <ide> **attention_mask**: (`optional`) ``torch.Tensor`` of shape ``(batch_size, sequence_length)``: <ide> Mask to avoid performing attention on padding token indices. <del> Mask indices selected in ``[0, 1]``: <add> Mask values selected in ``[0, 1]``: <ide> ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. <ide> **head_mask**: (`optional`) ``torch.Tensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``: <ide> Mask to nullify selected heads of the self-attention modules. <del> Mask indices selected in ``[0, 1]``: <add> Mask values selected in ``[0, 1]``: <ide> ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. <ide> """ <ide> <ide> def forward(self, input_ids, position_ids=None, token_type_ids=None, head_mask=N <ide> (linear layer with weights tied to the input embeddings). 
""", OPENAI_GPT_START_DOCSTRING, OPENAI_GPT_INPUTS_DOCSTRING) <ide> class OpenAIGPTLMHeadModel(OpenAIGPTPreTrainedModel): <ide> r""" <del> **lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: <add> **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: <ide> Labels for language modeling. <ide> Note that the labels **are shifted** inside the model, i.e. you can set ``lm_labels = input_ids`` <ide> Indices are selected in ``[-1, 0, ..., config.vocab_size]`` <ide> All labels set to ``-1`` are ignored (masked), the loss is only <ide> computed for labels in ``[0, ..., config.vocab_size]`` <ide> <ide> Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: <del> **loss**: (`optional`, returned when ``lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: <add> **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: <ide> Language modeling loss. <ide> **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)`` <ide> Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). 
<ide> class OpenAIGPTLMHeadModel(OpenAIGPTPreTrainedModel): <ide> >>> tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt') <ide> >>> model = OpenAIGPTLMHeadModel(config) <ide> >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 <del> >>> outputs = model(input_ids, lm_labels=input_ids) <add> >>> outputs = model(input_ids, labels=input_ids) <ide> >>> loss, logits = outputs[:2] <ide> <ide> """ <ide> def tie_weights(self): <ide> self._tie_or_clone_weights(self.lm_head, <ide> self.transformer.tokens_embed) <ide> <del> def forward(self, input_ids, position_ids=None, token_type_ids=None, lm_labels=None, head_mask=None): <add> def forward(self, input_ids, position_ids=None, token_type_ids=None, labels=None, head_mask=None): <ide> transformer_outputs = self.transformer(input_ids, position_ids, token_type_ids, head_mask) <ide> hidden_states = transformer_outputs[0] <ide> lm_logits = self.lm_head(hidden_states) <ide> <ide> outputs = (lm_logits,) + transformer_outputs[1:] <del> if lm_labels is not None: <add> if labels is not None: <ide> # Shift so that tokens < n predict n <ide> shift_logits = lm_logits[..., :-1, :].contiguous() <del> shift_labels = lm_labels[..., 1:].contiguous() <add> shift_labels = labels[..., 1:].contiguous() <ide> # Flatten the tokens <ide> loss_fct = CrossEntropyLoss(ignore_index=-1) <ide> loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), <ide> class OpenAIGPTDoubleHeadsModel(OpenAIGPTPreTrainedModel): <ide> Indices are selected in the vocabulary (unlike BERT which has a specific vocabulary for segment indices). <ide> **attention_mask**: (`optional`) ``torch.Tensor`` of shape ``(batch_size, num_choices, sequence_length)``: <ide> Mask to avoid performing attention on padding token indices. <del> Mask indices selected in ``[0, 1]``: <add> Mask values selected in ``[0, 1]``: <ide> ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. 
<ide> **head_mask**: (`optional`) ``torch.Tensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``: <ide> Mask to nullify selected heads of the self-attention modules. <del> Mask indices selected in ``[0, 1]``: <add> Mask values selected in ``[0, 1]``: <ide> ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. <ide> **lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: <ide> Labels for language modeling. <ide><path>pytorch_transformers/modeling_transfo_xl.py <ide> def set_num_special_tokens(self, num_special_tokens): <ide> Indices can be obtained using :class:`pytorch_transformers.TransfoXLTokenizer`. <ide> See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and <ide> :func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details. <del> **mems**: <add> **mems**: (`optional`) <ide> list of ``torch.FloatTensor`` (one for each layer): <ide> that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model <ide> (see `mems` output below). Can be used to speed up sequential decoding and attend to longer context. <ide> **head_mask**: (`optional`) ``torch.Tensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``: <ide> Mask to nullify selected heads of the self-attention modules. <del> Mask indices selected in ``[0, 1]``: <add> Mask values selected in ``[0, 1]``: <ide> ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. <ide> """ <ide> <ide> class TransfoXLModel(TransfoXLPreTrainedModel): <ide> Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: <ide> **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)`` <ide> Sequence of hidden-states at the last layer of the model. 
<del> **mems**: ``torch.Tensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``: <add> **mems**: <ide> list of ``torch.FloatTensor`` (one for each layer): <ide> that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model <ide> (see `mems` input above). Can be used to speed up sequential decoding and attend to longer context. <ide> class TransfoXLLMHeadModel(TransfoXLPreTrainedModel): <ide> **prediction_scores**: ``None`` if ``lm_labels`` is provided else ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)`` <ide> Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). <ide> We don't output them when the loss is computed to speedup adaptive softmax decoding. <del> **mems**: ``torch.Tensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``: <add> **mems**: <ide> list of ``torch.FloatTensor`` (one for each layer): <ide> that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model <ide> (see `mems` input above). Can be used to speed up sequential decoding and attend to longer context. <ide><path>pytorch_transformers/modeling_utils.py <ide> def forward(self, hidden_states, start_states=None, start_positions=None, cls_in <ide> <ide> class SQuADHead(nn.Module): <ide> """ A SQuAD head inspired by XLNet. 
<del> Compute <ide> """ <ide> def __init__(self, config): <ide> super(SQuADHead, self).__init__() <ide><path>pytorch_transformers/modeling_xlm.py <ide> from torch.nn import functional as F <ide> from torch.nn import CrossEntropyLoss, MSELoss <ide> <del>from .modeling_utils import (PretrainedConfig, PreTrainedModel, <add>from .modeling_utils import (PretrainedConfig, PreTrainedModel, add_start_docstrings, <ide> prune_linear_layer, SequenceSummary, SQuADHead) <ide> <ide> logger = logging.getLogger(__name__) <ide> def init_weights(self, module): <ide> module.weight.data.fill_(1.0) <ide> <ide> <del>class XLMModel(XLMPreTrainedModel): <del> """ <del> XLM model from: "Cross-lingual Language Model Pretraining" by Guillaume Lample, Alexis Conneau <add>XLM_START_DOCSTRING = r""" The XLM model was proposed in <add> `Cross-lingual Language Model Pretraining`_ <add> by Guillaume Lample*, Alexis Conneau*. It's a transformer pre-trained using one of the following objectives: <ide> <del> Paper: https://arxiv.org/abs/1901.07291 <add> - a causal language modeling (CLM) objective (next token prediction), <add> - a masked language modeling (MLM) objective (Bert-like), or <add> - a Translation Language Modeling (TLM) object (extension of Bert's MLM to multiple language inputs) <ide> <del> Original code: https://github.com/facebookresearch/XLM <add> Original code can be found `here`_. <ide> <del> Args: <del> `config`: a XLMConfig class instance with the configuration to build a new model <del> `output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False <del> `keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient. <del> This can be used to compute head importance metrics. Default: False <add> This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and <add> refer to the PyTorch documentation for all matter related to general usage and behavior. 
<ide> <del> Example:: <add> .. _`Cross-lingual Language Model Pretraining`: <add> https://arxiv.org/abs/1901.07291 <ide> <del> config = modeling.XLMConfig(vocab_size_or_config_json_file=32000, hidden_size=768, <del> num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) <add> .. _`torch.nn.Module`: <add> https://pytorch.org/docs/stable/nn.html#module <ide> <del> model = modeling.XLMModel(config=config) <del> """ <add> .. _`here`: <add> https://github.com/facebookresearch/XLM <add> <add> Parameters: <add> config (:class:`~pytorch_transformers.XLMConfig`): Model configuration class with all the parameters of the model. <add>""" <ide> <add>XLM_INPUTS_DOCSTRING = r""" <add> Inputs: <add> **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: <add> Indices of input sequence tokens in the vocabulary. <add> Indices can be obtained using :class:`pytorch_transformers.XLMTokenizer`. <add> See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and <add> :func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details. <add> **position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: <add> Indices of positions of each input sequence tokens in the position embeddings. <add> Selected in the range ``[0, config.max_position_embeddings - 1[``. <add> **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: <add> A parallel sequence of tokens (can be used to indicate various portions of the inputs). <add> The embeddings from these tokens will be summed with the respective token embeddings. <add> Indices are selected in the vocabulary (unlike BERT which has a specific vocabulary for segment indices). <add> **langs**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: <add> A parallel sequence of tokens to be used to indicate the language of each token in the input. 
<add> Indices are selected in the pre-trained language vocabulary, <add> i.e. in the range ``[0, config.n_langs - 1[``. <add> **attention_mask**: (`optional`) ``torch.Tensor`` of shape ``(batch_size, sequence_length)``: <add> Mask to avoid performing attention on padding token indices. <add> Mask values selected in ``[0, 1]``: <add> ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. <add> **lengths**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: <add> Length of each sentence that can be used to avoid performing attention on padding token indices. <add> You can also use `attention_mask` for the same result (see above), kept here for compatbility. <add> Indices selected in ``[0, ..., input_ids.size(-1)]``: <add> **cache**: <add> dictionary with ``torch.FloatTensor`` that contains pre-computed <add> hidden-states (key and values in the attention blocks) as computed by the model <add> (see `cache` output below). Can be used to speed up sequential decoding. <add> The dictionary object will be modified in-place during the forward pass to add newly computed hidden-states. <add> **head_mask**: (`optional`) ``torch.Tensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``: <add> Mask to nullify selected heads of the self-attention modules. <add> Mask values selected in ``[0, 1]``: <add> ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. <add>""" <add> <add>@add_start_docstrings("The bare XLM Model transformer outputing raw hidden-states without any specific head on top.", <add> XLM_START_DOCSTRING, XLM_INPUTS_DOCSTRING) <add>class XLMModel(XLMPreTrainedModel): <add> r""" <add> Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: <add> **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)`` <add> Sequence of hidden-states at the last layer of the model. 
<add> **attentions**: (`optional`, returned when ``config.output_attentions=True``) <add> list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: <add> Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. <add> **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) <add> list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) <add> of shape ``(batch_size, sequence_length, hidden_size)``: <add> Hidden-states of the model at the output of each layer plus the initial embedding outputs. <add> <add> Examples:: <add> <add> >>> config = XLMConfig.from_pretrained('xlm-mlm-en-2048') <add> >>> tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048') <add> >>> model = XLMModel(config) <add> >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 <add> >>> outputs = model(input_ids) <add> >>> last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple <add> <add> """ <ide> ATTRIBUTES = ['encoder', 'eos_index', 'pad_index', # 'with_output', <ide> 'n_langs', 'n_words', 'dim', 'n_layers', 'n_heads', <ide> 'hidden_dim', 'dropout', 'attention_dropout', 'asm', <ide> def _prune_heads(self, heads_to_prune): <ide> for layer, heads in heads_to_prune.items(): <ide> self.attentions[layer].prune_heads(heads) <ide> <del> def forward(self, input_ids, lengths=None, positions=None, langs=None, <add> def forward(self, input_ids, lengths=None, position_ids=None, langs=None, <ide> token_type_ids=None, attention_mask=None, cache=None, head_mask=None): # src_enc=None, src_len=None, <del> """ <del> Performs a model forward pass. 
**Can be called by calling the class directly, once it has been instantiated.** <del> <del> Parameters: <del> `input_ids`: a ``torch.LongTensor`` of shape [batch_size, sequence_length] <del> with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts <del> `run_bert_extract_features.py`, `run_bert_classifier.py` and `run_bert_squad.py`) <del> `lengths`: ``torch.LongTensor`` of size ``bs``, containing the length of each sentence <del> `positions`: ``torch.LongTensor`` of size ``(bs, slen)``, containing word positions <del> `langs`: ``torch.LongTensor`` of size ``(bs, slen)``, containing language IDs <del> `token_type_ids`: an optional ``torch.LongTensor`` of shape [batch_size, sequence_length] with the token <del> types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to <del> a `sentence B` token (see XLM paper for more details). <del> `attention_mask`: an optional ``torch.LongTensor`` of shape [batch_size, sequence_length] with indices <del> selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max <del> input sequence length in the current batch. It's the mask that we typically use for attention when <del> a batch has varying length sentences. <del> `cache`: TODO <del> `head_mask`: an optional ``torch.Tensor`` of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1. <del> It's a mask to be used to nullify some heads of the transformer. 1.0 => head is fully masked, 0.0 => head is not masked. <del> <del> <del> Returns: <del> A ``tuple(encoded_layers, pooled_output)``, with <del> <del> ``encoded_layers``: controlled by ``output_all_encoded_layers`` argument: <del> <del> - ``output_all_encoded_layers=True``: outputs a list of the full sequences of encoded-hidden-states at the end \ <del> of each attention block (i.e. 
12 full sequences for XLM-base, 24 for XLM-large), each \ <del> encoded-hidden-state is a ``torch.FloatTensor`` of size [batch_size, sequence_length, hidden_size], <del> <del> - ``output_all_encoded_layers=False``: outputs only the full sequence of hidden-states corresponding \ <del> to the last attention block of shape [batch_size, sequence_length, hidden_size], <del> <del> ``pooled_output``: a ``torch.FloatTensor`` of size [batch_size, hidden_size] which is the output of a <del> classifier pre-trained on top of the hidden state associated to the first character of the <del> input (`CLS`) to train on the Next-Sentence task (see XLM's paper). <del> <del> Example:: <del> <del> # Already been converted into WordPiece token ids <del> input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) <del> input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) <del> token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) <del> <del> all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask) <del> # or <del> all_encoder_layers, pooled_output = model.forward(input_ids, token_type_ids, input_mask) <del> """ <ide> if lengths is None: <ide> lengths = (input_ids != self.pad_index).sum(dim=1).long() <ide> # mask = input_ids != self.pad_index <ide> def forward(self, input_ids, lengths=None, positions=None, langs=None, <ide> # if self.is_decoder and src_enc is not None: <ide> # src_mask = torch.arange(src_len.max(), dtype=torch.long, device=lengths.device) < src_len[:, None] <ide> <del> # positions <del> if positions is None: <del> positions = input_ids.new((slen,)).long() <del> positions = torch.arange(slen, out=positions).unsqueeze(0) <add> # position_ids <add> if position_ids is None: <add> position_ids = input_ids.new((slen,)).long() <add> position_ids = torch.arange(slen, out=position_ids).unsqueeze(0) <ide> else: <del> assert positions.size() == (bs, slen) # (slen, bs) <del> # positions = positions.transpose(0, 1) <add> assert position_ids.size() == (bs, 
slen) # (slen, bs) <add> # position_ids = position_ids.transpose(0, 1) <ide> <ide> # langs <del> assert langs is None or token_type_ids is None, "You can only use one among langs and token_type_ids" <del> if token_type_ids is not None: <del> langs = token_type_ids <ide> if langs is not None: <ide> assert langs.size() == (bs, slen) # (slen, bs) <ide> # langs = langs.transpose(0, 1) <ide> def forward(self, input_ids, lengths=None, positions=None, langs=None, <ide> if cache is not None: <ide> _slen = slen - cache['slen'] <ide> input_ids = input_ids[:, -_slen:] <del> positions = positions[:, -_slen:] <add> position_ids = position_ids[:, -_slen:] <ide> if langs is not None: <ide> langs = langs[:, -_slen:] <ide> mask = mask[:, -_slen:] <ide> attn_mask = attn_mask[:, -_slen:] <ide> <ide> # embeddings <ide> tensor = self.embeddings(input_ids) <del> tensor = tensor + self.position_embeddings(positions).expand_as(tensor) <add> tensor = tensor + self.position_embeddings(position_ids).expand_as(tensor) <ide> if langs is not None: <ide> tensor = tensor + self.lang_embeddings(langs) <add> if token_type_ids is not None: <add> tensor = tensor + self.embeddings(token_type_ids) <ide> tensor = self.layer_norm_emb(tensor) <ide> tensor = F.dropout(tensor, p=self.dropout, training=self.training) <ide> tensor *= mask.unsqueeze(-1).to(tensor.dtype) <ide> def forward(self, x, y=None): <ide> return outputs <ide> <ide> <add>@add_start_docstrings("""The XLM Model transformer with a language modeling head on top <add> (linear layer with weights tied to the input embeddings). 
""", <add> XLM_START_DOCSTRING, XLM_INPUTS_DOCSTRING) <ide> class XLMWithLMHeadModel(XLMPreTrainedModel): <del> """ XLM model from: "Cross-lingual Language Model Pretraining" by Guillaume Lample, Alexis Conneau <del> <del> Paper: https://arxiv.org/abs/1901.07291 <del> <del> Original code: https://github.com/facebookresearch/XLM <add> r""" <add> **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: <add> Labels for language modeling. <add> Note that the labels **are shifted** inside the model, i.e. you can set ``lm_labels = input_ids`` <add> Indices are selected in ``[-1, 0, ..., config.vocab_size]`` <add> All labels set to ``-1`` are ignored (masked), the loss is only <add> computed for labels in ``[0, ..., config.vocab_size]`` <add> <add> Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: <add> **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: <add> Language modeling loss. <add> **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)`` <add> Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). <add> **attentions**: (`optional`, returned when ``config.output_attentions=True``) <add> list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: <add> Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. <add> **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) <add> list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) <add> of shape ``(batch_size, sequence_length, hidden_size)``: <add> Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
<add> <add> Examples:: <add> <add> >>> config = XLMConfig.from_pretrained('xlm-mlm-en-2048') <add> >>> tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048') <add> >>> model = XLMWithLMHeadModel(config) <add> >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 <add> >>> outputs = model(input_ids) <add> >>> last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple <ide> <del> Args: <del> `config`: a XLMConfig class instance with the configuration to build a new model <del> `output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False <del> `keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient. <del> This can be used to compute head importance metrics. Default: False <del> <del> Example:: <del> <del> config = modeling.XLMConfig(vocab_size_or_config_json_file=32000, hidden_size=768, <del> num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) <del> <del> model = modeling.XLMModel(config=config) <ide> """ <ide> def __init__(self, config): <ide> super(XLMWithLMHeadModel, self).__init__(config) <ide> def tie_weights(self): <ide> """ <ide> self._tie_or_clone_weights(self.pred_layer.proj, self.transformer.embeddings) <ide> <del> def forward(self, input_ids, lengths=None, positions=None, langs=None, token_type_ids=None, <add> def forward(self, input_ids, lengths=None, position_ids=None, langs=None, token_type_ids=None, <ide> attention_mask=None, cache=None, labels=None, head_mask=None): <del> """ <del> Args: <del> `input_ids`: a ``torch.LongTensor`` of shape [batch_size, sequence_length] <del> with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts <del> `run_bert_extract_features.py`, `run_bert_classifier.py` and `run_bert_squad.py`) <del> `lengths`: TODO <del> `positions`: TODO <del> `langs`: TODO <del> `token_type_ids`: an optional 
``torch.LongTensor`` of shape [batch_size, sequence_length] with the token <del> types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to <del> a `sentence B` token (see XLM paper for more details). <del> `attention_mask`: an optional ``torch.LongTensor`` of shape [batch_size, sequence_length] with indices <del> selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max <del> input sequence length in the current batch. It's the mask that we typically use for attention when <del> a batch has varying length sentences. <del> `cache`: TODO <del> `labels`: TODO <del> `head_mask`: an optional ``torch.Tensor`` of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1. <del> It's a mask to be used to nullify some heads of the transformer. 1.0 => head is fully masked, 0.0 => head is not masked. <del> <del> <del> Returns: <del> A ``tuple(encoded_layers, pooled_output)``, with <del> <del> ``encoded_layers``: controlled by ``output_all_encoded_layers`` argument: <del> <del> If ``output_all_encoded_layers=True``: outputs a list of the full sequences of encoded-hidden-states \ <del> at the end of each attention block (i.e. 12 full sequences for XLM-base, 24 for XLM-large), each \ <del> encoded-hidden-state is a ``torch.FloatTensor`` of size [batch_size, sequence_length, hidden_size], <del> <del> If ``output_all_encoded_layers=False``: outputs only the full sequence of hidden-states corresponding \ <del> to the last attention block of shape [batch_size, sequence_length, hidden_size], <del> <del> ``pooled_output``: a ``torch.FloatTensor`` of size [batch_size, hidden_size] which is the output of a \ <del> classifier pre-trained on top of the hidden state associated to the first character of the \ <del> input (`CLS`) to train on the Next-Sentence task (see XLM's paper). 
<del> <del> Example:: <del> <del> # Already been converted into WordPiece token ids <del> input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) <del> input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) <del> token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) <del> <del> all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask) <del> # or <del> all_encoder_layers, pooled_output = model.forward(input_ids, token_type_ids, input_mask) <del> """ <del> transformer_outputs = self.transformer(input_ids, lengths=lengths, positions=positions, token_type_ids=token_type_ids, <add> transformer_outputs = self.transformer(input_ids, lengths=lengths, position_ids=position_ids, token_type_ids=token_type_ids, <ide> langs=langs, attention_mask=attention_mask, cache=cache, head_mask=head_mask) <ide> <ide> output = transformer_outputs[0] <ide> def forward(self, input_ids, lengths=None, positions=None, langs=None, token_typ <ide> return outputs <ide> <ide> <add>@add_start_docstrings("""XLM Model with a sequence classification/regression head on top (a linear layer on top of <add> the pooled output) e.g. for GLUE tasks. """, <add> XLM_START_DOCSTRING, XLM_INPUTS_DOCSTRING) <ide> class XLMForSequenceClassification(XLMPreTrainedModel): <del> """XLM model ("XLM: Generalized Autoregressive Pretraining for Language Understanding"). <del> <del> Args: <del> `config`: a XLMConfig class instance with the configuration to build a new model <del> `output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False <del> `keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient. <del> This can be used to compute head importance metrics. Default: False <del> `summary_type`: str, "last", "first", "mean", or "attn". The method <del> to pool the input to get a vector representation. 
Default: last <del> <del> <del> <del> Example:: <del> <del> config = modeling.XLMConfig(vocab_size_or_config_json_file=32000, d_model=768, <del> n_layer=12, num_attention_heads=12, intermediate_size=3072) <del> <del> model = modeling.XLMModel(config=config) <add> r""" <add> **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: <add> Labels for computing the sequence classification/regression loss. <add> Indices should be in ``[0, ..., config.num_labels]``. <add> If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss), <add> If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy). <add> <add> Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: <add> **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: <add> Classification (or regression if config.num_labels==1) loss. <add> **logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)`` <add> Classification (or regression if config.num_labels==1) scores (before SoftMax). <add> **attentions**: (`optional`, returned when ``config.output_attentions=True``) <add> list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: <add> Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. <add> **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) <add> list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) <add> of shape ``(batch_size, sequence_length, hidden_size)``: <add> Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
<add> <add> Examples:: <add> <add> >>> config = XLMConfig.from_pretrained('xlm-mlm-en-2048') <add> >>> tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048') <add> >>> <add> >>> model = XLMForSequenceClassification(config) <add> >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 <add> >>> labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 <add> >>> outputs = model(input_ids, labels=labels) <add> >>> loss, logits = outputs[:2] <ide> <ide> """ <ide> def __init__(self, config): <ide> def __init__(self, config): <ide> <ide> self.apply(self.init_weights) <ide> <del> def forward(self, input_ids, lengths=None, positions=None, langs=None, token_type_ids=None, <add> def forward(self, input_ids, lengths=None, position_ids=None, langs=None, token_type_ids=None, <ide> attention_mask=None, cache=None, labels=None, head_mask=None): <del> """ <del> Args: <del> input_ids: TODO <del> lengths: TODO <del> positions: TODO <del> langs: TODO <del> token_type_ids: int32 Tensor in shape [bsz, len], the input segment IDs. <del> attention_mask: [optional] float32 Tensor, SAME FUNCTION as `input_mask` <del> but with 1 for real tokens and 0 for padding. <del> Added for easy compatibility with the XLM model (which uses this negative masking). <del> You can only uses one among `input_mask` and `attention_mask` <del> cache: TODO <del> labels: TODO <del> head_mask: TODO <del> <del> <del> Returns: <del> A ``tuple(logits_or_loss, new_mems)``. If ``labels`` is ``None``, return token logits with shape <del> [batch_size, sequence_length]. If it isn't ``None``, return the ``CrossEntropy`` loss with the targets. 
<del> <del> ``new_mems`` is a list (num layers) of updated mem states at the entry of each layer \ <del> each mem state is a ``torch.FloatTensor`` of size [self.config.mem_len, batch_size, self.config.d_model] \ <del> Note that the first two dimensions are transposed in ``mems`` with regards to ``input_ids`` and ``labels`` <del> <del> Example:: <del> <del> # Already been converted into WordPiece token ids <del> input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) <del> input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) <del> token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) <del> <del> all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask) <del> """ <del> transformer_outputs = self.transformer(input_ids, lengths=lengths, positions=positions, token_type_ids=token_type_ids, <add> transformer_outputs = self.transformer(input_ids, lengths=lengths, position_ids=position_ids, token_type_ids=token_type_ids, <ide> langs=langs, attention_mask=attention_mask, cache=cache, head_mask=head_mask) <ide> <ide> output = transformer_outputs[0] <ide> def forward(self, input_ids, lengths=None, positions=None, langs=None, token_typ <ide> return outputs <ide> <ide> <add>@add_start_docstrings("""XLM Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of <add> the hidden-states output to compute `span start logits` and `span end logits`). """, <add> XLM_START_DOCSTRING, XLM_INPUTS_DOCSTRING) <ide> class XLMForQuestionAnswering(XLMPreTrainedModel): <del> """ <del> XLM model for Question Answering (span extraction). <del> This module is composed of the XLM model with a linear layer on top of <del> the sequence output that computes start_logits and end_logits <add> r""" <add> **start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: <add> Labels for position (index) of the start of the labelled span for computing the token classification loss. 
<add> Positions are clamped to the length of the sequence (`sequence_length`). <add> Position outside of the sequence are not taken into account for computing the loss. <add> **end_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: <add> Labels for position (index) of the end of the labelled span for computing the token classification loss. <add> Positions are clamped to the length of the sequence (`sequence_length`). <add> Position outside of the sequence are not taken into account for computing the loss. <add> **is_impossible**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: <add> Labels whether a question has an answer or no answer (SQuAD 2.0) <add> **cls_index**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: <add> Labels for position (index) of the classification token to use as input for computing plausibility of the answer. <add> **p_mask**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: <add> Optional mask of tokens which can't be in answers (e.g. [CLS], [PAD], ...) <add> <add> Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: <add> **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: <add> Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. <add> **start_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)`` <add> Span-start scores (before SoftMax). <add> **end_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)`` <add> Span-end scores (before SoftMax). <add> **attentions**: (`optional`, returned when ``config.output_attentions=True``) <add> list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: <add> Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
<add> **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) <add> list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) <add> of shape ``(batch_size, sequence_length, hidden_size)``: <add> Hidden-states of the model at the output of each layer plus the initial embedding outputs. <add> <add> Examples:: <add> <add> >>> config = XLMConfig.from_pretrained('xlm-mlm-en-2048') <add> >>> tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048') <add> >>> <add> >>> model = XLMForQuestionAnswering(config) <add> >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 <add> >>> start_positions = torch.tensor([1]) <add> >>> end_positions = torch.tensor([3]) <add> >>> outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions) <add> >>> loss, start_scores, end_scores = outputs[:2] <ide> <del> Args: <del> `config`: a XLMConfig class instance with the configuration to build a new model <del> `output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False <del> `keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient. <del> This can be used to compute head importance metrics. 
Default: False <del> <del> <del> <del> Example:: <del> <del> config = XLMConfig(vocab_size_or_config_json_file=32000, hidden_size=768, <del> num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) <del> <del> model = XLMForQuestionAnswering(config) <ide> """ <ide> def __init__(self, config): <ide> super(XLMForQuestionAnswering, self).__init__(config) <ide> def __init__(self, config): <ide> <ide> self.apply(self.init_weights) <ide> <del> def forward(self, input_ids, lengths=None, positions=None, langs=None, token_type_ids=None, <add> def forward(self, input_ids, lengths=None, position_ids=None, langs=None, token_type_ids=None, <ide> attention_mask=None, cache=None, start_positions=None, end_positions=None, <ide> cls_index=None, is_impossible=None, p_mask=None, head_mask=None): <del> <del> """ <del> Performs a model forward pass. **Can be called by calling the class directly, once it has been instantiated.** <del> <del> Args: <del> input_ids: a ``torch.LongTensor`` of shape [batch_size, sequence_length] <del> with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts <del> `run_bert_extract_features.py`, `run_bert_classifier.py` and `run_bert_squad.py`) <del> lengths: TODO <del> positions: TODO <del> langs: TODO <del> token_type_ids: an optional ``torch.LongTensor`` of shape [batch_size, sequence_length] with the token <del> types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to <del> a `sentence B` token (see XLM paper for more details). <del> attention_mask: [optional] float32 Tensor, SAME FUNCTION as `input_mask` <del> but with 1 for real tokens and 0 for padding. <del> Added for easy compatibility with the XLM model (which uses this negative masking). <del> You can only uses one among `input_mask` and `attention_mask` <del> cache: TODO <del> start_positions: position of the first token for the labeled span: ``torch.LongTensor`` of shape [batch_size]. 
<del> Positions are clamped to the length of the sequence and position outside of the sequence are not taken <del> into account for computing the loss. <del> end_positions: position of the last token for the labeled span: ``torch.LongTensor`` of shape [batch_size]. <del> Positions are clamped to the length of the sequence and position outside of the sequence are not taken <del> into account for computing the loss. <del> cls_index: TODO <del> is_impossible: TODO <del> p_mask: TODO <del> head_mask: an optional ``torch.Tensor`` of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1. <del> It's a mask to be used to nullify some heads of the transformer. 1.0 => head is fully masked, 0.0 => head is not masked. <del> <del> Returns: <del> Either the ``total_loss`` or a ``tuple(start_logits, end_logits)`` <del> <del> if ``start_positions`` and ``end_positions`` are not ``None``, \ <del> outputs the total_loss which is the sum of the CrossEntropy loss for the start and end token positions. <del> <del> if ``start_positions`` or ``end_positions`` is ``None``: <del> Outputs a ``tuple(start_logits, end_logits)`` which are the logits respectively for the start and end <del> position tokens of shape [batch_size, sequence_length]. 
<del> <del> Example:: <del> <del> # Already been converted into WordPiece token ids <del> input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) <del> input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) <del> token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) <del> <del> start_logits, end_logits = model(input_ids, token_type_ids, input_mask) <del> # or <del> start_logits, end_logits = model.forward(input_ids, token_type_ids, input_mask) <del> """ <del> <del> transformer_outputs = self.transformer(input_ids, lengths=lengths, positions=positions, token_type_ids=token_type_ids, <add> transformer_outputs = self.transformer(input_ids, lengths=lengths, position_ids=position_ids, token_type_ids=token_type_ids, <ide> langs=langs, attention_mask=attention_mask, cache=cache, head_mask=head_mask) <ide> <ide> output = transformer_outputs[0] <ide><path>pytorch_transformers/modeling_xlnet.py <ide> # limitations under the License. <ide> """ PyTorch XLNet model. <ide> """ <del>from __future__ import (absolute_import, division, print_function, <del> unicode_literals) <ide> from __future__ import absolute_import, division, print_function, unicode_literals <ide> <ide> import json <ide> from torch.nn import CrossEntropyLoss, MSELoss <ide> <ide> from .modeling_utils import (CONFIG_NAME, WEIGHTS_NAME, PretrainedConfig, PreTrainedModel, <del> SequenceSummary, PoolerAnswerClass, PoolerEndLogits, PoolerStartLogits) <add> SequenceSummary, PoolerAnswerClass, PoolerEndLogits, PoolerStartLogits, <add> add_start_docstrings) <ide> <ide> <ide> logger = logging.getLogger(__name__) <ide> def init_weights(self, module): <ide> module.mask_emb.data.normal_(mean=0.0, std=self.config.initializer_range) <ide> <ide> <del>class XLNetModel(XLNetPreTrainedModel): <del> """XLNet model ("XLNet: Generalized Autoregressive Pretraining for Language Understanding"). 
<add>XLNET_START_DOCSTRING = r"""    The XLNet model was proposed in
<add>    `XLNet: Generalized Autoregressive Pretraining for Language Understanding`_
<add>    by Zhilin Yang*, Zihang Dai*, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le.
<add>    XLNet is an extension of the Transformer-XL model pre-trained using an autoregressive method
<add>    to learn bidirectional contexts by maximizing the expected likelihood over all permutations
<add>    of the input sequence factorization order.
<ide>
<del>    TODO Lysandre filled: this was copied from the XLNetLMHeadModel, check that it's ok.
<add>    The specific attention pattern can be controlled at training and test time using the `perm_mask` input.
<ide>
<del>    Args:
<del>        `config`: a XLNetConfig class instance with the configuration to build a new model
<del>        `output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
<del>        `keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
<del>            This can be used to compute head importance metrics. Default: False
<add>    Due to the difficulty of training a fully auto-regressive model over various factorization orders,
<add>    XLNet is pretrained using only a sub-set of the output tokens as target which are selected
<add>    with the `target_mapping` input.
<add>
<add>    To use XLNet for sequential decoding (i.e. not in fully bi-directional setting), use the `perm_mask` and
<add>    `target_mapping` inputs to control the attention span and outputs (see examples in `examples/run_generation.py`)
<ide>
<add>    This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and
<add>    refer to the PyTorch documentation for all matter related to general usage and behavior.
<ide>
<del>    Example::
<add>    .. _`XLNet: Generalized Autoregressive Pretraining for Language Understanding`_:
<add>        http://arxiv.org/abs/1906.08237
<ide>
<del>    config = modeling.XLNetConfig(vocab_size_or_config_json_file=32000, d_model=768,
<del>        n_layer=12, num_attention_heads=12, intermediate_size=3072)
<add>    .. _`torch.nn.Module`:
<add>        https://pytorch.org/docs/stable/nn.html#module
<ide>
<del>    model = modeling.XLNetModel(config=config)
<add>    Parameters:
<add>        config (:class:`~pytorch_transformers.XLNetConfig`): Model configuration class with all the parameters of the model.
<add>"""
<add>
<add>XLNET_INPUTS_DOCSTRING = r"""
<add>    Inputs:
<add>        **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
<add>            Indices of input sequence tokens in the vocabulary.
<add>            Indices can be obtained using :class:`pytorch_transformers.XLNetTokenizer`.
<add>            See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and
<add>            :func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
<add>        **token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
<add>            A parallel sequence of tokens (can be used to indicate various portions of the inputs).
<add>            The embeddings from these tokens will be summed with the respective token embeddings.
<add>            Indices are selected in the vocabulary (unlike BERT which has a specific vocabulary for segment indices).
<add>        **attention_mask**: (`optional`) ``torch.Tensor`` of shape ``(batch_size, sequence_length)``:
<add>            Mask to avoid performing attention on padding token indices.
<add>            Mask values selected in ``[0, 1]``:
<add>            ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
<add>        **input_mask**: (`optional`) ``torch.Tensor`` of shape ``(batch_size, sequence_length)``:
<add>            Mask to avoid performing attention on padding token indices.
<add>            Negative of `attention_mask`, i.e. with 0 for real tokens and 1 for padding.
<add>            Kept for compatibility with the original code base.
<add> You can only uses one of `input_mask` and `attention_mask` <add> Mask values selected in ``[0, 1]``: <add> ``1`` for tokens that are MASKED, ``0`` for tokens that are NOT MASKED. <add> **mems**: (`optional`) <add> list of ``torch.FloatTensor`` (one for each layer): <add> that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model <add> (see `mems` output below). Can be used to speed up sequential decoding and attend to longer context. <add> **perm_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, sequence_length)``: <add> Mask to indicate the attention pattern for each input token with values selected in ``[0, 1]``: <add> If ``perm_mask[k, i, j] = 0``, i attend to j in batch k; <add> if ``perm_mask[k, i, j] = 1``, i does not attend to j in batch k. <add> If None, each token attends to all the others (full bidirectional attention). <add> Only used during pretraining (to define factorization order) or for sequential decoding (generation). <add> **target_mapping**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, num_predict, sequence_length)``: <add> Mask to indicate the output tokens to use. <add> If ``target_mapping[k, i, j] = 1``, the i-th predict in batch k is on the j-th token. <add> Only used during pretraining for partial prediction or for sequential decoding (generation). <add> **head_mask**: (`optional`) ``torch.Tensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``: <add> Mask to nullify selected heads of the self-attention modules. <add> Mask values selected in ``[0, 1]``: <add> ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**. 
<add>""" <add> <add>@add_start_docstrings("The bare XLNet Model transformer outputing raw hidden-states without any specific head on top.", <add> XLNET_START_DOCSTRING, XLNET_INPUTS_DOCSTRING) <add>class XLNetModel(XLNetPreTrainedModel): <add> r""" <add> Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: <add> **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)`` <add> Sequence of hidden-states at the last layer of the model. <add> **mems**: <add> list of ``torch.FloatTensor`` (one for each layer): <add> that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model <add> (see `mems` input above). Can be used to speed up sequential decoding and attend to longer context. <add> **attentions**: (`optional`, returned when ``config.output_attentions=True``) <add> list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: <add> Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. <add> **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) <add> list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) <add> of shape ``(batch_size, sequence_length, hidden_size)``: <add> Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
<add> <add> Examples:: <add> <add> >>> config = XLNetConfig.from_pretrained('xlnet-large-cased') <add> >>> tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased') <add> >>> model = XLNetModel(config) <add> >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 <add> >>> outputs = model(input_ids) <add> >>> last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple <ide> <del> TODO Lysandre filled: Added example usage <ide> """ <ide> def __init__(self, config): <ide> super(XLNetModel, self).__init__(config) <ide> def relative_positional_encoding(self, qlen, klen, bsz=None): <ide> <ide> def forward(self, input_ids, token_type_ids=None, input_mask=None, attention_mask=None, <ide> mems=None, perm_mask=None, target_mapping=None, head_mask=None): <del> """ <del> Performs a model forward pass. **Can be called by calling the class directly, once it has been instantiated.** <del> <del> Args: <del> input_ids: int32 Tensor in shape [bsz, len], the input token IDs. <del> token_type_ids: int32 Tensor in shape [bsz, len], the input segment IDs. <del> input_mask: [optional] float32 Tensor in shape [bsz, len], the input mask. <del> 0 for real tokens and 1 for padding. <del> attention_mask: [optional] float32 Tensor, SAME FUNCTION as `input_mask` <del> but with 1 for real tokens and 0 for padding. <del> Added for easy compatibility with the BERT model (which uses this negative masking). <del> You can only uses one among `input_mask` and `attention_mask` <del> mems: [optional] a list of float32 Tensors in shape [mem_len, bsz, d_model], memory <del> from previous batches. The length of the list equals n_layer. <del> If None, no memory is used. <del> perm_mask: [optional] float32 Tensor in shape [bsz, len, len]. <del> If perm_mask[k, i, j] = 0, i attend to j in batch k; <del> if perm_mask[k, i, j] = 1, i does not attend to j in batch k. <del> If None, each position attends to all the others. 
<del> target_mapping: [optional] float32 Tensor in shape [bsz, num_predict, len]. <del> If target_mapping[k, i, j] = 1, the i-th predict in batch k is <del> on the j-th token. <del> Only used during pretraining for partial prediction. <del> Set to None during finetuning. <del> head_mask: TODO Lysandre didn't fill <del> <del> <del> Returns: <del> TODO Lysandre didn't fill: Missing returns! <del> <del> Example:: <del> <del> # Already been converted into WordPiece token ids <del> input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) <del> input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) <del> token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) <del> <del> all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask) <del> # or <del> all_encoder_layers, pooled_output = model.forward(input_ids, token_type_ids, input_mask) <del> <del> TODO Lysandre filled: Filled with the LMHead example, is probably different since it has a different output <del> <del> """ <ide> # the original code for XLNet uses shapes [len, bsz] with the batch dimension at the end <ide> # but we want a unified interface in the library with the batch size on the first dimension <ide> # so we move here the first dimension (batch) to the end <ide> def forward(self, input_ids, token_type_ids=None, input_mask=None, attention_mas <ide> return outputs # outputs, new_mems, (hidden_states), (attentions) <ide> <ide> <add>@add_start_docstrings("""XLNet Model with a language modeling head on top <add> (linear layer with weights tied to the input embeddings). """, <add> XLNET_START_DOCSTRING, XLNET_INPUTS_DOCSTRING) <ide> class XLNetLMHeadModel(XLNetPreTrainedModel): <del> """XLNet model ("XLNet: Generalized Autoregressive Pretraining for Language Understanding"). <del> <del> Args: <del> `config`: a XLNetConfig class instance with the configuration to build a new model <del> `output_attentions`: If True, also output attentions weights computed by the model at each layer. 
Default: False <del> `keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient. <del> This can be used to compute head importance metrics. Default: False <del> <del> Example:: <del> <del> config = modeling.XLNetConfig(vocab_size_or_config_json_file=32000, d_model=768, <del> n_layer=12, num_attention_heads=12, intermediate_size=3072) <add> r""" <add> **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: <add> Labels for language modeling. <add> Note that the labels **are shifted** inside the model, i.e. you can set ``lm_labels = input_ids`` <add> Indices are selected in ``[-1, 0, ..., config.vocab_size]`` <add> All labels set to ``-1`` are ignored (masked), the loss is only <add> computed for labels in ``[0, ..., config.vocab_size]`` <add> <add> Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: <add> **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: <add> Language modeling loss. <add> **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)`` <add> Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). <add> **mems**: <add> list of ``torch.FloatTensor`` (one for each layer): <add> that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model <add> (see `mems` input above). Can be used to speed up sequential decoding and attend to longer context. <add> **attentions**: (`optional`, returned when ``config.output_attentions=True``) <add> list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: <add> Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
<add> **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) <add> list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) <add> of shape ``(batch_size, sequence_length, hidden_size)``: <add> Hidden-states of the model at the output of each layer plus the initial embedding outputs. <add> <add> Examples:: <add> <add> >>> config = XLNetConfig.from_pretrained('xlnet-large-cased') <add> >>> tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased') <add> >>> model = XLNetLMHeadModel(config) <add> >>> # We show how to setup inputs to predict a next token using a bi-directional context. <add> >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is very <mask>")).unsqueeze(0) # We will predict the masked token <add> >>> perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float) <add> >>> perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token <add> >>> target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float) # Shape [1, 1, seq_length] => let's predict one token <add> >>> target_mapping[0, 0, -1] = 1.0 # Our first (and only) prediction will be the last token of the sequence (the masked token) <add> >>> outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping) <add> >>> next_token_logits = outputs[0] # Output has shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size] <ide> <del> model = modeling.XLNetLMHeadModel(config=config) <del> <del> TODO Lysandre modified: Changed XLNetModel to XLNetLMHeadModel in the example <ide> """ <ide> def __init__(self, config): <ide> super(XLNetLMHeadModel, self).__init__(config) <ide> def tie_weights(self): <ide> def forward(self, input_ids, token_type_ids=None, input_mask=None, attention_mask=None, <ide> mems=None, perm_mask=None, target_mapping=None, <ide> labels=None, head_mask=None): <del> """ <del> all_encoder_layers, pooled_output = model(input_ids, 
token_type_ids, input_mask) <del> <del> Args: <del> input_ids: int32 Tensor in shape [bsz, len], the input token IDs. <del> token_type_ids: int32 Tensor in shape [bsz, len], the input segment IDs. <del> input_mask: [optional] float32 Tensor in shape [bsz, len], the input mask. <del> 0 for real tokens and 1 for padding. <del> attention_mask: [optional] float32 Tensor, SAME FUNCTION as `input_mask` <del> but with 1 for real tokens and 0 for padding. <del> Added for easy compatibility with the BERT model (which uses this negative masking). <del> You can only uses one among `input_mask` and `attention_mask` <del> mems: [optional] a list of float32 Tensors in shape [mem_len, bsz, d_model], memory <del> from previous batches. The length of the list equals n_layer. <del> If None, no memory is used. <del> perm_mask: [optional] float32 Tensor in shape [bsz, len, len]. <del> If perm_mask[k, i, j] = 0, i attend to j in batch k; <del> if perm_mask[k, i, j] = 1, i does not attend to j in batch k. <del> If None, each position attends to all the others. <del> target_mapping: [optional] float32 Tensor in shape [bsz, num_predict, len]. <del> If target_mapping[k, i, j] = 1, the i-th predict in batch k is <del> on the j-th token. <del> Only used during pretraining for partial prediction. <del> Set to None during finetuning. <del> <del> Returns: <del> A ``tuple(encoded_layers, pooled_output)``, with <del> <del> ``encoded_layers``: controlled by ``output_all_encoded_layers`` argument: <del> <del> - ``output_all_encoded_layers=True``: outputs a list of the full sequences of encoded-hidden-states \ <del> at the end of each attention block (i.e. 
12 full sequences for XLNet-base, 24 for XLNet-large), \ <del> each encoded-hidden-state is a ``torch.FloatTensor`` of size [batch_size, sequence_length, d_model], <del> <del> - ``output_all_encoded_layers=False``: outputs only the full sequence of hidden-states corresponding \ <del> to the last attention block of shape [batch_size, sequence_length, d_model], <del> <del> ``pooled_output``: a ``torch.FloatTensor`` of size [batch_size, d_model] which is the output of a \ <del> classifier pretrained on top of the hidden state associated to the first character of the \ <del> input (`CLS`) to train on the Next-Sentence task (see XLNet's paper). <del> <del> Example:: <del> <del> # Already been converted into WordPiece token ids <del> input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) <del> input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) <del> token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) <del> <del> all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask) <del> # or <del> all_encoder_layers, pooled_output = model.forward(input_ids, token_type_ids, input_mask) <del> """ <ide> transformer_outputs = self.transformer(input_ids, token_type_ids, input_mask, attention_mask, <ide> mems, perm_mask, target_mapping, head_mask) <ide> <ide> def forward(self, input_ids, token_type_ids=None, input_mask=None, attention_mas <ide> labels.view(-1)) <ide> outputs = (loss,) + outputs <ide> <del> return outputs # return (loss), logits, (mems), (hidden states), (attentions) <add> return outputs # return (loss), logits, mems, (hidden states), (attentions) <ide> <ide> <add>@add_start_docstrings("""XLNet Model with a sequence classification/regression head on top (a linear layer on top of <add> the pooled output) e.g. for GLUE tasks. 
""", <add> XLNET_START_DOCSTRING, XLNET_INPUTS_DOCSTRING) <ide> class XLNetForSequenceClassification(XLNetPreTrainedModel): <del> """XLNet model ("XLNet: Generalized Autoregressive Pretraining for Language Understanding"). <add> r""" <add> **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: <add> Labels for computing the sequence classification/regression loss. <add> Indices should be in ``[0, ..., config.num_labels]``. <add> If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss), <add> If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy). <add> <add> Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: <add> **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: <add> Classification (or regression if config.num_labels==1) loss. <add> **logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)`` <add> Classification (or regression if config.num_labels==1) scores (before SoftMax). <add> **mems**: <add> list of ``torch.FloatTensor`` (one for each layer): <add> that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model <add> (see `mems` input above). Can be used to speed up sequential decoding and attend to longer context. <add> **attentions**: (`optional`, returned when ``config.output_attentions=True``) <add> list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: <add> Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
<add> **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) <add> list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) <add> of shape ``(batch_size, sequence_length, hidden_size)``: <add> Hidden-states of the model at the output of each layer plus the initial embedding outputs. <add> <add> Examples:: <add> <add> >>> config = XLNetConfig.from_pretrained('xlnet-large-cased') <add> >>> tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased') <add> >>> <add> >>> model = XLNetForSequenceClassification(config) <add> >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 <add> >>> labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 <add> >>> outputs = model(input_ids, labels=labels) <add> >>> loss, logits = outputs[:2] <ide> <del> Args: <del> `config`: a XLNetConfig class instance with the configuration to build a new model <del> `output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False <del> `keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient. <del> This can be used to compute head importance metrics. Default: False <del> `summary_type`: str, "last", "first", "mean", or "attn". The method <del> to pool the input to get a vector representation. 
Default: last <del> <del> <del> <del> Example:: <del> <del> # Already been converted into WordPiece token ids <del> input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) <del> input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) <del> token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) <del> <del> all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask) <ide> """ <ide> def __init__(self, config): <ide> super(XLNetForSequenceClassification, self).__init__(config) <ide> def __init__(self, config): <ide> def forward(self, input_ids, token_type_ids=None, input_mask=None, attention_mask=None, <ide> mems=None, perm_mask=None, target_mapping=None, <ide> labels=None, head_mask=None): <del> """ <del> Performs a model forward pass. **Can be called by calling the class directly, once it has been instantiated.** <del> <del> Args: <del> input_ids: int32 Tensor in shape [bsz, len], the input token IDs. <del> token_type_ids: int32 Tensor in shape [bsz, len], the input segment IDs. <del> input_mask: float32 Tensor in shape [bsz, len], the input mask. <del> 0 for real tokens and 1 for padding. <del> attention_mask: [optional] float32 Tensor, SAME FUNCTION as `input_mask` <del> but with 1 for real tokens and 0 for padding. <del> Added for easy compatibility with the BERT model (which uses this negative masking). <del> You can only uses one among `input_mask` and `attention_mask` <del> mems: a list of float32 Tensors in shape [mem_len, bsz, d_model], memory <del> from previous batches. The length of the list equals n_layer. <del> If None, no memory is used. <del> perm_mask: float32 Tensor in shape [bsz, len, len]. <del> If perm_mask[k, i, j] = 0, i attend to j in batch k; <del> if perm_mask[k, i, j] = 1, i does not attend to j in batch k. <del> If None, each position attends to all the others. <del> target_mapping: float32 Tensor in shape [bsz, num_predict, len]. 
<del> If target_mapping[k, i, j] = 1, the i-th predict in batch k is <del> on the j-th token. <del> Only used during pre-training for partial prediction. <del> Set to None during fine-tuning. <del> labels: TODO Lysandre didn't fill <del> head_mask: an optional ``torch.Tensor`` of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1. <del> It's a mask to be used to nullify some heads of the transformer. 1.0 => head is fully masked, 0.0 => head is not masked. <del> <del> <del> Returns: <del> A ``tuple(logits_or_loss, mems)`` <del> <del> ``logits_or_loss``: if ``labels`` is ``None``, ``logits_or_loss`` corresponds to token logits with shape \ <del> [batch_size, sequence_length]. If it is not ``None``, it corresponds to the ``CrossEntropy`` loss \ <del> with the targets. <del> <del> ``new_mems``: list (num layers) of updated mem states at the entry of each layer \ <del> each mem state is a ``torch.FloatTensor`` of size [self.config.mem_len, batch_size, self.config.d_model] \ <del> Note that the first two dimensions are transposed in ``mems`` with regards to ``input_ids`` and ``labels`` <del> <del> Example:: <del> <del> # Already been converted into WordPiece token ids <del> input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) <del> input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) <del> token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) <del> <del> all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask) <del> # or <del> all_encoder_layers, pooled_output = model.forward(input_ids, token_type_ids, input_mask) <del> """ <ide> transformer_outputs = self.transformer(input_ids, token_type_ids, input_mask, attention_mask, <ide> mems, perm_mask, target_mapping, head_mask) <ide> output = transformer_outputs[0] <ide> def forward(self, input_ids, token_type_ids=None, input_mask=None, attention_mas <ide> loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) <ide> outputs = (loss,) + outputs <ide> <del> 
return outputs # return (loss), logits, (mems), (hidden states), (attentions) <add> return outputs # return (loss), logits, mems, (hidden states), (attentions) <ide> <ide> <add>@add_start_docstrings("""XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of <add> the hidden-states output to compute `span start logits` and `span end logits`). """, <add> XLNET_START_DOCSTRING, XLNET_INPUTS_DOCSTRING) <ide> class XLNetForQuestionAnswering(XLNetPreTrainedModel): <del> """ <del> XLNet model for Question Answering (span extraction). <add> r""" <add> **start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: <add> Labels for position (index) of the start of the labelled span for computing the token classification loss. <add> Positions are clamped to the length of the sequence (`sequence_length`). <add> Position outside of the sequence are not taken into account for computing the loss. <add> **end_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: <add> Labels for position (index) of the end of the labelled span for computing the token classification loss. <add> Positions are clamped to the length of the sequence (`sequence_length`). <add> Position outside of the sequence are not taken into account for computing the loss. <add> **is_impossible**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: <add> Labels whether a question has an answer or no answer (SQuAD 2.0) <add> **cls_index**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``: <add> Labels for position (index) of the classification token to use as input for computing plausibility of the answer. <add> **p_mask**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: <add> Optional mask of tokens which can't be in answers (e.g. [CLS], [PAD], ...) 
<add> <add> Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: <add> **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: <add> Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. <add> **start_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)`` <add> Span-start scores (before SoftMax). <add> **end_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)`` <add> Span-end scores (before SoftMax). <add> **mems**: <add> list of ``torch.FloatTensor`` (one for each layer): <add> that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model <add> (see `mems` input above). Can be used to speed up sequential decoding and attend to longer context. <add> **attentions**: (`optional`, returned when ``config.output_attentions=True``) <add> list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: <add> Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. <add> **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) <add> list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) <add> of shape ``(batch_size, sequence_length, hidden_size)``: <add> Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
<add> <add> Examples:: <add> <add> >>> config = XLMConfig.from_pretrained('xlm-mlm-en-2048') <add> >>> tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048') <add> >>> <add> >>> model = XLMForQuestionAnswering(config) <add> >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 <add> >>> start_positions = torch.tensor([1]) <add> >>> end_positions = torch.tensor([3]) <add> >>> outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions) <add> >>> loss, start_scores, end_scores = outputs[:2] <ide> <del> This module is composed of the XLNet model with a linear layer on top of <del> the sequence output that computes ``start_logits`` and ``end_logits`` <del> <del> Args: <del> `config`: a XLNetConfig class instance with the configuration to build a new model <del> `output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False <del> `keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient. <del> This can be used to compute head importance metrics. Default: False <del> <del> Example:: <del> <del> config = XLNetConfig(vocab_size_or_config_json_file=32000, hidden_size=768, <del> num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) <del> <del> model = XLNetForQuestionAnswering(config) <ide> """ <ide> def __init__(self, config): <ide> super(XLNetForQuestionAnswering, self).__init__(config) <ide> def forward(self, input_ids, token_type_ids=None, input_mask=None, attention_mas <ide> mems=None, perm_mask=None, target_mapping=None, <ide> start_positions=None, end_positions=None, cls_index=None, is_impossible=None, p_mask=None, <ide> head_mask=None): <del> <del> """ <del> Performs a model forward pass. 
**Can be called by calling the class directly, once it has been instantiated.** <del> <del> Args: <del> `input_ids`: a ``torch.LongTensor`` of shape [batch_size, sequence_length] <del> with the word token indices in the vocabulary(see the tokens pre-processing logic in the scripts <del> `run_bert_extract_features.py`, `run_bert_classifier.py` and `run_bert_squad.py`) <del> `token_type_ids`: an optional ``torch.LongTensor`` of shape [batch_size, sequence_length] with the token <del> types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to <del> a `sentence B` token (see XLNet paper for more details). <del> `attention_mask`: [optional] float32 Tensor, SAME FUNCTION as `input_mask` <del> but with 1 for real tokens and 0 for padding. <del> Added for easy compatibility with the BERT model (which uses this negative masking). <del> You can only uses one among ``input_mask`` and ``attention_mask`` <del> `input_mask`: an optional ``torch.LongTensor`` of shape [batch_size, sequence_length] with indices <del> selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max <del> input sequence length in the current batch. It's the mask that we typically use for attention when <del> a batch has varying length sentences. <del> `start_positions`: position of the first token for the labeled span: ``torch.LongTensor`` of shape [batch_size]. <del> Positions are clamped to the length of the sequence and position outside of the sequence are not taken <del> into account for computing the loss. <del> `end_positions`: position of the last token for the labeled span: ``torch.LongTensor`` of shape [batch_size]. <del> Positions are clamped to the length of the sequence and position outside of the sequence are not taken <del> into account for computing the loss. <del> `head_mask`: an optional ``torch.Tensor`` of shape [num_heads] or [num_layers, num_heads] with indices between 0 and 1. 
<del> It's a mask to be used to nullify some heads of the transformer. 1.0 => head is fully masked, 0.0 => head is not masked. <del> <del> Returns: <del> if ``start_positions`` and ``end_positions`` are not ``None``, outputs the total_loss which is the sum of the \ <del> ``CrossEntropy`` loss for the start and end token positions. <del> <del> if ``start_positions`` or ``end_positions`` is ``None``, outputs a tuple of ``start_logits``, ``end_logits`` <del> which are the logits respectively for the start and end position tokens of shape \ <del> [batch_size, sequence_length]. <del> <del> Example:: <del> <del> # Already been converted into WordPiece token ids <del> input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) <del> input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) <del> token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) <del> <del> start_logits, end_logits = model(input_ids, token_type_ids, input_mask) <del> # or <del> start_logits, end_logits = model.forward(input_ids, token_type_ids, input_mask) <del> """ <ide> transformer_outputs = self.transformer(input_ids, token_type_ids, input_mask, attention_mask, <ide> mems, perm_mask, target_mapping, head_mask) <ide> hidden_states = transformer_outputs[0]
7
Text
Text
fix a minor typo in contributing
14d85931021c99a9a3fd52cc63472d72b60fd62d
<ide><path>CONTRIBUTING.md <ide> We will do our best to keep `master` in good shape, with tests passing at all ti <ide> <ide> Use `grunt test` to run the full test suite with PhantomJS. <ide> <del>This command is just a facade to [Jest](https://facebook.github.io/jest/). You may optionally `npm install -g jest-cli` run and use Jest commands directly to have more control over how tests are executed. <add>This command is just a facade to [Jest](https://facebook.github.io/jest/). You may optionally run `npm install -g jest-cli` and use Jest commands directly to have more control over how tests are executed. <ide> <ide> For example, `jest --watch` lets you automatically run the test suite on every file change. <ide>
1
Text
Text
fix minor typo in lifting-state-up.md
cec9b074024b8f1c897a22f9b12ec9a0301967b3
<ide><path>docs/docs/lifting-state-up.md <ide> Let's recap what happens when you edit an input: <ide> <ide> * React calls the function specified as `onChange` on the DOM `<input>`. In our case, this is the `handleChange` method in `TemperatureInput` component. <ide> * The `handleChange` method in the `TemperatureInput` component calls `this.props.onTemperatureChange()` with the new desired value. Its props, including `onTemperatureChange`, were provided by its parent component, the `Calculator`. <del>* When it previously rendered, the `Calculator` has specified that `onTemperatureChange` of the Celsius `TemperatureInput` is the `Calculator`'s `handleCelsiusChange` method, and `onTemperatureChange` of the Fahrenheit `TemperatureInput` is the `Calculator`'s `handleFahrehnheitChange` method. So either of these two `Calculator` methods gets called depending on which input we edited. <add>* When it previously rendered, the `Calculator` has specified that `onTemperatureChange` of the Celsius `TemperatureInput` is the `Calculator`'s `handleCelsiusChange` method, and `onTemperatureChange` of the Fahrenheit `TemperatureInput` is the `Calculator`'s `handleFahrenheitChange` method. So either of these two `Calculator` methods gets called depending on which input we edited. <ide> * Inside these methods, the `Calculator` component asks React to re-render itself by calling `this.setState()` with the new input value and the current scale of the input we just edited. <ide> * React calls the `Calculator` component's `render` method to learn what the UI should look like. The values of both inputs are recomputed based on the current temperature and the active scale. The temperature conversion is performed here. <ide> * React calls the `render` methods of the individual `TemperatureInput` components with their new props specified by the `Calculator`. It learns what their UI should look like.
1
Javascript
Javascript
add soundcloud pulse to the app showcase
7dbba3ba5f7d6cb161d67e4070f3827ffe1033a6
<ide><path>website/src/react-native/showcase.js <ide> var featured = [ <ide> 'https://blog.gyrosco.pe/the-making-of-gyroscope-running-a4ad10acc0d0', <ide> ], <ide> }, <add> { <add> name: 'SoundCloud Pulse', <add> icon: 'https://i1.sndcdn.com/artworks-000149203716-k5je96-original.jpg', <add> link: 'https://itunes.apple.com/us/app/soundcloud-pulse-for-creators/id1074278256?mt=8', <add> author: 'SoundCloud', <add> }, <ide> { <ide> name: 'Spero for Cancer', <ide> icon: 'https://s3-us-west-1.amazonaws.com/cancerspot/site_images/Spero1024.png',
1
PHP
PHP
add deprecation notices to a few methods
ea0e45490e14512a2e67eb38dcbe05b62e3478f9
<ide><path>src/Network/Response.php <ide> protected function _sendContent($content) <ide> * - an array of string headers is also accepted <ide> * @param string|array|null $value The header value(s) <ide> * @return array List of headers to be sent <add> * @deprecated 3.4.0 Use `withHeader()`, `getHeaderLine()` and `getHeaders()` instead. <ide> */ <ide> public function header($header = null, $value = null) <ide> { <ide> public function header($header = null, $value = null) <ide> * <ide> * @param null|string $url Either null to get the current location, or a string to set one. <ide> * @return string|null When setting the location null will be returned. When reading the location <del> * a string of the current location header value (if any) will be returned. <add> * a string of the current location header value (if any) will be returned. <add> * @deprecated 3.4.0 Mutable responses are deprecated. Use `withLocation()` and `getHeaderLine()` <add> * instead. <ide> */ <ide> public function location($url = null) <ide> { <ide> protected function _setHeader($header, $value) <ide> * <ide> * @param string|callable|null $content the string or callable message to be sent <ide> * @return string Current message buffer if $content param is passed as null <add> * @deprecated 3.4.0 Mutable response methods are deprecated. Use `withBody()` and `getBody()` instead. <ide> */ <ide> public function body($content = null) <ide> { <ide> protected function _handleCallableBody(callable $content) <ide> * @param int|null $code the HTTP status code <ide> * @return int Current status code <ide> * @throws \InvalidArgumentException When an unknown status code is reached. <del> * @deprecated 3.4.0 Use getStatusCode() to read the status code instead. <add> * @deprecated 3.4.0 Use `getStatusCode()` and `withStatusCode()` instead. <ide> */ <ide> public function statusCode($code = null) <ide> {
1
Mixed
Ruby
add config.disable_sandbox option to rails console
b27105252bce6a69f6d57c05d4ea28d20e84f8ae
<ide><path>guides/source/configuring.md <ide> application. Accepts a valid week day symbol (e.g. `:monday`). <ide> end <ide> ``` <ide> <add>* `config.disable_sandbox` controls whether or not someone could start a console in sandbox mode, as a long session of sandbox console could lead database server to run out of memory. <add> <ide> * `config.eager_load` when `true`, eager loads all registered `config.eager_load_namespaces`. This includes your application, engines, Rails frameworks, and any other registered namespace. <ide> <ide> * `config.eager_load_namespaces` registers namespaces that are eager loaded when `config.eager_load` is `true`. All namespaces in the list must respond to the `eager_load!` method. <ide><path>railties/CHANGELOG.md <add>* Add `config.disable_sandbox` option to Rails console. <add> <add> This setting will disable `rails console --sandbox` mode, preventing <add> developer from accidentally starting a sandbox console, left it inactive, <add> and cause the database server to run out of memory. <add> <add> *Prem Sichanugrist* <add> <ide> * Add `-e/--environment` option to `rails initializers`. 
<ide> <ide> *Yuji Yaginuma* <ide><path>railties/lib/rails/application/configuration.rb <ide> class Configuration < ::Rails::Engine::Configuration <ide> :session_options, :time_zone, :reload_classes_only_on_change, <ide> :beginning_of_week, :filter_redirect, :x, :enable_dependency_loading, <ide> :read_encrypted_secrets, :log_level, :content_security_policy_report_only, <del> :content_security_policy_nonce_generator, :require_master_key, :credentials <add> :content_security_policy_nonce_generator, :require_master_key, :credentials, <add> :disable_sandbox <ide> <ide> attr_reader :encoding, :api_only, :loaded_config_version, :autoloader <ide> <ide> def initialize(*) <ide> @credentials.content_path = default_credentials_content_path <ide> @credentials.key_path = default_credentials_key_path <ide> @autoloader = :classic <add> @disable_sandbox = false <ide> end <ide> <ide> def load_defaults(target_version) <ide><path>railties/lib/rails/commands/console/console_command.rb <ide> def initialize(app, options = {}) <ide> @options = options <ide> <ide> app.sandbox = sandbox? <add> <add> if sandbox? && app.config.disable_sandbox <add> puts "Error: Unable to start console in sandbox mode as sandbox mode is disabled (config.disable_sandbox is true)." 
<add> exit 1 <add> end <add> <ide> app.load_console <ide> <ide> @console = app.config.console || IRB <ide><path>railties/test/application/configuration_test.rb <ide> class MyLogger < ::Logger <ide> assert_includes Rails.application.config.hosts, ".localhost" <ide> end <ide> <add> test "disable_sandbox is false by default" do <add> app "development" <add> <add> assert_equal false, Rails.configuration.disable_sandbox <add> end <add> <add> test "disable_sandbox can be overridden" do <add> add_to_config <<-RUBY <add> config.disable_sandbox = true <add> RUBY <add> <add> app "development" <add> <add> assert Rails.configuration.disable_sandbox <add> end <add> <ide> private <ide> def force_lazy_load_hooks <ide> yield # Tasty clarifying sugar, homie! We only need to reference a constant to load it. <ide><path>railties/test/application/console_test.rb <ide> def write_prompt(command, expected_output = nil) <ide> assert_output "> ", @primary <ide> end <ide> <del> def spawn_console(options) <del> Process.spawn( <add> def spawn_console(options, wait_for_prompt: true) <add> pid = Process.spawn( <ide> "#{app_path}/bin/rails console #{options}", <ide> in: @replica, out: @replica, err: @replica <ide> ) <ide> <del> assert_output "> ", @primary, 30 <add> if wait_for_prompt <add> assert_output "> ", @primary, 30 <add> end <add> <add> pid <ide> end <ide> <ide> def test_sandbox <ide> def test_sandbox <ide> @primary.puts "quit" <ide> end <ide> <add> def test_sandbox_when_sandbox_is_disabled <add> add_to_config <<-RUBY <add> config.disable_sandbox = true <add> RUBY <add> <add> output = `#{app_path}/bin/rails console --sandbox` <add> <add> assert_includes output, "sandbox mode is disabled" <add> assert_equal 1, $?.exitstatus <add> end <add> <ide> def test_environment_option_and_irb_option <ide> spawn_console("-e test -- --verbose") <ide> <ide><path>railties/test/commands/console_test.rb <ide> def app <ide> def build_app(console) <ide> mocked_console = Class.new do <ide> attr_accessor 
:sandbox <del> attr_reader :console <add> attr_reader :console, :disable_sandbox <ide> <ide> def initialize(console) <ide> @console = console
7
Javascript
Javascript
add key warning to nested collections
086636747f26b577b4a4577a0888118310ee91b3
<ide><path>src/addons/__tests__/ReactFragment-test.js <ide> describe('ReactFragment', function() { <ide> z: <span /> <ide> }; <ide> var element = <div>{[children]}</div>; <del> expect(console.error.calls.length).toBe(0); <del> var container = document.createElement('div'); <del> React.render(element, container); <ide> expect(console.error.calls.length).toBe(1); <ide> expect(console.error.calls[0].args[0]).toContain( <ide> 'Any use of a keyed object' <ide> ); <add> var container = document.createElement('div'); <add> React.render(element, container); <add> expect(console.error.calls.length).toBe(1); <ide> }); <ide> <ide> it('should warn if accessing any property on a fragment', function() { <ide><path>src/classic/element/ReactElement.js <ide> var ReactElement = function(type, key, ref, owner, context, props) { <ide> // commonly used development environments. <ide> this._store = {props: props, originalProps: assign({}, props)}; <ide> <del> // To make comparing ReactElements easier for testing purposes, we make <del> // the validation flag non-enumerable (where possible, which should <del> // include every environment we run tests in), so the test framework <del> // ignores it. <del> try { <del> Object.defineProperty(this._store, 'validated', { <del> configurable: false, <del> enumerable: false, <del> writable: true <del> }); <del> } catch (x) { <del> } <del> this._store.validated = false; <del> <ide> // We're not allowed to set props directly on the object so we early <ide> // return and rely on the prototype membrane to forward to the backing <ide> // store. 
<ide> ReactElement.createElement = function(type, config, children) { <ide> props.children = children; <ide> } else if (childrenLength > 1) { <ide> var childArray = Array(childrenLength); <add> <add> // To make comparing ReactElements easier for testing purposes, we make <add> // the validation flag non-enumerable (where possible, which should <add> // include every environment we run tests in), so the test framework <add> // ignores it. <add> try { <add> Object.defineProperty(childArray, '_reactChildKeysValidated', { <add> configurable: false, <add> enumerable: false, <add> writable: true <add> }); <add> } catch (x) { <add> } <add> childArray._reactChildKeysValidated = true; <add> <ide> for (var i = 0; i < childrenLength; i++) { <ide> childArray[i] = arguments[i + 2]; <ide> } <ide> ReactElement.cloneAndReplaceProps = function(oldElement, newProps) { <ide> oldElement._context, <ide> newProps <ide> ); <del> <del> if (__DEV__) { <del> // If the key on the original is valid, then the clone is valid <del> newElement._store.validated = oldElement._store.validated; <del> } <ide> return newElement; <ide> }; <ide> <ide><path>src/classic/element/ReactElementValidator.js <ide> function getCurrentOwnerDisplayName() { <ide> * @param {*} parentType element's parent's type. <ide> */ <ide> function validateExplicitKey(element, parentType) { <del> if (element._store.validated || element.key != null) { <add> if (element.key != null) { <ide> return; <ide> } <del> element._store.validated = true; <del> <ide> warnAndMonitorForKeyUse( <ide> 'Each child in an array or iterator should have a unique "key" prop.', <ide> element, <ide> function warnAndMonitorForKeyUse(message, element, parentType) { <ide> */ <ide> function validateChildKeys(node, parentType) { <ide> if (Array.isArray(node)) { <add> if (node._reactChildKeysValidated) { <add> // All child elements were passed in a valid location. 
<add> return; <add> } <ide> for (var i = 0; i < node.length; i++) { <ide> var child = node[i]; <ide> if (ReactElement.isValidElement(child)) { <ide> validateExplicitKey(child, parentType); <add> } else { <add> // TODO: Warn on unkeyed arrays and suggest using createFragment <add> validateChildKeys(child, parentType); <ide> } <ide> } <ide> } else if (ReactElement.isValidElement(node)) { <ide> // This element was passed in a valid location. <del> node._store.validated = true; <add> return; <ide> } else if (node) { <ide> var iteratorFn = getIteratorFn(node); <ide> // Entry iterators provide implicit keys. <ide> function validateChildKeys(node, parentType) { <ide> while (!(step = iterator.next()).done) { <ide> if (ReactElement.isValidElement(step.value)) { <ide> validateExplicitKey(step.value, parentType); <add> } else { <add> validateChildKeys(step.value, parentType); <ide> } <ide> } <ide> } <ide> function validateChildKeys(node, parentType) { <ide> for (var key in fragment) { <ide> if (fragment.hasOwnProperty(key)) { <ide> validatePropertyKey(key, fragment[key], parentType); <add> validateChildKeys(fragment[key], parentType); <ide> } <ide> } <ide> } <ide><path>src/classic/element/__tests__/ReactElementValidator-test.js <ide> describe('ReactElementValidator', function() { <ide> ); <ide> }); <ide> <add> it('warns for keys for nested arrays of elements', function() { <add> spyOn(console, 'error'); <add> <add> var divs = [ <add> [ <add> <div />, <add> <div /> <add> ], <add> <div key="foo" /> <add> ]; <add> ReactTestUtils.renderIntoDocument(<div>{divs}</div>); <add> <add> expect(console.error.argsForCall.length).toBe(1); <add> expect(console.error.argsForCall[0][0]).toBe( <add> 'Warning: Each child in an array or iterator should have a unique ' + <add> '"key" prop. Check the React.render call using <div>. See ' + <add> 'https://fb.me/react-warning-keys for more information.' 
<add> ); <add> }); <add> <add> it('warns for keys when reusing children', function() { <add> spyOn(console, 'error'); <add> <add> var f = <span />; <add> var g = <span />; <add> <add> var children = [f, g]; <add> <add> return ( <add> <div> <add> <div key="0"> <add> {g} <add> </div> <add> <div key="1"> <add> {f} <add> </div> <add> <div key="2"> <add> {children} <add> </div> <add> </div> <add> ); <add> <add> expect(console.error.argsForCall.length).toBe(1); <add> expect(console.error.argsForCall[0][0]).toBe( <add> 'Warning: Each child in an array or iterator should have a unique ' + <add> '"key" prop. Check the React.render call using <div>. See ' + <add> 'https://fb.me/react-warning-keys for more information.' <add> ); <add> }); <add> <add> it('does not warn for keys when passing children down', function() { <add> spyOn(console, 'error'); <add> <add> debugger; <add> var Wrapper = React.createClass({ <add> render: function() { <add> return ( <add> <div> <add> {this.props.children} <add> <footer /> <add> </div> <add> ); <add> } <add> }); <add> <add> ReactTestUtils.renderIntoDocument( <add> <Wrapper> <add> <span /> <add> <span /> <add> </Wrapper> <add> ); <add> <add> expect(console.error.argsForCall.length).toBe(0); <add> }); <add> <ide> it('warns for keys for iterables of elements in rest args', function() { <ide> spyOn(console, 'error'); <ide> var Component = React.createFactory(ComponentClass);
4
Javascript
Javascript
remove an internal argument to the on method
04a29696e5b176ac66401120e433d52425222f0f
<ide><path>src/event.js <ide> function safeActiveElement() { <ide> } catch ( err ) { } <ide> } <ide> <add>function on( elem, types, selector, data, fn, one ) { <add> var origFn, type; <add> <add> // Types can be a map of types/handlers <add> if ( typeof types === "object" ) { <add> // ( types-Object, selector, data ) <add> if ( typeof selector !== "string" ) { <add> // ( types-Object, data ) <add> data = data || selector; <add> selector = undefined; <add> } <add> for ( type in types ) { <add> on( elem, type, selector, data, types[ type ], one ); <add> } <add> return elem; <add> } <add> <add> if ( data == null && fn == null ) { <add> // ( types, fn ) <add> fn = selector; <add> data = selector = undefined; <add> } else if ( fn == null ) { <add> if ( typeof selector === "string" ) { <add> // ( types, selector, fn ) <add> fn = data; <add> data = undefined; <add> } else { <add> // ( types, data, fn ) <add> fn = data; <add> data = selector; <add> selector = undefined; <add> } <add> } <add> if ( fn === false ) { <add> fn = returnFalse; <add> } <add> <add> if ( one === 1 ) { <add> origFn = fn; <add> fn = function( event ) { <add> // Can use an empty set, since event contains the info <add> jQuery().off( event ); <add> return origFn.apply( this, arguments ); <add> }; <add> // Use same guid so caller can remove using origFn <add> fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); <add> } <add> return elem.each( function() { <add> jQuery.event.add( this, types, fn, data, selector ); <add> }); <add>} <add> <ide> /* <ide> * Helper functions for managing events -- not part of the public interface. <ide> * Props to Dean Edwards' addEvent library for many of the ideas. 
<ide> if ( !support.focusin ) { <ide> <ide> jQuery.fn.extend({ <ide> <del> on: function( types, selector, data, fn, /*INTERNAL*/ one ) { <del> var origFn, type; <del> <del> // Types can be a map of types/handlers <del> if ( typeof types === "object" ) { <del> // ( types-Object, selector, data ) <del> if ( typeof selector !== "string" ) { <del> // ( types-Object, data ) <del> data = data || selector; <del> selector = undefined; <del> } <del> for ( type in types ) { <del> this.on( type, selector, data, types[ type ], one ); <del> } <del> return this; <del> } <del> <del> if ( data == null && fn == null ) { <del> // ( types, fn ) <del> fn = selector; <del> data = selector = undefined; <del> } else if ( fn == null ) { <del> if ( typeof selector === "string" ) { <del> // ( types, selector, fn ) <del> fn = data; <del> data = undefined; <del> } else { <del> // ( types, data, fn ) <del> fn = data; <del> data = selector; <del> selector = undefined; <del> } <del> } <del> if ( fn === false ) { <del> fn = returnFalse; <del> } <del> <del> if ( one === 1 ) { <del> origFn = fn; <del> fn = function( event ) { <del> // Can use an empty set, since event contains the info <del> jQuery().off( event ); <del> return origFn.apply( this, arguments ); <del> }; <del> // Use same guid so caller can remove using origFn <del> fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); <del> } <del> return this.each( function() { <del> jQuery.event.add( this, types, fn, data, selector ); <del> }); <add> on: function( types, selector, data, fn ) { <add> return on( this, types, selector, data, fn ); <ide> }, <ide> one: function( types, selector, data, fn ) { <del> return this.on( types, selector, data, fn, 1 ); <add> return on( this, types, selector, data, fn, 1 ); <ide> }, <ide> off: function( types, selector, fn ) { <ide> var handleObj, type;
1
Java
Java
fix terminal state handling
b5e493366f784faf1fc194d59473a993d0db0e45
<ide><path>rxjava-core/src/main/java/rx/operators/OperatorTake.java <ide> public Observer<T> call(final Observer<? super T> o, final OperatorSubscription <ide> return new Observer<T>() { <ide> <ide> int count = 0; <add> boolean completed = false; <ide> <ide> @Override <ide> public void onCompleted() { <del> o.onCompleted(); <add> if (!completed) { <add> o.onCompleted(); <add> } <ide> } <ide> <ide> @Override <ide> public void onError(Throwable e) { <del> o.onError(e); <add> if (!completed) { <add> o.onError(e); <add> } <ide> } <ide> <ide> @Override <ide> public void onNext(T i) { <ide> if (!s.isUnsubscribed()) { <ide> o.onNext(i); <ide> if (++count >= limit) { <add> completed = true; <ide> o.onCompleted(); <ide> s.unsubscribe(); <ide> } <ide><path>rxjava-core/src/perf/java/rx/operators/OperatorTakePerformance.java <ide> public void call() { <ide> /** <ide> * Observable.range(0, 10).take(5); <ide> * <del> * Run: 10 - 6,660,042 ops/sec <del> * Run: 11 - 6,721,423 ops/sec <del> * Run: 12 - 6,556,035 ops/sec <del> * Run: 13 - 6,692,284 ops/sec <del> * Run: 14 - 6,731,287 ops/sec <add> * Run: 10 - 8,780,556 ops/sec <add> * Run: 11 - 8,822,590 ops/sec <add> * Run: 12 - 8,842,733 ops/sec <add> * Run: 13 - 8,825,486 ops/sec <add> * Run: 14 - 8,771,545 ops/sec <ide> */ <ide> public long timeTake5() { <ide> <ide><path>rxjava-core/src/test/java/rx/operators/OperatorTakeTest.java <ide> import static org.junit.Assert.*; <ide> import static org.mockito.Matchers.*; <ide> import static org.mockito.Mockito.*; <del>import static rx.operators.OperatorTake.*; <ide> <ide> import java.util.Arrays; <del>import java.util.concurrent.TimeUnit; <ide> import java.util.concurrent.atomic.AtomicBoolean; <add>import java.util.concurrent.atomic.AtomicInteger; <ide> <ide> import org.junit.Test; <ide> import org.mockito.InOrder; <ide> <ide> import rx.Observable; <ide> import rx.Observer; <ide> import rx.Subscription; <del>import rx.operators.OperationSkipTest.CustomException; <del>import 
rx.schedulers.TestScheduler; <del>import rx.subjects.PublishSubject; <ide> import rx.subscriptions.Subscriptions; <add>import rx.util.functions.Action0; <ide> import rx.util.functions.Func1; <ide> <ide> public class OperatorTakeTest {
3
PHP
PHP
remove comments before compiling components
2964d2dfd3cc50f7a709effee0af671c86587915
<ide><path>src/Illuminate/View/Compilers/BladeCompiler.php <ide> class BladeCompiler extends Compiler implements CompilerInterface <ide> * @var array <ide> */ <ide> protected $compilers = [ <del> 'Comments', <add> // 'Comments', <ide> 'Extensions', <ide> 'Statements', <ide> 'Echos', <ide> public function compileString($value) <ide> // step which compiles the component Blade tags into @component directives <ide> // that may be used by Blade. Then we should call any other precompilers. <ide> $value = $this->compileComponentTags( <del> $this->storeUncompiledBlocks($value) <add> $this->compileComments($this->storeUncompiledBlocks($value)) <ide> ); <ide> <ide> foreach ($this->precompilers as $precompiler) {
1
Python
Python
stop timer threads after staring them
154e0df85fdef7d857862430f35e531cd47d849d
<ide><path>celery/tests/test_worker.py <ide> def qos(self, **kwargs): <ide> l.qos = QoS(l.task_consumer, l.initial_prefetch_count, l.logger) <ide> l.event_dispatcher = MockEventDispatcher() <ide> l.receive_message(m.decode(), m) <add> l.eta_schedule.stop() <ide> <ide> items = [entry[2] for entry in self.eta_schedule.queue] <ide> found = 0 <ide> def test_receieve_message_eta(self): <ide> self.assertTrue(dispatcher.flushed) <ide> l.event_dispatcher = MockEventDispatcher() <ide> l.receive_message(m.decode(), m) <del> <add> l.eta_schedule.stop() <ide> in_hold = self.eta_schedule.queue[0] <ide> self.assertEqual(len(in_hold), 3) <ide> eta, priority, entry = in_hold
1
Javascript
Javascript
fix seed paths
79cbe13c62221df523726660c81413aaf580c9a2
<ide><path>server/boot/challenge.js <ide> var R = require('ramda'), <ide> resources = require('../resources/resources'), <ide> userMigration = require('../resources/middleware').userMigration, <del> MDNlinks = require('../../seed_data/bonfireMDNlinks'); <add> MDNlinks = require('../../seed/bonfireMDNlinks'); <ide> <ide> var challengeMapWithNames = resources.getChallengeMapWithNames(); <ide> var challengeMapWithIds = resources.getChallengeMapWithIds(); <ide><path>server/boot/fieldGuide.js <ide> function returnNextFieldGuide(req, res, next) { <ide> "You've read all our current Field Guide entries. You can ", <ide> 'contribute to our Field Guide ', <ide> "<a href='https://github.com/FreeCodeCamp/freecodecamp/blob/", <del> "staging/seed_data/field-guides.json'>here</a>." <add> "staging/seed/field-guides.json'>here</a>." <ide> ].join('') <ide> }); <ide> } <ide><path>server/resources/resources.js <ide> var async = require('async'), <ide> Story = require('../../common/models/Story'), <ide> Comment = require('../../common/models/Comment'), <ide> resources = require('./resources.json'), <del> nonprofits = require('../../seed_data/nonprofits.json'), <del> fieldGuides = require('../../seed_data/field-guides.json'); <add> nonprofits = require('../../seed/nonprofits.json'), <add> fieldGuides = require('../../seed/field-guides.json'); <ide> <ide> /** <ide> * Cached values <ide> Array.zip = function(left, right, combinerFunction) { <ide> if (!challengeMap) { <ide> var localChallengeMap = {}; <ide> var files = fs.readdirSync( <del> path.join(__dirname, '../../seed_data/challenges') <add> path.join(__dirname, '../../seed/challenges') <ide> ); <ide> var keyCounter = 0; <ide> files = files.map(function (file) { <ide> return require( <del> path.join(__dirname, '../../seed_data/challenges/' + file) <add> path.join(__dirname, '../../seed/challenges/' + file) <ide> ); <ide> }); <ide> files = files.sort(function (a, b) {
3
Ruby
Ruby
write options to file
3396d479d2cc87fed0f9a74f711881ea1784e0d6
<ide><path>Library/Homebrew/build.rb <ide> def install <ide> else <ide> formula.prefix.mkpath <ide> <add> (formula.logs/"00.options.out").write \ <add> "#{formula.full_name} #{formula.build.used_options.sort.join(" ")}".strip <ide> formula.install <ide> <ide> stdlibs = detect_stdlibs(ENV.compiler)
1
Go
Go
drop driver abstraction
be4f4644a8ebb940f0d68dc66f59327a74775ad6
<ide><path>builder/remotecontext/archive.go <ide> type archiveContext struct { <ide> } <ide> <ide> func (c *archiveContext) Close() error { <del> return c.root.RemoveAll(c.root.Path()) <add> return os.RemoveAll(c.root.Path()) <ide> } <ide> <ide> func convertPathError(err error, cleanpath string) error { <ide> func (c *archiveContext) Remove(path string) error { <ide> if err != nil { <ide> return err <ide> } <del> return c.root.RemoveAll(fullpath) <add> return os.RemoveAll(fullpath) <ide> } <ide> <ide> func (c *archiveContext) Hash(path string) (string, error) { <ide><path>builder/remotecontext/detect.go <ide> func openAt(remote builder.Source, path string) (driver.File, error) { <ide> if err != nil { <ide> return nil, err <ide> } <del> return remote.Root().Open(fullPath) <add> return os.Open(fullPath) <ide> } <ide> <ide> // StatAt is a helper for calling Stat on a path from a source <ide> func StatAt(remote builder.Source, path string) (os.FileInfo, error) { <ide> if err != nil { <ide> return nil, err <ide> } <del> return remote.Root().Stat(fullPath) <add> return os.Stat(fullPath) <ide> } <ide> <ide> // FullPath is a helper for getting a full path for a path from a source <ide><path>builder/remotecontext/detect_test.go <ide> func (r *stubRemote) Close() error { <ide> return errors.New("not implemented") <ide> } <ide> func (r *stubRemote) Remove(p string) error { <del> return r.root.Remove(filepath.Join(r.root.Path(), p)) <add> return os.Remove(filepath.Join(r.root.Path(), p)) <ide> } <ide><path>builder/remotecontext/lazycontext.go <ide> func (c *lazySource) Hash(path string) (string, error) { <ide> return "", errors.WithStack(convertPathError(err, cleanPath)) <ide> } <ide> <del> fi, err := c.root.Lstat(fullPath) <add> fi, err := os.Lstat(fullPath) <ide> if err != nil { <ide> // Backwards compatibility: a missing file returns a path as hash. <ide> // This is reached in the case of a broken symlink. 
<ide> func (c *lazySource) prepareHash(relPath string, fi os.FileInfo) (string, error) <ide> return "", errors.Wrapf(err, "failed to create hash for %s", relPath) <ide> } <ide> if fi.Mode().IsRegular() && fi.Size() > 0 { <del> f, err := c.root.Open(p) <add> f, err := os.Open(p) <ide> if err != nil { <ide> return "", errors.Wrapf(err, "failed to open %s", relPath) <ide> } <ide><path>builder/remotecontext/tarsum_test.go <ide> func TestRemoveDirectory(t *testing.T) { <ide> <ide> src := makeTestArchiveContext(t, contextDir) <ide> <del> _, err = src.Root().Stat(filepath.Join(src.Root().Path(), relativePath)) <add> _, err = os.Stat(filepath.Join(src.Root().Path(), relativePath)) <ide> if err != nil { <ide> t.Fatalf("Statting %s shouldn't fail: %+v", relativePath, err) <ide> } <ide> func TestRemoveDirectory(t *testing.T) { <ide> t.Fatalf("Error when executing Remove: %s", err) <ide> } <ide> <del> _, err = src.Root().Stat(filepath.Join(src.Root().Path(), relativePath)) <add> _, err = os.Stat(filepath.Join(src.Root().Path(), relativePath)) <ide> if !errors.Is(err, os.ErrNotExist) { <ide> t.Fatalf("Directory should not exist at this point: %+v ", err) <ide> } <ide><path>container/archive.go <ide> func (container *Container) StatPath(resolvedPath, absPath string) (stat *types. 
<ide> } <ide> driver := container.BaseFS <ide> <del> lstat, err := driver.Lstat(resolvedPath) <add> lstat, err := os.Lstat(resolvedPath) <ide> if err != nil { <ide> return nil, err <ide> } <ide><path>daemon/archive.go <ide> func (daemon *Daemon) containerExtractToDir(container *container.Container, path <ide> return err <ide> } <ide> <del> stat, err := driver.Lstat(resolvedPath) <add> stat, err := os.Lstat(resolvedPath) <ide> if err != nil { <ide> return err <ide> } <ide> func (daemon *Daemon) containerCopy(container *container.Container, resource str <ide> if err != nil { <ide> return nil, err <ide> } <del> stat, err := driver.Stat(basePath) <add> stat, err := os.Stat(basePath) <ide> if err != nil { <ide> return nil, err <ide> } <ide><path>daemon/graphdriver/graphtest/graphbench_unix.go <ide> package graphtest // import "github.com/docker/docker/daemon/graphdriver/graphte <ide> <ide> import ( <ide> "io" <add> "os" <ide> "path/filepath" <ide> "testing" <ide> <del> contdriver "github.com/containerd/continuity/driver" <ide> "github.com/docker/docker/pkg/stringid" <ide> "gotest.tools/v3/assert" <ide> ) <ide> func DriverBenchDeepLayerRead(b *testing.B, layerCount int, drivername string, d <ide> for i := 0; i < b.N; i++ { <ide> <ide> // Read content <del> c, err := contdriver.ReadFile(root, filepath.Join(root.Path(), "testfile.txt")) <add> c, err := os.ReadFile(filepath.Join(root.Path(), "testfile.txt")) <ide> if err != nil { <ide> b.Fatal(err) <ide> } <ide><path>daemon/graphdriver/graphtest/graphtest_unix.go <ide> func DriverTestCreateEmpty(t testing.TB, drivername string, driverOptions ...str <ide> verifyFile(t, dir.Path(), 0755|os.ModeDir, 0, 0) <ide> <ide> // Verify that the directory is empty <del> fis, err := readDir(dir, dir.Path()) <add> fis, err := readDir(dir.Path()) <ide> assert.NilError(t, err) <ide> assert.Check(t, is.Len(fis, 0)) <ide> <ide><path>daemon/graphdriver/graphtest/testutil.go <ide> package graphtest // import 
"github.com/docker/docker/daemon/graphdriver/graphte <ide> import ( <ide> "bytes" <ide> "fmt" <add> "io/fs" <ide> "math/rand" <ide> "os" <ide> "path/filepath" <ide> "sort" <ide> <del> "github.com/containerd/continuity/driver" <ide> "github.com/docker/docker/daemon/graphdriver" <ide> "github.com/docker/docker/pkg/archive" <ide> "github.com/docker/docker/pkg/stringid" <ide> func addFiles(drv graphdriver.Driver, layer string, seed int64) error { <ide> } <ide> defer drv.Put(layer) <ide> <del> if err := driver.WriteFile(root, filepath.Join(root.Path(), "file-a"), randomContent(64, seed), 0755); err != nil { <add> if err := os.WriteFile(filepath.Join(root.Path(), "file-a"), randomContent(64, seed), 0755); err != nil { <ide> return err <ide> } <del> if err := root.MkdirAll(filepath.Join(root.Path(), "dir-b"), 0755); err != nil { <add> if err := os.MkdirAll(filepath.Join(root.Path(), "dir-b"), 0755); err != nil { <ide> return err <ide> } <del> if err := driver.WriteFile(root, filepath.Join(root.Path(), "dir-b", "file-b"), randomContent(128, seed+1), 0755); err != nil { <add> if err := os.WriteFile(filepath.Join(root.Path(), "dir-b", "file-b"), randomContent(128, seed+1), 0755); err != nil { <ide> return err <ide> } <ide> <del> return driver.WriteFile(root, filepath.Join(root.Path(), "file-c"), randomContent(128*128, seed+2), 0755) <add> return os.WriteFile(filepath.Join(root.Path(), "file-c"), randomContent(128*128, seed+2), 0755) <ide> } <ide> <ide> func checkFile(drv graphdriver.Driver, layer, filename string, content []byte) error { <ide> func checkFile(drv graphdriver.Driver, layer, filename string, content []byte) e <ide> } <ide> defer drv.Put(layer) <ide> <del> fileContent, err := driver.ReadFile(root, filepath.Join(root.Path(), filename)) <add> fileContent, err := os.ReadFile(filepath.Join(root.Path(), filename)) <ide> if err != nil { <ide> return err <ide> } <ide> func addFile(drv graphdriver.Driver, layer, filename string, content []byte) err <ide> } <ide> defer 
drv.Put(layer) <ide> <del> return driver.WriteFile(root, filepath.Join(root.Path(), filename), content, 0755) <add> return os.WriteFile(filepath.Join(root.Path(), filename), content, 0755) <ide> } <ide> <ide> func addDirectory(drv graphdriver.Driver, layer, dir string) error { <ide> func addDirectory(drv graphdriver.Driver, layer, dir string) error { <ide> } <ide> defer drv.Put(layer) <ide> <del> return root.MkdirAll(filepath.Join(root.Path(), dir), 0755) <add> return os.MkdirAll(filepath.Join(root.Path(), dir), 0755) <ide> } <ide> <ide> func removeAll(drv graphdriver.Driver, layer string, names ...string) error { <ide> func removeAll(drv graphdriver.Driver, layer string, names ...string) error { <ide> defer drv.Put(layer) <ide> <ide> for _, filename := range names { <del> if err := root.RemoveAll(filepath.Join(root.Path(), filename)); err != nil { <add> if err := os.RemoveAll(filepath.Join(root.Path(), filename)); err != nil { <ide> return err <ide> } <ide> } <ide> func checkFileRemoved(drv graphdriver.Driver, layer, filename string) error { <ide> } <ide> defer drv.Put(layer) <ide> <del> if _, err := root.Stat(filepath.Join(root.Path(), filename)); err == nil { <add> if _, err := os.Stat(filepath.Join(root.Path(), filename)); err == nil { <ide> return fmt.Errorf("file still exists: %s", filepath.Join(root.Path(), filename)) <ide> } else if !os.IsNotExist(err) { <ide> return err <ide> func addManyFiles(drv graphdriver.Driver, layer string, count int, seed int64) e <ide> <ide> for i := 0; i < count; i += 100 { <ide> dir := filepath.Join(root.Path(), fmt.Sprintf("directory-%d", i)) <del> if err := root.MkdirAll(dir, 0755); err != nil { <add> if err := os.MkdirAll(dir, 0755); err != nil { <ide> return err <ide> } <ide> for j := 0; i+j < count && j < 100; j++ { <ide> file := filepath.Join(dir, fmt.Sprintf("file-%d", i+j)) <del> if err := driver.WriteFile(root, file, randomContent(64, seed+int64(i+j)), 0755); err != nil { <add> if err := os.WriteFile(file, 
randomContent(64, seed+int64(i+j)), 0755); err != nil { <ide> return err <ide> } <ide> } <ide> func changeManyFiles(drv graphdriver.Driver, layer string, count int, seed int64 <ide> var changes []archive.Change <ide> for i := 0; i < count; i += 100 { <ide> archiveRoot := fmt.Sprintf("/directory-%d", i) <del> if err := root.MkdirAll(filepath.Join(root.Path(), archiveRoot), 0755); err != nil { <add> if err := os.MkdirAll(filepath.Join(root.Path(), archiveRoot), 0755); err != nil { <ide> return nil, err <ide> } <ide> for j := 0; i+j < count && j < 100; j++ { <ide> func changeManyFiles(drv graphdriver.Driver, layer string, count int, seed int64 <ide> case 0: <ide> change.Path = filepath.Join(archiveRoot, fmt.Sprintf("file-%d", i+j)) <ide> change.Kind = archive.ChangeModify <del> if err := driver.WriteFile(root, filepath.Join(root.Path(), change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil { <add> if err := os.WriteFile(filepath.Join(root.Path(), change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil { <ide> return nil, err <ide> } <ide> // Add file <ide> case 1: <ide> change.Path = filepath.Join(archiveRoot, fmt.Sprintf("file-%d-%d", seed, i+j)) <ide> change.Kind = archive.ChangeAdd <del> if err := driver.WriteFile(root, filepath.Join(root.Path(), change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil { <add> if err := os.WriteFile(filepath.Join(root.Path(), change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil { <ide> return nil, err <ide> } <ide> // Remove file <ide> case 2: <ide> change.Path = filepath.Join(archiveRoot, fmt.Sprintf("file-%d", i+j)) <ide> change.Kind = archive.ChangeDelete <del> if err := root.Remove(filepath.Join(root.Path(), change.Path)); err != nil { <add> if err := os.Remove(filepath.Join(root.Path(), change.Path)); err != nil { <ide> return nil, err <ide> } <ide> } <ide> func checkManyFiles(drv graphdriver.Driver, layer string, count int, seed int64) <ide> dir := filepath.Join(root.Path(), 
fmt.Sprintf("directory-%d", i)) <ide> for j := 0; i+j < count && j < 100; j++ { <ide> file := filepath.Join(dir, fmt.Sprintf("file-%d", i+j)) <del> fileContent, err := driver.ReadFile(root, file) <add> fileContent, err := os.ReadFile(file) <ide> if err != nil { <ide> return err <ide> } <ide> func addLayerFiles(drv graphdriver.Driver, layer, parent string, i int) error { <ide> } <ide> defer drv.Put(layer) <ide> <del> if err := driver.WriteFile(root, filepath.Join(root.Path(), "top-id"), []byte(layer), 0755); err != nil { <add> if err := os.WriteFile(filepath.Join(root.Path(), "top-id"), []byte(layer), 0755); err != nil { <ide> return err <ide> } <ide> layerDir := filepath.Join(root.Path(), fmt.Sprintf("layer-%d", i)) <del> if err := root.MkdirAll(layerDir, 0755); err != nil { <add> if err := os.MkdirAll(layerDir, 0755); err != nil { <ide> return err <ide> } <del> if err := driver.WriteFile(root, filepath.Join(layerDir, "layer-id"), []byte(layer), 0755); err != nil { <add> if err := os.WriteFile(filepath.Join(layerDir, "layer-id"), []byte(layer), 0755); err != nil { <ide> return err <ide> } <del> return driver.WriteFile(root, filepath.Join(layerDir, "parent-id"), []byte(parent), 0755) <add> return os.WriteFile(filepath.Join(layerDir, "parent-id"), []byte(parent), 0755) <ide> } <ide> <ide> func addManyLayers(drv graphdriver.Driver, baseLayer string, count int) (string, error) { <ide> func checkManyLayers(drv graphdriver.Driver, layer string, count int) error { <ide> } <ide> defer drv.Put(layer) <ide> <del> layerIDBytes, err := driver.ReadFile(root, filepath.Join(root.Path(), "top-id")) <add> layerIDBytes, err := os.ReadFile(filepath.Join(root.Path(), "top-id")) <ide> if err != nil { <ide> return err <ide> } <ide> func checkManyLayers(drv graphdriver.Driver, layer string, count int) error { <ide> for i := count; i > 0; i-- { <ide> layerDir := filepath.Join(root.Path(), fmt.Sprintf("layer-%d", i)) <ide> <del> thisLayerIDBytes, err := driver.ReadFile(root, 
filepath.Join(layerDir, "layer-id")) <add> thisLayerIDBytes, err := os.ReadFile(filepath.Join(layerDir, "layer-id")) <ide> if err != nil { <ide> return err <ide> } <ide> if !bytes.Equal(thisLayerIDBytes, layerIDBytes) { <ide> return fmt.Errorf("mismatched file content %v, expecting %v", thisLayerIDBytes, layerIDBytes) <ide> } <del> layerIDBytes, err = driver.ReadFile(root, filepath.Join(layerDir, "parent-id")) <add> layerIDBytes, err = os.ReadFile(filepath.Join(layerDir, "parent-id")) <ide> if err != nil { <ide> return err <ide> } <ide> } <ide> return nil <ide> } <ide> <del>// readDir reads a directory just like driver.ReadDir() <add>// readDir reads a directory just like os.ReadDir() <ide> // then hides specific files (currently "lost+found") <ide> // so the tests don't "see" it <del>func readDir(r driver.Driver, dir string) ([]os.FileInfo, error) { <del> a, err := driver.ReadDir(r, dir) <add>func readDir(dir string) ([]fs.DirEntry, error) { <add> a, err := os.ReadDir(dir) <ide> if err != nil { <ide> return nil, err <ide> } <ide><path>daemon/graphdriver/graphtest/testutil_unix.go <ide> func createBase(t testing.TB, driver graphdriver.Driver, name string) { <ide> defer driver.Put(name) <ide> <ide> subdir := filepath.Join(dirFS.Path(), "a subdir") <del> assert.NilError(t, dirFS.Mkdir(subdir, 0705|os.ModeSticky)) <del> assert.NilError(t, dirFS.Lchown(subdir, 1, 2)) <add> assert.NilError(t, os.Mkdir(subdir, 0705|os.ModeSticky)) <add> assert.NilError(t, contdriver.LocalDriver.Lchown(subdir, 1, 2)) <ide> <ide> file := filepath.Join(dirFS.Path(), "a file") <del> err = contdriver.WriteFile(dirFS, file, []byte("Some data"), 0222|os.ModeSetuid) <add> err = os.WriteFile(file, []byte("Some data"), 0222|os.ModeSetuid) <ide> assert.NilError(t, err) <ide> } <ide> <ide> func verifyBase(t testing.TB, driver graphdriver.Driver, name string) { <ide> file := filepath.Join(dirFS.Path(), "a file") <ide> verifyFile(t, file, 0222|os.ModeSetuid, 0, 0) <ide> <del> files, err := 
readDir(dirFS, dirFS.Path()) <add> files, err := readDir(dirFS.Path()) <ide> assert.NilError(t, err) <ide> assert.Check(t, is.Len(files, 2)) <ide> } <ide><path>layer/layer_test.go <ide> func newTestFile(name string, content []byte, perm os.FileMode) FileApplier { <ide> <ide> func (tf *testFile) ApplyFile(root containerfs.ContainerFS) error { <ide> fullPath := filepath.Join(root.Path(), tf.name) <del> if err := root.MkdirAll(filepath.Dir(fullPath), 0755); err != nil { <add> if err := os.MkdirAll(filepath.Dir(fullPath), 0755); err != nil { <ide> return err <ide> } <ide> // Check if already exists <del> if stat, err := root.Stat(fullPath); err == nil && stat.Mode().Perm() != tf.permission { <del> if err := root.Lchmod(fullPath, tf.permission); err != nil { <add> if stat, err := os.Stat(fullPath); err == nil && stat.Mode().Perm() != tf.permission { <add> if err := driver.LocalDriver.Lchmod(fullPath, tf.permission); err != nil { <ide> return err <ide> } <ide> } <del> return driver.WriteFile(root, fullPath, tf.content, tf.permission) <add> return os.WriteFile(fullPath, tf.content, tf.permission) <ide> } <ide> <ide> func initWithFiles(files ...FileApplier) layerInit { <ide> func TestMountAndRegister(t *testing.T) { <ide> t.Fatal(err) <ide> } <ide> <del> b, err := driver.ReadFile(path2, filepath.Join(path2.Path(), "testfile.txt")) <add> b, err := os.ReadFile(filepath.Join(path2.Path(), "testfile.txt")) <ide> if err != nil { <ide> t.Fatal(err) <ide> } <ide> func TestStoreRestore(t *testing.T) { <ide> t.Fatal(err) <ide> } <ide> <del> if err := driver.WriteFile(pathFS, filepath.Join(pathFS.Path(), "testfile.txt"), []byte("nothing here"), 0644); err != nil { <add> if err := os.WriteFile(filepath.Join(pathFS.Path(), "testfile.txt"), []byte("nothing here"), 0644); err != nil { <ide> t.Fatal(err) <ide> } <ide> <ide> func TestStoreRestore(t *testing.T) { <ide> t.Fatal(err) <ide> } <ide> <del> b, err := driver.ReadFile(pathFS, filepath.Join(pathFS.Path(), "testfile.txt")) <add> b, 
err := os.ReadFile(filepath.Join(pathFS.Path(), "testfile.txt")) <ide> if err != nil { <ide> t.Fatal(err) <ide> } <ide><path>layer/mount_test.go <ide> package layer // import "github.com/docker/docker/layer" <ide> <ide> import ( <ide> "io" <add> "os" <ide> "path/filepath" <ide> "runtime" <ide> "sort" <ide> func TestMountInit(t *testing.T) { <ide> t.Fatal(err) <ide> } <ide> <del> fi, err := pathFS.Stat(filepath.Join(pathFS.Path(), "testfile.txt")) <add> fi, err := os.Stat(filepath.Join(pathFS.Path(), "testfile.txt")) <ide> if err != nil { <ide> t.Fatal(err) <ide> } <ide> <del> f, err := pathFS.Open(filepath.Join(pathFS.Path(), "testfile.txt")) <add> f, err := os.Open(filepath.Join(pathFS.Path(), "testfile.txt")) <ide> if err != nil { <ide> t.Fatal(err) <ide> } <ide> func TestMountSize(t *testing.T) { <ide> t.Fatal(err) <ide> } <ide> <del> if err := driver.WriteFile(pathFS, filepath.Join(pathFS.Path(), "file2"), content2, 0755); err != nil { <add> if err := os.WriteFile(filepath.Join(pathFS.Path(), "file2"), content2, 0755); err != nil { <ide> t.Fatal(err) <ide> } <ide> <ide> func TestMountChanges(t *testing.T) { <ide> t.Fatal(err) <ide> } <ide> <del> if err := pathFS.Lchmod(filepath.Join(pathFS.Path(), "testfile1.txt"), 0755); err != nil { <add> if err := driver.LocalDriver.Lchmod(filepath.Join(pathFS.Path(), "testfile1.txt"), 0755); err != nil { <ide> t.Fatal(err) <ide> } <ide> <del> if err := driver.WriteFile(pathFS, filepath.Join(pathFS.Path(), "testfile1.txt"), []byte("mount data!"), 0755); err != nil { <add> if err := os.WriteFile(filepath.Join(pathFS.Path(), "testfile1.txt"), []byte("mount data!"), 0755); err != nil { <ide> t.Fatal(err) <ide> } <ide> <del> if err := pathFS.Remove(filepath.Join(pathFS.Path(), "testfile2.txt")); err != nil { <add> if err := os.Remove(filepath.Join(pathFS.Path(), "testfile2.txt")); err != nil { <ide> t.Fatal(err) <ide> } <ide> <del> if err := pathFS.Lchmod(filepath.Join(pathFS.Path(), "testfile3.txt"), 0755); err != nil { <add> 
if err := driver.LocalDriver.Lchmod(filepath.Join(pathFS.Path(), "testfile3.txt"), 0755); err != nil { <ide> t.Fatal(err) <ide> } <ide> <del> if err := driver.WriteFile(pathFS, filepath.Join(pathFS.Path(), "testfile4.txt"), []byte("mount data!"), 0644); err != nil { <add> if err := os.WriteFile(filepath.Join(pathFS.Path(), "testfile4.txt"), []byte("mount data!"), 0644); err != nil { <ide> t.Fatal(err) <ide> } <ide> <ide> func TestMountApply(t *testing.T) { <ide> t.Fatal(err) <ide> } <ide> <del> f, err := pathFS.Open(filepath.Join(pathFS.Path(), "newfile.txt")) <add> f, err := os.Open(filepath.Join(pathFS.Path(), "newfile.txt")) <ide> if err != nil { <ide> t.Fatal(err) <ide> } <ide><path>pkg/containerfs/containerfs.go <ide> package containerfs // import "github.com/docker/docker/pkg/containerfs" <ide> import ( <ide> "path/filepath" <ide> <del> "github.com/containerd/continuity/driver" <ide> "github.com/moby/sys/symlink" <ide> ) <ide> <ide> type ContainerFS interface { <ide> // Path returns the path to the root. Note that this may not exist <ide> // on the local system, so the continuity operations must be used <ide> Path() string <del> <del> // Driver provides methods to manipulate files <del> driver.Driver <ide> } <ide> <ide> // NewLocalContainerFS is a helper function to implement daemon's Mount interface <ide> // when the graphdriver mount point is a local path on the machine. <ide> func NewLocalContainerFS(path string) ContainerFS { <ide> return &local{ <del> path: path, <del> Driver: driver.LocalDriver, <add> path: path, <ide> } <ide> } <ide> <ide> type local struct { <ide> path string <del> driver.Driver <ide> } <ide> <ide> func (l *local) Path() string {
14
Ruby
Ruby
hoist update for clarity
9794e85351243cac6d4e78adaba634b8e4ecad0a
<ide><path>activestorage/app/models/active_storage/blob/identifiable.rb <ide> <ide> module ActiveStorage::Blob::Identifiable <ide> def identify <del> ActiveStorage::Identification.new(self).apply <add> update!(content_type: identification.content_type, identified: true) unless identified? <ide> end <ide> <ide> def identified? <ide> identified <ide> end <add> <add> private <add> def identification <add> ActiveStorage::Identification.new self <add> end <ide> end <ide><path>activestorage/app/models/active_storage/identification.rb <ide> def initialize(blob) <ide> @blob = blob <ide> end <ide> <del> def apply <del> blob.update!(content_type: content_type, identified: true) unless blob.identified? <add> def content_type <add> Marcel::MimeType.for(identifiable_chunk, name: filename, declared_type: declared_content_type) <ide> end <ide> <ide> private <del> def content_type <del> Marcel::MimeType.for(identifiable_chunk, name: filename, declared_type: declared_content_type) <del> end <del> <del> <ide> def identifiable_chunk <ide> Net::HTTP.start(uri.host, uri.port, use_ssl: uri.scheme == "https") do |client| <ide> client.get(uri, "Range" => "bytes=0-4095").body
2
Python
Python
allow run_validators() to handle non-dict types.
587058e3c25aac4d871828a3ef19637eb9e8ddbd
<ide><path>rest_framework/serializers.py <ide> def run_validators(self, value): <ide> """ <ide> Add read_only fields with defaults to value before running validators. <ide> """ <del> to_validate = self._read_only_defaults() <del> to_validate.update(value) <add> if isinstance(value, dict): <add> to_validate = self._read_only_defaults() <add> to_validate.update(value) <add> else: <add> to_validate = value <ide> super(Serializer, self).run_validators(to_validate) <ide> <ide> def to_internal_value(self, data): <ide><path>tests/test_serializer.py <ide> def __len__(self): <ide> assert serializer.validated_data == {'char': 'abc', 'integer': 123} <ide> assert serializer.errors == {} <ide> <add> def test_custom_to_internal_value(self): <add> """ <add> to_internal_value() is expected to return a dict, but subclasses may <add> return application specific type. <add> """ <add> class Point(object): <add> def __init__(self, srid, x, y): <add> self.srid = srid <add> self.coords = (x, y) <add> <add> # Declares a serializer that converts data into an object <add> class NestedPointSerializer(serializers.Serializer): <add> longitude = serializers.FloatField(source='x') <add> latitude = serializers.FloatField(source='y') <add> <add> def to_internal_value(self, data): <add> kwargs = super(NestedPointSerializer, self).to_internal_value(data) <add> return Point(srid=4326, **kwargs) <add> <add> serializer = NestedPointSerializer(data={'longitude': 6.958307, 'latitude': 50.941357}) <add> assert serializer.is_valid() <add> assert isinstance(serializer.validated_data, Point) <add> assert serializer.validated_data.srid == 4326 <add> assert serializer.validated_data.coords[0] == 6.958307 <add> assert serializer.validated_data.coords[1] == 50.941357 <add> assert serializer.errors == {} <add> <ide> <ide> class TestValidateMethod: <ide> def test_non_field_error_validate_method(self):
2
PHP
PHP
fix psalm error
02765f12cad459cc3f35bc776e4e54c967e33044
<ide><path>src/TestSuite/Schema/SchemaGenerator.php <ide> */ <ide> namespace Cake\TestSuite\Schema; <ide> <add>use Cake\Database\Connection; <ide> use Cake\Database\Schema\TableSchema; <ide> use Cake\Datasource\ConnectionManager; <ide> use RuntimeException; <ide> public function reload(?array $tables = null): void <ide> <ide> $config = include $this->file; <ide> $connection = ConnectionManager::get($this->connection); <add> if (!($connection instanceof Connection)) { <add> throw new RuntimeException("The `{$this->connection}` connection is not a Cake\Database\Connection"); <add> } <ide> <ide> foreach ($config as $metadata) { <ide> $table = new TableSchema($metadata['table'], $metadata['columns']);
1
Java
Java
introduce aot variant of beanpostprocessor
2141373c432dfc18bb665ec4f54b1e3c3b121c99
<ide><path>spring-beans/src/main/java/org/springframework/beans/factory/annotation/AutowiredAnnotationBeanPostProcessor.java <ide> import java.lang.reflect.Constructor; <ide> import java.lang.reflect.Field; <ide> import java.lang.reflect.InvocationTargetException; <add>import java.lang.reflect.Member; <ide> import java.lang.reflect.Method; <ide> import java.lang.reflect.Modifier; <ide> import java.util.ArrayList; <ide> import java.util.Arrays; <add>import java.util.Collection; <ide> import java.util.Collections; <ide> import java.util.Iterator; <ide> import java.util.LinkedHashSet; <ide> import org.apache.commons.logging.Log; <ide> import org.apache.commons.logging.LogFactory; <ide> <add>import org.springframework.aot.generator.CodeContribution; <add>import org.springframework.aot.hint.ExecutableMode; <ide> import org.springframework.beans.BeanUtils; <ide> import org.springframework.beans.BeansException; <ide> import org.springframework.beans.PropertyValues; <ide> import org.springframework.beans.factory.InjectionPoint; <ide> import org.springframework.beans.factory.NoSuchBeanDefinitionException; <ide> import org.springframework.beans.factory.UnsatisfiedDependencyException; <add>import org.springframework.beans.factory.annotation.InjectionMetadata.InjectedElement; <ide> import org.springframework.beans.factory.config.ConfigurableListableBeanFactory; <ide> import org.springframework.beans.factory.config.DependencyDescriptor; <ide> import org.springframework.beans.factory.config.SmartInstantiationAwareBeanPostProcessor; <add>import org.springframework.beans.factory.generator.BeanInstanceAotPostProcessor; <add>import org.springframework.beans.factory.generator.BeanInstanceContributor; <add>import org.springframework.beans.factory.generator.InjectionGenerator; <ide> import org.springframework.beans.factory.support.LookupOverride; <ide> import org.springframework.beans.factory.support.MergedBeanDefinitionPostProcessor; <ide> import 
org.springframework.beans.factory.support.RootBeanDefinition; <ide> import org.springframework.lang.Nullable; <ide> import org.springframework.util.Assert; <ide> import org.springframework.util.ClassUtils; <add>import org.springframework.util.ObjectUtils; <ide> import org.springframework.util.ReflectionUtils; <ide> import org.springframework.util.StringUtils; <ide> <ide> * @see Value <ide> */ <ide> public class AutowiredAnnotationBeanPostProcessor implements SmartInstantiationAwareBeanPostProcessor, <del> MergedBeanDefinitionPostProcessor, PriorityOrdered, BeanFactoryAware { <add> MergedBeanDefinitionPostProcessor, BeanInstanceAotPostProcessor, PriorityOrdered, BeanFactoryAware { <ide> <ide> protected final Log logger = LogFactory.getLog(getClass()); <ide> <ide> public void setBeanFactory(BeanFactory beanFactory) { <ide> <ide> @Override <ide> public void postProcessMergedBeanDefinition(RootBeanDefinition beanDefinition, Class<?> beanType, String beanName) { <add> findInjectionMetadata(beanName, beanType, beanDefinition); <add> } <add> <add> @Override <add> public BeanInstanceContributor buildAotContributor(RootBeanDefinition beanDefinition, Class<?> beanType, String beanName) { <add> InjectionMetadata metadata = findInjectionMetadata(beanName, beanType, beanDefinition); <add> Collection<InjectedElement> injectedElements = metadata.getInjectedElements(); <add> return (!ObjectUtils.isEmpty(injectedElements) <add> ? 
new AutowiredAnnotationBeanInstanceContributor(injectedElements) <add> : BeanInstanceContributor.NO_OP); <add> } <add> <add> private InjectionMetadata findInjectionMetadata(String beanName, Class<?> beanType, RootBeanDefinition beanDefinition) { <ide> InjectionMetadata metadata = findAutowiringMetadata(beanName, beanType, null); <ide> metadata.checkConfigMembers(beanDefinition); <add> return metadata; <ide> } <ide> <ide> @Override <ide> private Object[] resolveMethodArguments(Method method, Object bean, @Nullable St <ide> } <ide> } <ide> <add> private static final class AutowiredAnnotationBeanInstanceContributor implements BeanInstanceContributor { <add> <add> private final Collection<InjectedElement> injectedElements; <add> <add> private final InjectionGenerator generator; <add> <add> AutowiredAnnotationBeanInstanceContributor(Collection<InjectedElement> injectedElements) { <add> this.injectedElements = injectedElements; <add> this.generator = new InjectionGenerator(); <add> } <add> <add> @Override <add> public void contribute(CodeContribution contribution) { <add> this.injectedElements.forEach(element -> { <add> boolean isRequired = isRequired(element); <add> Member member = element.getMember(); <add> analyzeMember(contribution, member); <add> contribution.statements().addStatement(this.generator.writeInjection(member, isRequired)); <add> }); <add> } <add> <add> private boolean isRequired(InjectedElement element) { <add> if (element instanceof AutowiredMethodElement injectedMethod) { <add> return injectedMethod.required; <add> } <add> else if (element instanceof AutowiredFieldElement injectedField) { <add> return injectedField.required; <add> } <add> return true; <add> } <add> <add> private void analyzeMember(CodeContribution contribution, Member member) { <add> if (member instanceof Method method) { <add> contribution.runtimeHints().reflection().registerMethod(method, <add> hint -> hint.setModes(ExecutableMode.INTROSPECT)); <add> 
contribution.protectedAccess().analyze(member, <add> this.generator.getProtectedAccessInjectionOptions(member)); <add> } <add> else if (member instanceof Field field) { <add> contribution.runtimeHints().reflection().registerField(field); <add> contribution.protectedAccess().analyze(member, <add> this.generator.getProtectedAccessInjectionOptions(member)); <add> } <add> } <add> <add> } <ide> <ide> /** <ide> * DependencyDescriptor variant with a pre-resolved target bean name. <ide><path>spring-beans/src/main/java/org/springframework/beans/factory/annotation/InjectionMetadata.java <ide> /* <del> * Copyright 2002-2021 the original author or authors. <add> * Copyright 2002-2022 the original author or authors. <ide> * <ide> * Licensed under the Apache License, Version 2.0 (the "License"); <ide> * you may not use this file except in compliance with the License. <ide> public InjectionMetadata(Class<?> targetClass, Collection<InjectedElement> eleme <ide> } <ide> <ide> <add> /** <add> * Return the {@link InjectedElement elements} to inject. <add> * @return the elements to inject <add> */ <add> public Collection<InjectedElement> getInjectedElements() { <add> return Collections.unmodifiableCollection(this.injectedElements); <add> } <add> <ide> /** <ide> * Determine whether this metadata instance needs to be refreshed. <ide> * @param clazz the current target class <ide><path>spring-beans/src/main/java/org/springframework/beans/factory/generator/BeanInstanceAotPostProcessor.java <add>/* <add> * Copyright 2002-2022 the original author or authors. <add> * <add> * Licensed under the Apache License, Version 2.0 (the "License"); <add> * you may not use this file except in compliance with the License. 
<add> * You may obtain a copy of the License at <add> * <add> * https://www.apache.org/licenses/LICENSE-2.0 <add> * <add> * Unless required by applicable law or agreed to in writing, software <add> * distributed under the License is distributed on an "AS IS" BASIS, <add> * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. <add> * See the License for the specific language governing permissions and <add> * limitations under the License. <add> */ <add> <add>package org.springframework.beans.factory.generator; <add> <add>import org.springframework.beans.factory.config.BeanPostProcessor; <add>import org.springframework.beans.factory.support.RootBeanDefinition; <add> <add>/** <add> * Strategy interface to be implemented by a {@link BeanPostProcessor} that <add> * participates in a bean instance setup and can provide an equivalent setup <add> * using generated code. <add> * <add> * <p>Contrary to other bean post processors, implementations of this interface <add> * are instantiated at build-time and should not rely on other beans in the <add> * context. <add> * <add> * @author Stephane Nicoll <add> * @since 6.0 <add> */ <add>@FunctionalInterface <add>public interface BeanInstanceAotPostProcessor extends BeanPostProcessor { <add> <add> /** <add> * Build a {@link BeanInstanceContributor} for the given bean definition. <add> * @param beanDefinition the merged bean definition for the bean <add> * @param beanType the actual type of the managed bean instance <add> * @param beanName the name of the bean <add> * @return the contributor to use <add> */ <add> BeanInstanceContributor buildAotContributor(RootBeanDefinition beanDefinition, Class<?> beanType, String beanName); <add> <add>} <ide><path>spring-beans/src/test/java/org/springframework/beans/factory/annotation/AutowiredAnnotationBeanInstanceContributorTests.java <add>/* <add> * Copyright 2002-2022 the original author or authors. 
<add> * <add> * Licensed under the Apache License, Version 2.0 (the "License"); <add> * you may not use this file except in compliance with the License. <add> * You may obtain a copy of the License at <add> * <add> * https://www.apache.org/licenses/LICENSE-2.0 <add> * <add> * Unless required by applicable law or agreed to in writing, software <add> * distributed under the License is distributed on an "AS IS" BASIS, <add> * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. <add> * See the License for the specific language governing permissions and <add> * limitations under the License. <add> */ <add> <add>package org.springframework.beans.factory.annotation; <add> <add>import org.junit.jupiter.api.Test; <add> <add>import org.springframework.aot.generator.CodeContribution; <add>import org.springframework.aot.generator.DefaultCodeContribution; <add>import org.springframework.aot.hint.ExecutableMode; <add>import org.springframework.aot.hint.RuntimeHints; <add>import org.springframework.aot.hint.TypeReference; <add>import org.springframework.beans.factory.annotation.AutowiredAnnotationBeanPostProcessorTests.ResourceInjectionBean; <add>import org.springframework.beans.factory.generator.BeanInstanceContributor; <add>import org.springframework.beans.factory.support.RootBeanDefinition; <add>import org.springframework.beans.testfixture.beans.TestBean; <add>import org.springframework.core.env.Environment; <add>import org.springframework.javapoet.support.CodeSnippet; <add> <add>import static org.assertj.core.api.Assertions.assertThat; <add> <add>/** <add> * Tests for code contribution of {@link AutowiredAnnotationBeanPostProcessor}. 
<add> * <add> * @author Stephane Nicoll <add> */ <add>class AutowiredAnnotationBeanInstanceContributorTests { <add> <add> @Test <add> void buildAotContributorWithPackageProtectedFieldInjection() { <add> CodeContribution contribution = contribute(PackageProtectedFieldInjectionSample.class); <add> assertThat(CodeSnippet.process(contribution.statements().toCodeBlock())).isEqualTo(""" <add> instanceContext.field("environment", Environment.class) <add> .invoke(beanFactory, (attributes) -> bean.environment = attributes.get(0))"""); <add> assertThat(contribution.runtimeHints().reflection().typeHints()).singleElement().satisfies(typeHint -> { <add> assertThat(typeHint.getType()).isEqualTo(TypeReference.of(PackageProtectedFieldInjectionSample.class)); <add> assertThat(typeHint.fields()).singleElement().satisfies(fieldHint -> { <add> assertThat(fieldHint.getName()).isEqualTo("environment"); <add> assertThat(fieldHint.isAllowWrite()).isTrue(); <add> assertThat(fieldHint.isAllowUnsafeAccess()).isFalse(); <add> }); <add> }); <add> assertThat(contribution.protectedAccess().getPrivilegedPackageName("com.example")) <add> .isEqualTo(PackageProtectedFieldInjectionSample.class.getPackageName()); <add> } <add> <add> @Test <add> void buildAotContributorWithPrivateFieldInjection() { <add> CodeContribution contribution = contribute(PrivateFieldInjectionSample.class); <add> assertThat(CodeSnippet.process(contribution.statements().toCodeBlock())).isEqualTo(""" <add> instanceContext.field("environment", Environment.class) <add> .invoke(beanFactory, (attributes) -> { <add> Field environmentField = ReflectionUtils.findField(AutowiredAnnotationBeanInstanceContributorTests.PrivateFieldInjectionSample.class, "environment", Environment.class); <add> ReflectionUtils.makeAccessible(environmentField); <add> ReflectionUtils.setField(environmentField, bean, attributes.get(0)); <add> })"""); <add> assertThat(contribution.runtimeHints().reflection().typeHints()).singleElement().satisfies(typeHint -> { 
<add> assertThat(typeHint.getType()).isEqualTo(TypeReference.of(PrivateFieldInjectionSample.class)); <add> assertThat(typeHint.fields()).singleElement().satisfies(fieldHint -> { <add> assertThat(fieldHint.getName()).isEqualTo("environment"); <add> assertThat(fieldHint.isAllowWrite()).isTrue(); <add> assertThat(fieldHint.isAllowUnsafeAccess()).isFalse(); <add> }); <add> }); <add> assertThat(contribution.protectedAccess().isAccessible("com.example")).isTrue(); <add> } <add> <add> @Test <add> void buildAotContributorWithPublicMethodInjection() { <add> CodeContribution contribution = contribute(PublicMethodInjectionSample.class); <add> assertThat(CodeSnippet.process(contribution.statements().toCodeBlock())).isEqualTo(""" <add> instanceContext.method("setTestBean", TestBean.class) <add> .invoke(beanFactory, (attributes) -> bean.setTestBean(attributes.get(0)))"""); <add> assertThat(contribution.runtimeHints().reflection().typeHints()).singleElement().satisfies(typeHint -> { <add> assertThat(typeHint.getType()).isEqualTo(TypeReference.of(PublicMethodInjectionSample.class)); <add> assertThat(typeHint.methods()).singleElement().satisfies(methodHint -> { <add> assertThat(methodHint.getName()).isEqualTo("setTestBean"); <add> assertThat(methodHint.getModes()).contains(ExecutableMode.INTROSPECT); <add> }); <add> }); <add> assertThat(contribution.protectedAccess().isAccessible("com.example")).isTrue(); <add> } <add> <add> @Test <add> void buildAotContributorWithInjectionPoints() { <add> CodeContribution contribution = contribute(ResourceInjectionBean.class); <add> assertThat(CodeSnippet.process(contribution.statements().toCodeBlock())).isEqualTo(""" <add> instanceContext.field("testBean", TestBean.class) <add> .resolve(beanFactory, false).ifResolved((attributes) -> { <add> Field testBeanField = ReflectionUtils.findField(AutowiredAnnotationBeanPostProcessorTests.ResourceInjectionBean.class, "testBean", TestBean.class); <add> ReflectionUtils.makeAccessible(testBeanField); <add> 
ReflectionUtils.setField(testBeanField, bean, attributes.get(0)); <add> }); <add> instanceContext.method("setTestBean2", TestBean.class) <add> .invoke(beanFactory, (attributes) -> bean.setTestBean2(attributes.get(0)));"""); <add> assertThat(contribution.runtimeHints().reflection().typeHints()).singleElement().satisfies(typeHint -> { <add> assertThat(typeHint.fields()).singleElement().satisfies(fieldHint -> <add> assertThat(fieldHint.getName()).isEqualTo("testBean")); <add> assertThat(typeHint.methods()).singleElement().satisfies(methodHint -> <add> assertThat(methodHint.getName()).isEqualTo("setTestBean2")); <add> }); <add> assertThat(contribution.protectedAccess().isAccessible("com.example")).isTrue(); <add> } <add> <add> @Test <add> void buildAotContributorWithoutInjectionPoints() { <add> BeanInstanceContributor contributor = createAotContributor(String.class); <add> assertThat(contributor).isNotNull().isSameAs(BeanInstanceContributor.NO_OP); <add> } <add> <add> private DefaultCodeContribution contribute(Class<?> type) { <add> BeanInstanceContributor contributor = createAotContributor(type); <add> assertThat(contributor).isNotNull(); <add> DefaultCodeContribution contribution = new DefaultCodeContribution(new RuntimeHints()); <add> contributor.contribute(contribution); <add> return contribution; <add> } <add> <add> private BeanInstanceContributor createAotContributor(Class<?> type) { <add> AutowiredAnnotationBeanPostProcessor bpp = new AutowiredAnnotationBeanPostProcessor(); <add> RootBeanDefinition beanDefinition = new RootBeanDefinition(type); <add> return bpp.buildAotContributor(beanDefinition, type, "test"); <add> } <add> <add> <add> public static class PackageProtectedFieldInjectionSample { <add> <add> @Autowired <add> Environment environment; <add> <add> } <add> <add> public static class PrivateFieldInjectionSample { <add> <add> @Autowired <add> private Environment environment; <add> <add> } <add> <add> public static class PublicMethodInjectionSample { 
<add> <add> @Autowired <add> public void setTestBean(TestBean testBean) { <add> <add> } <add> <add> public void setUnrelated(String unrelated) { <add> <add> } <add> <add> } <add> <add> <add>}
4
Javascript
Javascript
fix treatment of some values as non-empty
66877216bd833face753d9a5d573ad477895d880
<ide><path>lib/url.js <ide> Object.defineProperty(Url.prototype, 'port', { <ide> return null; <ide> }, <ide> set: function(v) { <del> if (v === null) { <add> if (isConsideredEmpty(v)) { <ide> this._port = -1; <ide> if (this._host) <ide> this._host = null; <ide> Object.defineProperty(Url.prototype, 'path', { <ide> return (p == null && s) ? ('/' + s) : null; <ide> }, <ide> set: function(v) { <del> if (v === null) { <add> if (isConsideredEmpty(v)) { <ide> this._pathname = this._search = null; <ide> return; <ide> } <ide> Object.defineProperty(Url.prototype, 'protocol', { <ide> return proto; <ide> }, <ide> set: function(v) { <del> if (v === null) { <add> if (isConsideredEmpty(v)) { <ide> this._protocol = null; <ide> } else { <ide> var proto = '' + v; <ide> Object.defineProperty(Url.prototype, 'href', { <ide> var parsesQueryStrings = this._parsesQueryStrings; <ide> // Reset properties. <ide> Url.call(this); <del> if (v !== null && v !== '') <add> if (!isConsideredEmpty(v)) <ide> this.parse('' + v, parsesQueryStrings, false); <ide> }, <ide> enumerable: true, <ide> Object.defineProperty(Url.prototype, 'auth', { <ide> return this._auth; <ide> }, <ide> set: function(v) { <del> this._auth = v === null ? null : '' + v; <add> this._auth = isConsideredEmpty(v) ? 
null : '' + v; <ide> this._href = ''; <ide> }, <ide> enumerable: true, <ide> Object.defineProperty(Url.prototype, 'host', { <ide> return this._host; <ide> }, <ide> set: function(v) { <del> if (v === null) { <add> if (isConsideredEmpty(v)) { <ide> this._port = -1; <ide> this._hostname = this._host = null; <ide> } else { <ide> Object.defineProperty(Url.prototype, 'hostname', { <ide> return this._hostname; <ide> }, <ide> set: function(v) { <del> if (v === null) { <add> if (isConsideredEmpty(v)) { <ide> this._hostname = null; <ide> <ide> if (this._hasValidPort()) <ide> Object.defineProperty(Url.prototype, 'hash', { <ide> return this._hash; <ide> }, <ide> set: function(v) { <del> if (v === null) { <add> if (isConsideredEmpty(v) || v === '#') { <ide> this._hash = null; <ide> } else { <ide> var hash = '' + v; <ide> Object.defineProperty(Url.prototype, 'search', { <ide> return this._search; <ide> }, <ide> set: function(v) { <del> if (v === null) { <add> if (isConsideredEmpty(v) || v === '?') { <ide> this._search = this._query = null; <ide> } else { <ide> var search = escapeSearch('' + v); <ide> Object.defineProperty(Url.prototype, 'pathname', { <ide> return this._pathname; <ide> }, <ide> set: function(v) { <del> if (v === null) { <add> if (isConsideredEmpty(v)) { <ide> this._pathname = null; <ide> } else { <ide> var pathname = escapePathName('' + v).replace(/\\/g, '/'); <ide> Object.defineProperty(Url.prototype, 'pathname', { <ide> configurable: true <ide> }); <ide> <add>function isConsideredEmpty(value) { <add> return value === null || value === undefined || value === ''; <add>} <add> <ide> // Search `char1` (integer code for a character) in `string` <ide> // starting from `fromIndex` and ending at `string.length - 1` <ide> // or when a stop character is found. 
<ide><path>test/parallel/test-url-accessors.js <ide> const accessorTests = [{ <ide> }, { <ide> // Setting href to non-null non-string coerces to string <ide> url: 'google', <del> set: {href: undefined}, <add> set: {href: 0}, <ide> test: { <del> path: 'undefined', <del> href: 'undefined' <add> path: '0', <add> href: '0' <ide> } <ide> }, { <ide> // Setting port is reflected in host <ide> const accessorTests = [{ <ide> url: 'http://www.google.com', <ide> set: {search: ''}, <ide> test: { <del> search: '?', <del> path: '/?' <add> search: null, <add> path: '/' <ide> } <ide> }, { <ide> <ide> const accessorTests = [{ <ide> }, { <ide> <ide> // Empty hash is ok <del> url: 'http://www.google.com', <add> url: 'http://www.google.com#hash', <ide> set: {hash: ''}, <ide> test: { <del> hash: '#' <add> hash: null, <add> href: 'http://www.google.com/' <ide> } <ide> }, { <ide> <ide> const accessorTests = [{ <ide> url: 'http://www.google.com', <ide> set: {pathname: ''}, <ide> test: { <del> pathname: '/' <add> pathname: null, <add> href: 'http://www.google.com' <ide> } <ide> }, { <ide> // Null path is ok <ide> const accessorTests = [{ <ide> protocol: null <ide> } <ide> }, { <del> // Empty protocol is invalid <add> // Empty protocol is ok <ide> url: 'http://www.google.com/path', <ide> set: {protocol: ''}, <ide> test: { <del> protocol: 'http:' <add> protocol: null, <add> href: '//www.google.com/path' <ide> } <ide> }, { <ide> // Set query to an object <ide> const accessorTests = [{ <ide> url: 'http://www.google.com/path?key=value', <ide> set: {path: '?key2=value2'}, <ide> test: { <del> pathname: '/', <add> pathname: null, <ide> search: '?key2=value2', <del> href: 'http://www.google.com/?key2=value2' <add> href: 'http://www.google.com?key2=value2' <ide> } <ide> }, { <ide> // path is reflected in search and pathname 3 <ide> const accessorTests = [{ <ide> search: null, <ide> href: 'http://www.google.com' <ide> } <add>}, { <add> // setting hash to '' removes any hash <add> url: 
'http://www.google.com/#hash', <add> set: {hash: ''}, <add> test: { <add> hash: null, <add> href: 'http://www.google.com/' <add> } <add>}, { <add> // setting hash to '#' removes any hash <add> url: 'http://www.google.com/#hash', <add> set: {hash: '#'}, <add> test: { <add> hash: null, <add> href: 'http://www.google.com/' <add> } <add>}, { <add> // setting search to '' removes any search <add> url: 'http://www.google.com/?search', <add> set: {search: ''}, <add> test: { <add> search: null, <add> href: 'http://www.google.com/' <add> } <add>}, { <add> // setting search to '?' removes any search <add> url: 'http://www.google.com/?search', <add> set: {search: '?'}, <add> test: { <add> search: null, <add> href: 'http://www.google.com/' <add> } <ide> } <ide> <ide> ];
2
Ruby
Ruby
fix new gcc lib directory
532460c098d7417a7477f7f54e790133880e8d26
<ide><path>Library/Homebrew/extend/os/linux/keg_relocate.rb <ide> def change_rpath(file, old_prefix, new_prefix) <ide> <ide> lib_path = "#{new_prefix}/lib" <ide> rpath << lib_path unless rpath.include? lib_path <del> rpath.prepend HOMEBREW_PREFIX/"opt/gcc/lib/current" if rpath.any? { |rp| rp.match?(%r{lib/gcc/\d+$}) } <add> <add> # Add GCC's lib directory (as of GCC 12+) to RPATH when there is existing linkage. <add> # This fixes linkage for newly-poured bottles. <add> # TODO: Replace with <add> # rpath.map! { |path| path = path.sub(%r{lib/gcc/\d+$}, "lib/gcc/current") } <add> # when Homebrew/homebrew-core#106755 is merged. <add> rpath.prepend HOMEBREW_PREFIX/"opt/gcc/lib/gcc/current" if rpath.any? { |rp| rp.match?(%r{lib/gcc/\d+$}) } <ide> <ide> rpath.join(":") <ide> end
1
Text
Text
add python 3.5
7d7175935545885b4bdbb2dd2eb63907342dbe2c
<ide><path>README.md <ide> Supports <ide> <ide> * CPython 2.7 <ide> * CPython 3.4 <add>* CPython 3.5 <ide> * OSX <ide> * Linux <ide> * Cygwin
1
Text
Text
recognize background-image as correct
28efec9e631d7ebd44f3e86a891dbb7832b84a55
<ide><path>curriculum/challenges/english/01-responsive-web-design/applied-visual-design/create-texture-by-adding-a-subtle-pattern-as-a-background-image.md <ide> Your `body` element should have a `background` property set to a `url()` with th <ide> ```js <ide> assert( <ide> code.match( <del> /background:\s*?url\(\s*("|'|)https:\/\/cdn-media-1\.freecodecamp\.org\/imgr\/MJAkxbh\.png\1\s*\)/gi <add> /background(-image)?:\s*?url\(\s*("|'|)https:\/\/cdn-media-1\.freecodecamp\.org\/imgr\/MJAkxbh\.png\2\s*\)/gi <ide> ) <ide> ); <ide> ```
1
Javascript
Javascript
copy files for canadian i18n
b58b1a7affa688feb64ec89534b004271021d347
<ide><path>lang/en-ca.js <add>// moment.js language configuration <add>// language : great britain english (en-gb) <add>// author : Chris Gedrim : https://github.com/chrisgedrim <add>(function () { <add> var lang = { <add> months : "January_February_March_April_May_June_July_August_September_October_November_December".split("_"), <add> monthsShort : "Jan_Feb_Mar_Apr_May_Jun_Jul_Aug_Sep_Oct_Nov_Dec".split("_"), <add> weekdays : "Sunday_Monday_Tuesday_Wednesday_Thursday_Friday_Saturday".split("_"), <add> weekdaysShort : "Sun_Mon_Tue_Wed_Thu_Fri_Sat".split("_"), <add> weekdaysMin : "Su_Mo_Tu_We_Th_Fr_Sa".split("_"), <add> longDateFormat : { <add> LT : "h:mm A", <add> L : "DD/MM/YYYY", <add> LL : "D MMMM YYYY", <add> LLL : "D MMMM YYYY LT", <add> LLLL : "dddd, D MMMM YYYY LT" <add> }, <add> calendar : { <add> sameDay : '[Today at] LT', <add> nextDay : '[Tomorrow at] LT', <add> nextWeek : 'dddd [at] LT', <add> lastDay : '[Yesterday at] LT', <add> lastWeek : '[last] dddd [at] LT', <add> sameElse : 'L' <add> }, <add> relativeTime : { <add> future : "in %s", <add> past : "%s ago", <add> s : "a few seconds", <add> m : "a minute", <add> mm : "%d minutes", <add> h : "an hour", <add> hh : "%d hours", <add> d : "a day", <add> dd : "%d days", <add> M : "a month", <add> MM : "%d months", <add> y : "a year", <add> yy : "%d years" <add> }, <add> ordinal : function (number) { <add> var b = number % 10; <add> return (~~ (number % 100 / 10) === 1) ? 'th' : <add> (b === 1) ? 'st' : <add> (b === 2) ? 'nd' : <add> (b === 3) ? 
'rd' : 'th'; <add> } <add> }; <add> <add> // Node <add> if (typeof module !== 'undefined' && module.exports) { <add> module.exports = lang; <add> } <add> // Browser <add> if (typeof window !== 'undefined' && this.moment && this.moment.lang) { <add> this.moment.lang('en-gb', lang); <add> } <add>}()); <ide><path>lang/fr-ca.js <add>// moment.js language configuration <add>// language : french (fr) <add>// author : John Fischer : https://github.com/jfroffice <add>(function () { <add> var lang = { <add> months : "janvier_février_mars_avril_mai_juin_juillet_août_septembre_octobre_novembre_décembre".split("_"), <add> monthsShort : "janv._févr._mars_avr._mai_juin_juil._août_sept._oct._nov._déc.".split("_"), <add> weekdays : "dimanche_lundi_mardi_mercredi_jeudi_vendredi_samedi".split("_"), <add> weekdaysShort : "dim._lun._mar._mer._jeu._ven._sam.".split("_"), <add> weekdaysMin : "Di_Lu_Ma_Me_Je_Ve_Sa".split("_"), <add> longDateFormat : { <add> LT : "HH:mm", <add> L : "DD/MM/YYYY", <add> LL : "D MMMM YYYY", <add> LLL : "D MMMM YYYY LT", <add> LLLL : "dddd D MMMM YYYY LT" <add> }, <add> calendar : { <add> sameDay: "[Aujourd'hui à] LT", <add> nextDay: '[Demain à] LT', <add> nextWeek: 'dddd [à] LT', <add> lastDay: '[Hier à] LT', <add> lastWeek: 'dddd [dernier à] LT', <add> sameElse: 'L' <add> }, <add> relativeTime : { <add> future : "dans %s", <add> past : "il y a %s", <add> s : "quelques secondes", <add> m : "une minute", <add> mm : "%d minutes", <add> h : "une heure", <add> hh : "%d heures", <add> d : "un jour", <add> dd : "%d jours", <add> M : "un mois", <add> MM : "%d mois", <add> y : "une année", <add> yy : "%d années" <add> }, <add> ordinal : function (number) { <add> return number === 1 ? 
'er' : 'ème'; <add> } <add> }; <add> <add> // Node <add> if (typeof module !== 'undefined' && module.exports) { <add> module.exports = lang; <add> } <add> // Browser <add> if (typeof window !== 'undefined' && this.moment && this.moment.lang) { <add> this.moment.lang('fr', lang); <add> } <add>}());
2
Javascript
Javascript
add a getter for moment.lang()
fd8da1c0f7d282a4a68738376cb25ed2683f50c5
<ide><path>moment.js <ide> var moment, <ide> round = Math.round, <ide> languages = {}, <add> currentLanguage = 'en', <ide> hasModule = (typeof module !== 'undefined'), <ide> paramsToParse = 'months|monthsShort|monthsParse|weekdays|weekdaysShort|longDateFormat|calendar|relativeTime|ordinal|meridiem'.split('|'), <ide> i, <ide> param, <ide> req, <ide> parse = []; <add> if (!key) { <add> return currentLanguage; <add> } <ide> if (values) { <ide> for (i = 0; i < 12; i++) { <ide> parse[i] = new RegExp('^' + values.months[i] + '|^' + values.monthsShort[i].replace('.', ''), 'i'); <ide> param = paramsToParse[i]; <ide> moment[param] = languages[key][param] || languages.en[param]; <ide> } <add> currentLanguage = key; <ide> } else { <ide> if (hasModule) { <ide> req = require('./lang/' + key); <ide><path>test/moment/lang.js <add>var moment = require("../../moment"); <add> <add>exports.lang = { <add> "getter" : function(test) { <add> test.expect(4); <add> <add> moment.lang('en'); <add> test.equal(moment.lang(), 'en', 'Lang should return en by default'); <add> <add> moment.lang('fr'); <add> test.equal(moment.lang(), 'fr', 'Lang should return the changed language'); <add> <add> moment.lang('en-gb'); <add> test.equal(moment.lang(), 'en-gb', 'Lang should return the changed language'); <add> <add> moment.lang('en'); <add> test.equal(moment.lang(), 'en', 'Lang should reset'); <add> <add> test.done(); <add> } <add>};
2
Python
Python
fix numpy.isin for timedelta dtype
f9bed20bffd88bce06dbc8be200179edfe7580a4
<ide><path>numpy/lib/arraysetops.py <ide> def in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None): <ide> # Ensure that iteration through object arrays yields size-1 arrays <ide> if ar2.dtype == object: <ide> ar2 = ar2.reshape(-1, 1) <del> # Convert booleans to uint8 so we can use the fast integer algorithm <del> if ar1.dtype == bool: <del> ar1 = ar1 + np.uint8(0) <del> if ar2.dtype == bool: <del> ar2 = ar2 + np.uint8(0) <del> <del> # Check if we can use a fast integer algorithm: <del> integer_arrays = (np.issubdtype(ar1.dtype, np.integer) and <del> np.issubdtype(ar2.dtype, np.integer)) <ide> <ide> if kind not in {None, 'sort', 'table'}: <ide> raise ValueError( <ide> f"Invalid kind: '{kind}'. Please use None, 'sort' or 'table'.") <ide> <del> if integer_arrays and kind in {None, 'table'}: <add> # Can use the table method if all arrays are integers or boolean: <add> is_int_arrays = all(ar.dtype.kind in ("u", "i", "b") for ar in (ar1, ar2)) <add> use_table_method = is_int_arrays and kind in {None, 'table'} <ide> <add> if use_table_method: <ide> if ar2.size == 0: <ide> if invert: <ide> return np.ones_like(ar1, dtype=bool) <ide> else: <ide> return np.zeros_like(ar1, dtype=bool) <ide> <add> # Convert booleans to uint8 so we can use the fast integer algorithm <add> if ar1.dtype == bool: <add> ar1 = ar1.astype(np.uint8) <add> if ar2.dtype == bool: <add> ar2 = ar2.astype(np.uint8) <add> <ide> ar2_min = np.min(ar2) <ide> ar2_max = np.max(ar2) <ide> <ide><path>numpy/lib/tests/test_arraysetops.py <ide> def test_in1d_boolean(self, kind): <ide> assert_array_equal(np.invert(expected), <ide> in1d(a, b, invert=True, kind=kind)) <ide> <add> @pytest.mark.parametrize("kind", [None, "sort"]) <add> def test_in1d_timedelta(self, kind): <add> """Test that in1d works for timedelta input""" <add> rstate = np.random.RandomState(0) <add> a = rstate.randint(0, 100, size=10) <add> b = rstate.randint(0, 100, size=10) <add> truth = in1d(a, b) <add> a_timedelta = 
a.astype("timedelta64[s]") <add> b_timedelta = b.astype("timedelta64[s]") <add> assert_array_equal(truth, in1d(a_timedelta, b_timedelta, kind=kind)) <add> <add> def test_in1d_table_timedelta_fails(self): <add> a = np.array([0, 1, 2], dtype="timedelta64[s]") <add> b = a <add> # Make sure it raises a value error: <add> with pytest.raises(ValueError): <add> in1d(a, b, kind="table") <add> <add> @pytest.mark.parametrize("kind", [None, "sort", "table"]) <add> def test_in1d_mixed_boolean(self, kind): <add> """Test that in1d works as expected for bool/int input.""" <add> for dtype in np.typecodes["AllInteger"]: <add> a = np.array([True, False, False], dtype=bool) <add> b = np.array([1, 1, 1, 1], dtype=dtype) <add> expected = np.array([True, False, False], dtype=bool) <add> assert_array_equal(in1d(a, b, kind=kind), expected) <add> <add> a, b = b, a <add> expected = np.array([True, True, True, True], dtype=bool) <add> assert_array_equal(in1d(a, b, kind=kind), expected) <add> <ide> def test_in1d_first_array_is_object(self): <ide> ar1 = [None] <ide> ar2 = np.array([1]*10)
2
Text
Text
remove unnecessary comments
87b0e5bb7a7265b60425b38560006540e5efbe46
<ide><path>curriculum/challenges/english/02-javascript-algorithms-and-data-structures/basic-javascript/add-new-properties-to-a-javascript-object.md <ide> You should add the property `bark` to `myDog`. <ide> assert(myDog.bark !== undefined); <ide> ``` <ide> <del>You should not add `bark` to the setup section. <add>You should not add `bark` to the initialization of `myDog`. <ide> <ide> ```js <ide> assert(!/bark[^\n]:/.test(code)); <ide> assert(!/bark[^\n]:/.test(code)); <ide> ## --seed-contents-- <ide> <ide> ```js <del>// Setup <ide> var myDog = { <ide> "name": "Happy Coder", <ide> "legs": 4, <ide> "tails": 1, <ide> "friends": ["freeCodeCamp Campers"] <ide> }; <ide> <del>// Only change code below this line <add> <ide> ``` <ide> <ide> # --solutions--
1
Go
Go
use filepath.walkdir instead of filepath.walk
1870d5f4aa54ec43a86ca3404bb5652c5acc0926
<ide><path>builder/dockerfile/copy.go <ide> func (o *copier) storeInPathCache(im *imageMount, path string, hash string) { <ide> func (o *copier) copyWithWildcards(origPath string) ([]copyInfo, error) { <ide> root := o.source.Root() <ide> var copyInfos []copyInfo <del> if err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { <add> if err := filepath.WalkDir(root, func(path string, _ os.DirEntry, err error) error { <ide> if err != nil { <ide> return err <ide> } <ide> func walkSource(source builder.Source, origPath string) ([]string, error) { <ide> } <ide> // Must be a dir <ide> var subfiles []string <del> err = filepath.Walk(fp, func(path string, info os.FileInfo, err error) error { <add> err = filepath.WalkDir(fp, func(path string, _ os.DirEntry, err error) error { <ide> if err != nil { <ide> return err <ide> } <ide><path>builder/dockerfile/copy_unix.go <ide> func fixPermissions(source, destination string, identity idtools.Identity, overr <ide> <ide> // We Walk on the source rather than on the destination because we don't <ide> // want to change permissions on things we haven't created or modified. <del> return filepath.Walk(source, func(fullpath string, _ os.FileInfo, _ error) error { <add> return filepath.WalkDir(source, func(fullpath string, _ os.DirEntry, _ error) error { <ide> // Do not alter the walk root iff. it existed before, as it doesn't fall under <ide> // the domain of "things we should chown". <ide> if skipChownRoot && source == fullpath { <ide><path>builder/remotecontext/tarsum.go <ide> func (cs *CachableSource) Scan() error { <ide> return err <ide> } <ide> txn := iradix.New().Txn() <del> err = filepath.Walk(cs.root, func(path string, info os.FileInfo, err error) error { <add> err = filepath.WalkDir(cs.root, func(path string, _ os.DirEntry, err error) error { <ide> if err != nil { <ide> return errors.Wrapf(err, "failed to walk %s", path) <ide> }
3
PHP
PHP
return sqlsrv in place of dblib driver
7310ce44efc018a157ddbaf3aa2b8b768b617cab
<ide><path>src/Illuminate/Database/DatabaseManager.php <ide> public function setDefaultConnection($name) <ide> */ <ide> public function supportedDrivers() <ide> { <del> return ['mysql', 'pgsql', 'sqlite', 'dblib', 'sqlsrv']; <add> return ['mysql', 'pgsql', 'sqlite', 'sqlsrv']; <ide> } <ide> <ide> /** <ide> public function supportedDrivers() <ide> */ <ide> public function availableDrivers() <ide> { <del> return array_intersect($this->supportedDrivers(), PDO::getAvailableDrivers()); <add> return array_intersect($this->supportedDrivers(), str_replace('dblib', 'sqlsrv', PDO::getAvailableDrivers())); <ide> } <ide> <ide> /**
1
Go
Go
update bundle extension
61e2d4240ba266518d0a9a862a88201e54502a88
<ide><path>api/client/stack/deploy.go <ide> func newDeployCommand(dockerCli *client.DockerCli) *cobra.Command { <ide> cmd := &cobra.Command{ <ide> Use: "deploy [OPTIONS] STACK", <ide> Aliases: []string{"up"}, <del> Short: "Create and update a stack", <add> Short: "Create and update a stack from a Distributed Application Bundle (DAB)", <ide> Args: cli.ExactArgs(1), <ide> RunE: func(cmd *cobra.Command, args []string) error { <ide> opts.namespace = args[0] <ide><path>api/client/stack/opts.go <ide> func addBundlefileFlag(opt *string, flags *pflag.FlagSet) { <ide> flags.StringVarP( <ide> opt, <ide> "bundle", "f", "", <del> "Path to a bundle (Default: STACK.dsb)") <add> "Path to a Distributed Application Bundle file (Default: STACK.dab)") <ide> } <ide> <ide> func loadBundlefile(stderr io.Writer, namespace string, path string) (*bundlefile.Bundlefile, error) { <del> defaultPath := fmt.Sprintf("%s.dsb", namespace) <add> defaultPath := fmt.Sprintf("%s.dab", namespace) <ide> <ide> if path == "" { <ide> path = defaultPath
2
Go
Go
remove error from engine.new()
7100ace42bda2660d1eaecb2ec096ba6753688ea
<ide><path>api/server/server_unit_test.go <ide> func TesthttpError(t *testing.T) { <ide> } <ide> <ide> func TestGetVersion(t *testing.T) { <del> eng := tmpEngine(t) <add> eng := engine.New() <ide> var called bool <ide> eng.Register("version", func(job *engine.Job) engine.Status { <ide> called = true <ide> func TestGetVersion(t *testing.T) { <ide> } <ide> <ide> func TestGetInfo(t *testing.T) { <del> eng := tmpEngine(t) <add> eng := engine.New() <ide> var called bool <ide> eng.Register("info", func(job *engine.Job) engine.Status { <ide> called = true <ide> func serveRequest(method, target string, body io.Reader, eng *engine.Engine, t * <ide> return r <ide> } <ide> <del>func tmpEngine(t *testing.T) *engine.Engine { <del> eng, err := engine.New() <del> if err != nil { <del> t.Fatal(err) <del> } <del> return eng <del>} <del> <ide> func readEnv(src io.Reader, t *testing.T) *engine.Env { <ide> out := engine.NewOutput() <ide> v, err := out.AddEnv() <ide><path>docker/docker.go <ide> func main() { <ide> log.Fatalf("Unable to get the full path to root (%s): %s", root, err) <ide> } <ide> } <del> <ide> if err := checkKernelAndArch(); err != nil { <ide> log.Fatal(err) <ide> } <del> eng, err := engine.New() <del> if err != nil { <del> log.Fatal(err) <del> } <add> <add> eng := engine.New() <ide> // Load builtins <ide> builtins.Register(eng) <ide> // load the daemon in the background so we can immediately start <ide><path>engine/engine.go <ide> func (eng *Engine) Register(name string, handler Handler) error { <ide> } <ide> <ide> // New initializes a new engine. 
<del>func New() (*Engine, error) { <add>func New() *Engine { <ide> eng := &Engine{ <ide> handlers: make(map[string]Handler), <ide> id: utils.RandomString(), <ide> func New() (*Engine, error) { <ide> for k, v := range globalHandlers { <ide> eng.handlers[k] = v <ide> } <del> return eng, nil <add> return eng <ide> } <ide> <ide> func (eng *Engine) String() string { <ide><path>engine/engine_test.go <ide> func TestRegister(t *testing.T) { <ide> // Register is global so let's cleanup to avoid conflicts <ide> defer unregister("dummy1") <ide> <del> eng := newTestEngine(t) <add> eng := New() <ide> <ide> //Should fail because global handlers are copied <ide> //at the engine creation <ide> func TestRegister(t *testing.T) { <ide> } <ide> <ide> func TestJob(t *testing.T) { <del> eng := newTestEngine(t) <add> eng := New() <ide> job1 := eng.Job("dummy1", "--level=awesome") <ide> <ide> if job1.handler != nil { <ide> func TestJob(t *testing.T) { <ide> } <ide> <ide> func TestEngineCommands(t *testing.T) { <del> eng := newTestEngine(t) <add> eng := New() <ide> handler := func(job *Job) Status { return StatusOK } <ide> eng.Register("foo", handler) <ide> eng.Register("bar", handler) <ide> func TestEngineCommands(t *testing.T) { <ide> } <ide> <ide> func TestEngineString(t *testing.T) { <del> eng1 := newTestEngine(t) <del> eng2 := newTestEngine(t) <add> eng1 := New() <add> eng2 := New() <ide> s1 := eng1.String() <ide> s2 := eng2.String() <ide> if eng1 == eng2 { <ide> func TestEngineString(t *testing.T) { <ide> } <ide> <ide> func TestEngineLogf(t *testing.T) { <del> eng := newTestEngine(t) <add> eng := New() <ide> input := "Test log line" <ide> if n, err := eng.Logf("%s\n", input); err != nil { <ide> t.Fatal(err) <ide> func TestEngineLogf(t *testing.T) { <ide> } <ide> <ide> func TestParseJob(t *testing.T) { <del> eng := newTestEngine(t) <add> eng := New() <ide> // Verify that the resulting job calls to the right place <ide> var called bool <ide> eng.Register("echo", func(job *Job) Status { 
<ide><path>engine/helpers_test.go <ide> import ( <ide> <ide> var globalTestID string <ide> <del>func newTestEngine(t *testing.T) *Engine { <del> eng, err := New() <del> if err != nil { <del> t.Fatal(err) <del> } <del> return eng <del>} <del> <ide> func mkJob(t *testing.T, name string, args ...string) *Job { <del> return newTestEngine(t).Job(name, args...) <add> return New().Job(name, args...) <ide> } <ide><path>engine/job_test.go <ide> import ( <ide> ) <ide> <ide> func TestJobStatusOK(t *testing.T) { <del> eng := newTestEngine(t) <add> eng := New() <ide> eng.Register("return_ok", func(job *Job) Status { return StatusOK }) <ide> err := eng.Job("return_ok").Run() <ide> if err != nil { <ide> func TestJobStatusOK(t *testing.T) { <ide> } <ide> <ide> func TestJobStatusErr(t *testing.T) { <del> eng := newTestEngine(t) <add> eng := New() <ide> eng.Register("return_err", func(job *Job) Status { return StatusErr }) <ide> err := eng.Job("return_err").Run() <ide> if err == nil { <ide> func TestJobStatusErr(t *testing.T) { <ide> } <ide> <ide> func TestJobStatusNotFound(t *testing.T) { <del> eng := newTestEngine(t) <add> eng := New() <ide> eng.Register("return_not_found", func(job *Job) Status { return StatusNotFound }) <ide> err := eng.Job("return_not_found").Run() <ide> if err == nil { <ide> func TestJobStatusNotFound(t *testing.T) { <ide> } <ide> <ide> func TestJobStdoutString(t *testing.T) { <del> eng := newTestEngine(t) <add> eng := New() <ide> // FIXME: test multiple combinations of output and status <ide> eng.Register("say_something_in_stdout", func(job *Job) Status { <ide> job.Printf("Hello world\n") <ide> func TestJobStdoutString(t *testing.T) { <ide> } <ide> <ide> func TestJobStderrString(t *testing.T) { <del> eng := newTestEngine(t) <add> eng := New() <ide> // FIXME: test multiple combinations of output and status <ide> eng.Register("say_something_in_stderr", func(job *Job) Status { <ide> job.Errorf("Warning, something might happen\nHere it comes!\nOh no...\nSomething 
happened\n") <ide><path>integration/utils_test.go <ide> func newTestEngine(t utils.Fataler, autorestart bool, root string) *engine.Engin <ide> } <ide> } <ide> os.MkdirAll(root, 0700) <del> eng, err := engine.New() <del> if err != nil { <del> t.Fatal(err) <del> } <add> <add> eng := engine.New() <ide> // Load default plugins <ide> builtins.Register(eng) <ide> // (This is manually copied and modified from main() until we have a more generic plugin system)
7
Text
Text
change links from http to https
a6ed1e51b7f2ad569032a6829c64c18782ab2b77
<ide><path>curriculum/challenges/english/08-coding-interview-prep/rosetta-code/24-game.english.md <ide> challengeType: 5 <ide> <ide> ## Description <ide> <section id='description'> <del>The <a href="http://en.wikipedia.org/wiki/24_Game" target="_blank">24 Game</a> tests a person's mental arithmetic. <add>The <a href="https://en.wikipedia.org/wiki/24_Game" target="_blank">24 Game</a> tests a person's mental arithmetic. <ide> The aim of the game is to arrange four numbers in a way that when evaluated, the result is 24 <ide> </section> <ide> <ide><path>curriculum/challenges/english/08-coding-interview-prep/rosetta-code/9-billion-names-of-god-the-integer.english.md <ide> This task is a variation of the <a href='https://en.wikipedia.org/wiki/The Nine <ide> In detail, to specify what is meant by a “name”: <ide> <ul> <ide> <li>The integer 1 has 1 name “1”.</li> <del> <li>The integer 2 has 2 names “1+1”, and “2”.</li> <add> <li>The integer 2 has 2 names “1+1” and “2”.</li> <ide> <li>The integer 3 has 3 names “1+1+1”, “2+1”, and “3”.</li> <ide> <li>The integer 4 has 5 names “1+1+1+1”, “2+1+1”, “2+2”, “3+1”, “4”.</li> <ide> <li>The integer 5 has 7 names “1+1+1+1+1”, “2+1+1+1”, “2+2+1”, “3+1+1”, “3+2”, “4+1”, “5”.</li> <ide> This can be visualized in the following form: <ide> 1 3 3 2 1 1 <ide> </pre> <ide> Where row $n$ corresponds to integer $n$, and each column $C$ in row $m$ from left to right corresponds to the number of names beginning with $C$. <del>Optionally note that the sum of the $n$-th row $P(n)$ is the <a href="http://mathworld.wolfram.com/PartitionFunctionP.html" title="link: http://mathworld.wolfram.com/PartitionFunctionP.html">integer partition function</a>. <add>Optionally note that the sum of the $n$-th row $P(n)$ is the <a href="https://mathworld.wolfram.com/PartitionFunctionP.html" title="link: https://mathworld.wolfram.com/PartitionFunctionP.html">integer partition function</a>. 
<ide> </section> <ide> <ide> ## Instructions <ide><path>curriculum/challenges/english/08-coding-interview-prep/rosetta-code/abundant-deficient-and-perfect-number-classifications.english.md <ide> challengeType: 5 <ide> <ide> ## Description <ide> <section id='description'> <del>These define three classifications of positive integers based on their <a href='http://rosettacode.org/wiki/Proper divisors' title='Proper divisors' target='_blank'>proper divisors</a>. <add>These define three classifications of positive integers based on their <a href='https://rosettacode.org/wiki/Proper divisors' title='Proper divisors' target='_blank'>proper divisors</a>. <ide> Let $P(n)$ be the sum of the proper divisors of <b>n</b> where proper divisors are all positive integers <b>n</b> other than <b>n</b> itself. <del><pre> <del>If <code style='border: 1px solid #ddd;'> P(n) < n </code> then <b>n</b> is classed as <b>deficient</b> <del>If <code style='border: 1px solid #ddd;'> P(n) === n </code> then <b>n</b> is classed as <b>perfect</b> <del>If <code style='border: 1px solid #ddd;'> P(n) > n </code> then <b>n</b> is classed as <b>abundant</b> <del></pre> <del>Example: <del><b>6</b> has proper divisors of <b>1</b>, <b>2</b>, and <b>3</b>. <del><b>1 + 2 + 3 = 6</b>, so <b>6</b> is classed as a perfect number. <add> <add>If <code>P(n) < n</code> then <code>n</code> is classed as <code>deficient</code> <add> <add>If <code>P(n) === n</code> then <code>n</code> is classed as <code>perfect</code> <add> <add>If <code>P(n) > n</code> then <code>n</code> is classed as <code>abundant</code> <add> <add><strong>Example</strong>: <add><strong>6</strong> has proper divisors of <strong>1</strong>, <strong>2</strong>, and <strong>3</strong>. <add><strong>1 + 2 + 3 = 6</strong>, so <strong>6</strong> is classed as a perfect number. 
<ide> </section> <ide> <ide> ## Instructions <ide><path>curriculum/challenges/english/08-coding-interview-prep/rosetta-code/accumulator-factory.english.md <ide> challengeType: 5 <ide> <ide> ## Description <ide> <section id='description'> <del>A problem posed by <a href='http://en.wikipedia.org/wiki/Paul_Graham' target='_blank'>Paul Graham</a> is that of creating a function that takes a single (numeric) argument and which returns another function that is an accumulator. The returned accumulator function in turn also takes a single numeric argument, and returns the sum of all the numeric values passed in so far to that accumulator (including the initial value passed when the accumulator was created). <add>A problem posed by <a href='https://en.wikipedia.org/wiki/Paul_Graham' target='_blank'>Paul Graham</a> is that of creating a function that takes a single (numeric) argument and which returns another function that is an accumulator. The returned accumulator function in turn also takes a single numeric argument, and returns the sum of all the numeric values passed in so far to that accumulator (including the initial value passed when the accumulator was created). <ide> </section> <ide> <ide> ## Instructions <ide><path>curriculum/challenges/english/08-coding-interview-prep/rosetta-code/amicable-pairs.english.md <ide> challengeType: 5 <ide> <ide> ## Description <ide> <section id='description'> <del>Two integers $N$ and $M$ are said to be <a href='https://en.wikipedia.org/wiki/Amicable numbers' title='wp: Amicable numbers' target='_blank'>amicable pairs</a> if $N \neq M$ and the sum of the <a href="http://rosettacode.org/wiki/Proper divisors" title="Proper divisors">proper divisors</a> of $N$ ($\mathrm{sum}(\mathrm{propDivs}(N))$) $= M$ as well as $\mathrm{sum}(\mathrm{propDivs}(M)) = N$. 
<add>Two integers $N$ and $M$ are said to be <a href='https://en.wikipedia.org/wiki/Amicable numbers' title='wp: Amicable numbers' target='_blank'>amicable pairs</a> if $N \neq M$ and the sum of the <a href="https://rosettacode.org/wiki/Proper divisors" title="Proper divisors">proper divisors</a> of $N$ ($\mathrm{sum}(\mathrm{propDivs}(N))$) $= M$ as well as $\mathrm{sum}(\mathrm{propDivs}(M)) = N$. <ide> <b>Example:</b> <ide> <b>1184</b> and <b>1210</b> are an amicable pair, with proper divisors: <ide> <ul> <ide><path>curriculum/challenges/english/08-coding-interview-prep/rosetta-code/averages-pythagorean-means.english.md <ide> challengeType: 5 <ide> Compute all three of the <a class='rosetta__link--wiki' href='https://en.wikipedia.org/wiki/Pythagorean means' title='wp: Pythagorean means'>Pythagorean means</a> of the set of integers <big>1</big> through <big>10</big> (inclusive). <ide> Show that <big>$A(x_1,\ldots,x_n) \geq G(x_1,\ldots,x_n) \geq H(x_1,\ldots,x_n)$</big> for this set of positive integers. 
<ide> <ul> <del> <li>The most common of the three means, the <a class='rosetta__link--rosetta' href='http://rosettacode.org/wiki/Averages/Arithmetic mean' title='Averages/Arithmetic mean' target='_blank'>arithmetic mean</a>, is the sum of the list divided by its length:<br> <add> <li>The most common of the three means, the <a class='rosetta__link--rosetta' href='https://rosettacode.org/wiki/Averages/Arithmetic mean' title='Averages/Arithmetic mean' target='_blank'>arithmetic mean</a>, is the sum of the list divided by its length:<br> <ide> <big>$ A(x_1, \ldots, x_n) = \frac{x_1 + \cdots + x_n}{n}$</big></li> <ide> <li>The <a class='rosetta__link--wiki' href='https://en.wikipedia.org/wiki/Geometric mean' title='wp: Geometric mean' target='_blank'>geometric mean</a> is the $n$th root of the product of the list:<br> <ide> <big>$ G(x_1, \ldots, x_n) = \sqrt[n]{x_1 \cdots x_n} $</big></li> <ide><path>curriculum/challenges/english/08-coding-interview-prep/rosetta-code/circles-of-given-radius-through-two-points.english.md <ide> Implement a function that takes two points and a radius and returns the two circ <ide> 0.1234, 0.9876 0.1234, 0.9876 0.0 <ide> </pre> <ide> <b>Ref:</b> <del><a href="http://mathforum.org/library/drmath/view/53027.html" title="link: http://mathforum.org/library/drmath/view/53027.html">Finding the Center of a Circle from 2 Points and Radius</a> from Math forum @ Drexel <add><a href="https://mathforum.org/library/drmath/view/53027.html" target="_blank">Finding the Center of a Circle from 2 Points and Radius</a> from Math forum @ Drexel <ide> </section> <ide> <ide> ## Tests <ide><path>curriculum/challenges/english/08-coding-interview-prep/rosetta-code/closest-pair-problem.english.md <ide> For the input, expect the argument to be an array of objects (points) with <code <ide> <b>References and further readings:</b> <ide> <ul> <ide> <li><a href="https://en.wikipedia.org/wiki/Closest pair of points problem" title="wp: Closest pair of points 
problem">Closest pair of points problem</a></li> <del> <li><a href="http://www.cs.mcgill.ca/~cs251/ClosestPair/ClosestPairDQ.html" title="link: http://www.cs.mcgill.ca/~cs251/ClosestPair/ClosestPairDQ.html">Closest Pair (McGill)</a></li> <del> <li><a href="http://www.cs.ucsb.edu/~suri/cs235/ClosestPair.pdf" title="link: http://www.cs.ucsb.edu/~suri/cs235/ClosestPair.pdf">Closest Pair (UCSB)</a></li> <del> <li><a href="http://classes.cec.wustl.edu/~cse241/handouts/closestpair.pdf" title="link: http://classes.cec.wustl.edu/~cse241/handouts/closestpair.pdf">Closest pair (WUStL)</a></li> <del> <li><a href="http://www.cs.iupui.edu/~xkzou/teaching/CS580/Divide-and-conquer-closestPair.ppt" title="link: http://www.cs.iupui.edu/~xkzou/teaching/CS580/Divide-and-conquer-closestPair.ppt">Closest pair (IUPUI)</a></li> <add> <li><a href="https://www.cs.mcgill.ca/~cs251/ClosestPair/ClosestPairDQ.html">Closest Pair (McGill)</a></li> <add> <li><a href="https://www.cs.ucsb.edu/~suri/cs235/ClosestPair.pdf">Closest Pair (UCSB)</a></li> <add> <li><a href="https://classes.cec.wustl.edu/~cse241/handouts/closestpair.pdf">Closest pair (WUStL)</a></li> <add> <li><a href="https://www.cs.iupui.edu/~xkzou/teaching/CS580/Divide-and-conquer-closestPair.ppt">Closest pair (IUPUI)</a></li> <ide> </ul> <ide> </section> <ide> <ide><path>curriculum/challenges/english/08-coding-interview-prep/rosetta-code/combinations.english.md <ide> challengeType: 5 <ide> <ide> ## Description <ide> <section id='description'> <del>Given non-negative integers <big><b>m</b></big> and <big><b>n</b></big>, generate all size <big><b>m</b></big><a href="http://mathworld.wolfram.com/Combination.html" title="link: http://mathworld.wolfram.com/Combination.html"> combinations</a> of the integers from <big><b>0</b></big> (zero) to <big><b>n-1</b></big> in sorted order (each combination is sorted and the entire table is sorted). 
<add>Given non-negative integers <big><b>m</b></big> and <big><b>n</b></big>, generate all size <big><b>m</b></big><a href="https://mathworld.wolfram.com/Combination.html"> combinations</a> of the integers from <big><b>0</b></big> (zero) to <big><b>n-1</b></big> in sorted order (each combination is sorted and the entire table is sorted). <ide> <b>Example:</b> <ide> <big><b>3</b></big> comb <big><b>5</b></big> is: <ide> <pre> <ide><path>curriculum/challenges/english/08-coding-interview-prep/rosetta-code/comma-quibbling.english.md <ide> challengeType: 5 <ide> <ide> ## Description <ide> <section id='description'> <del>Comma quibbling is a task originally set by Eric Lippert in his <a href="http://blogs.msdn.com/b/ericlippert/archive/2009/04/15/comma-quibbling.aspx" title="link: http://blogs.msdn.com/b/ericlippert/archive/2009/04/15/comma-quibbling.aspx" target="_blank">blog</a>. <add>Comma quibbling is a task originally set by Eric Lippert in his <a href="https://blogs.msdn.com/b/ericlippert/archive/2009/04/15/comma-quibbling.aspx" target="_blank">blog</a>. 
<ide> </section> <ide> <ide> ## Instructions <ide><path>curriculum/challenges/english/08-coding-interview-prep/rosetta-code/count-the-coins.english.md <ide> challengeType: 5 <ide> <ide> ## Description <ide> <section id='description'> <del>There are four types of common coins in <a href="https://en.wikipedia.org/wiki/United_States" title="link: https://en.wikipedia.org/wiki/United_States">US</a> currency: <add>There are four types of common coins in <a href="https://en.wikipedia.org/wiki/United_States" target="_blank">US</a> currency: <ide> <ul> <ide> <li>quarters (25 cents)</li> <ide> <li>dimes (10 cents)</li> <ide> There are four types of common coins in <a href="https://en.wikipedia.org/wiki/U <ide> Implement a function to determine how many ways there are to make change for a dollar using these common coins (1 dollar = 100 cents) <ide> <b>Reference:</b> <ide> <ul> <del> <li><a href="http://mitpress.mit.edu/sicp/full-text/book/book-Z-H-11.html#%_sec_Temp_52" title="link: http://mitpress.mit.edu/sicp/full-text/book/book-Z-H-11.html#%_sec_Temp_52">an algorithm from MIT Press</a>.</li> <add> <li><a href="https://mitpress.mit.edu/sicp/full-text/book/book-Z-H-11.html#%_sec_Temp_52" target="_blank">an algorithm from MIT Press</a>.</li> <ide> </ul> <ide> </section> <ide> <ide><path>curriculum/challenges/english/08-coding-interview-prep/rosetta-code/deal-cards-for-freecell.english.md <ide> challengeType: 5 <ide> <ide> ## Description <ide> <section id='description'> <del><i>FreeCell</i> is the solitaire card game that Paul Alfille introduced to the PLATO system in 1978. Jim Horne, at Microsoft, changed the name to FreeCell and reimplemented the game for <a href="http://rosettacode.org/wiki/DOS" title="DOS">DOS</a>, then <a href="http://rosettacode.org/wiki/Windows" title="Windows">Windows</a>. This version introduced 32000 numbered deals. 
(The <a href="http://www.solitairelaboratory.com/fcfaq.html" title="link: http://www.solitairelaboratory.com/fcfaq.html">FreeCell FAQ</a> tells this history.) <del>As the game became popular, Jim Horne disclosed <a href="http://www.solitairelaboratory.com/mshuffle.txt" title="link: http://www.solitairelaboratory.com/mshuffle.txt">the algorithm</a>, and other implementations of FreeCell began to reproduce the Microsoft deals. These deals are numbered from 1 to 32000. Newer versions from Microsoft have 1 million deals, numbered from 1 to 1000000; some implementations allow numbers outside that range. <del>The algorithm uses this <a href="http://rosettacode.org/wiki/linear congruential generator" title="linear congruential generator">linear congruential generator</a> from Microsoft C: <add><i>FreeCell</i> is the solitaire card game that Paul Alfille introduced to the PLATO system in 1978. Jim Horne, at Microsoft, changed the name to FreeCell and reimplemented the game for <a href="https://rosettacode.org/wiki/DOS" title="DOS">DOS</a>, then <a href="https://rosettacode.org/wiki/Windows" title="Windows">Windows</a>. This version introduced 32000 numbered deals. (The <a href="https://www.solitairelaboratory.com/fcfaq.html">FreeCell FAQ</a> tells this history.) <add>As the game became popular, Jim Horne disclosed <a href="https://www.solitairelaboratory.com/mshuffle.txt">the algorithm</a>, and other implementations of FreeCell began to reproduce the Microsoft deals. These deals are numbered from 1 to 32000. Newer versions from Microsoft have 1 million deals, numbered from 1 to 1000000; some implementations allow numbers outside that range. 
<add>The algorithm uses this <a href="https://rosettacode.org/wiki/linear congruential generator" title="linear congruential generator">linear congruential generator</a> from Microsoft C: <ide> <ul> <ide> <li>$state_{n + 1} \equiv 214013 \times state_n + 2531011 \pmod{2^{31}}$</li> <ide> <li>$rand_n = state_n \div 2^{16}$</li> <ide> The algorithm uses this <a href="http://rosettacode.org/wiki/linear congruential <ide> The algorithm follows: <ide> <ol> <ide> <li>Seed the RNG with the number of the deal. <del> <li>Create an <a href="http://rosettacode.org/wiki/array" title="array">array</a> of 52 cards: Ace of Clubs, Ace of Diamonds, Ace of Hearts, Ace of Spades, 2 of Clubs, 2 of Diamonds, and so on through the ranks: Ace, 2, 3, 4, 5, 6, 7, 8, 9, 10, Jack, Queen, King. The array indexes are 0 to 51, with Ace of Clubs at 0, and King of Spades at 51.</li> <add> <li>Create an <a href="https://rosettacode.org/wiki/array" title="array">array</a> of 52 cards: Ace of Clubs, Ace of Diamonds, Ace of Hearts, Ace of Spades, 2 of Clubs, 2 of Diamonds, and so on through the ranks: Ace, 2, 3, 4, 5, 6, 7, 8, 9, 10, Jack, Queen, King. The array indexes are 0 to 51, with Ace of Clubs at 0, and King of Spades at 51.</li> <ide> <li>Until the array is empty:</li> <ide> <li>Choose a random card at index &equiv; next random number (mod array length).</li> <ide> <ul> <ide> The algorithm follows: <ide> ## Instructions <ide> <section id='instructions'> <ide> Write a function to take a deal number and deal cards in the same order as this algorithm. The function must return a two dimensional array representing the FreeCell board. <del>Deals can also be checked against <a href="http://freecellgamesolutions.com/" title="link: http://freecellgamesolutions.com/">FreeCell solutions to 1000000 games</a>. (Summon a video solution, and it displays the initial deal.) <add>Deals can also be checked against <a href="https://freecellgamesolutions.com/">FreeCell solutions to 1000000 games</a>. 
(Summon a video solution, and it displays the initial deal.) <ide> </section> <ide> <ide> ## Tests <ide><path>curriculum/challenges/english/08-coding-interview-prep/rosetta-code/execute-brain.english.md <ide> challengeType: 5 <ide> ## Description <ide> <section id='description'> <ide> Write a function to implement a Brain**** interpreter. The function will take a string as a parameter and should return a string as the output. More details are given below: <del>RCBF is a set of <a href="http://rosettacode.org/wiki/Brainf***" title="Brainf***">Brainf***</a> compilers and interpreters written for Rosetta Code in a variety of languages. <add>RCBF is a set of <a href="https://rosettacode.org/wiki/Brainf***" title="Brainf***">Brainf***</a> compilers and interpreters written for Rosetta Code in a variety of languages. <ide> Below are links to each of the versions of RCBF. <ide> An implementation need only properly implement the following instructions: <ide> <ide><path>curriculum/challenges/english/08-coding-interview-prep/rosetta-code/factors-of-a-mersenne-number.english.md <ide> challengeType: 5 <ide> A Mersenne number is a number in the form of 2<sup>P</sup>-1. <ide> If P is prime, the Mersenne number may be a Mersenne prime <ide> (if P is not prime, the Mersenne number is also not prime). <del>In the search for Mersenne prime numbers it is advantageous to eliminate exponents by finding a small factor before starting a, potentially lengthy, <a href="http://rosettacode.org/wiki/Lucas-Lehmer test" title="Lucas-Lehmer test" target="_blank">Lucas-Lehmer test</a>. <add>In the search for Mersenne prime numbers it is advantageous to eliminate exponents by finding a small factor before starting a, potentially lengthy, <a href="https://rosettacode.org/wiki/Lucas-Lehmer test" title="Lucas-Lehmer test" target="_blank">Lucas-Lehmer test</a>. 
<ide> There are very efficient algorithms for determining if a number divides 2<sup>P</sup>-1 (or equivalently, if 2<sup>P</sup> mod (the number) = 1). <ide> Some languages already have built-in implementations of this exponent-and-mod operation (called modPow or similar). <ide> The following is how to implement this modPow yourself: <ide> Since 2<sup>23</sup> mod 47 = 1, 47 is a factor of 2<sup>P</sup>-1. <ide> Since we've shown that 47 is a factor, 2<sup>23</sup>-1 is not prime. <ide> Further properties of Mersenne numbers allow us to refine the process even more. <ide> Any factor q of 2<sup>P</sup>-1 must be of the form 2kP+1, k being a positive integer or zero. Furthermore, q must be 1 or 7 mod 8. <del>Finally any potential factor q must be <a href="http://rosettacode.org/wiki/Primality by Trial Division" title="Primality by Trial Division" target="_blank">prime</a>. <add>Finally any potential factor q must be <a href="https://rosettacode.org/wiki/Primality by Trial Division" title="Primality by Trial Division" target="_blank">prime</a>. <ide> As in other trial division algorithms, the algorithm stops when 2kP+1 > sqrt(N).These primality tests only work on Mersenne numbers where P is prime. For example, M<sub>4</sub>=15 yields no factors using these techniques, but factors into 3 and 5, neither of which fit 2kP+1. 
<ide> </section> <ide> <ide><path>curriculum/challenges/english/08-coding-interview-prep/rosetta-code/fibonacci-n-step-number-sequences.english.md <ide> challengeType: 5 <ide> <ide> ## Description <ide> <section id='description'> <del>These number series are an expansion of the ordinary <a href="http://rosettacode.org/wiki/Fibonacci sequence" title="Fibonacci sequence" target="_blank">Fibonacci sequence</a> where: <add>These number series are an expansion of the ordinary <a href="https://rosettacode.org/wiki/Fibonacci sequence" title="Fibonacci sequence" target="_blank">Fibonacci sequence</a> where: <ide> <ol> <ide> <li>For $n = 2$ we have the Fibonacci sequence; with initial values $[1, 1]$ and $F_k^2 = F_{k-1}^2 + F_{k-2}^2$</li> <ide> <li>For $n = 3$ we have the tribonacci sequence; with initial values $[1, 1, 2]$ and $F_k^3 = F_{k-1}^3 + F_{k-2}^3 + F_{k-3}^3$</li> <ide><path>curriculum/challenges/english/08-coding-interview-prep/rosetta-code/fibonacci-word.english.md <ide> challengeType: 5 <ide> <ide> ## Description <ide> <section id='description'> <del>The Fibonacci Word may be created in a manner analogous to the Fibonacci Sequence <a href="http://hal.archives-ouvertes.fr/docs/00/36/79/72/PDF/The_Fibonacci_word_fractal.pdf" title="link: http://hal.archives-ouvertes.fr/docs/00/36/79/72/PDF/The_Fibonacci_word_fractal.pdf" target="_blank">as described here</a>: <add>The Fibonacci Word may be created in a manner analogous to the Fibonacci Sequence <a href="https://hal.archives-ouvertes.fr/docs/00/36/79/72/PDF/The_Fibonacci_word_fractal.pdf" target="_blank">as described here</a>: <ide> <pre> <ide> Define F_Word<sub>1</sub> as <b>1</b> <ide> Define F_Word<sub>2</sub> as <b>0</b> <ide><path>curriculum/challenges/english/08-coding-interview-prep/rosetta-code/general-fizzbuzz.english.md <ide> challengeType: 5 <ide> <ide> ## Description <ide> <section id='description'> <del>Write a generalized version of <a href="http://rosettacode.org/wiki/FizzBuzz">FizzBuzz</a> that 
works for any list of factors, along with their words. <add>Write a generalized version of <a href="https://rosettacode.org/wiki/FizzBuzz">FizzBuzz</a> that works for any list of factors, along with their words. <ide> This is basically a "fizzbuzz" implementation where the rules of the game are supplied to the user. Create a function to implement this. The function should take two parameters. <ide> The first will be an array with the FizzBuzz rules. For example: <code>[ [3, "Fizz"] , [5, "Buzz"] ]</code>. <ide> This indcates that <code>Fizz</code> should be printed if the number is a multiple of 3 and <code>Buzz</code> if it is a multiple of 5. If it is a multiple of both then the strings should be concatenated in the order specified in the array. In this case, <code>FizzBuzz</code> if the number is a multiple of 3 and 5. <ide><path>curriculum/challenges/english/08-coding-interview-prep/rosetta-code/hailstone-sequence.english.md <ide> The hailstone sequence is also known as hailstone numbers (because the values ar <ide> </ol> <ide> <b>See also:</b> <ide> <ul> <del> <li><a href="http://xkcd.com/710" title="link: http://xkcd.com/710" target="_blank">xkcd</a> (humourous).</li> <add> <li><a href="https://xkcd.com/710" target="_blank">xkcd</a> (humourous).</li> <ide> </ul> <ide> </section> <ide> <ide><path>curriculum/challenges/english/08-coding-interview-prep/rosetta-code/harshad-or-niven-series.english.md <ide> challengeType: 5 <ide> <ide> ## Description <ide> <section id='description'> <del>The <a href="http://mathworld.wolfram.com/HarshadNumber.html" title="link: http://mathworld.wolfram.com/HarshadNumber.html" target="_blank">Harshad</a> or Niven numbers are positive integers ≥ 1 that are divisible by the sum of their digits. <del>For example, <b>42</b> is a <a href="http://rosettacode.org/wiki/oeis:A005349" title="oeis:A005349" target="_blank">Harshad number</a> as <b>42</b> is divisible by <b>(4 + 2)</b> without remainder. 
<add>The <a href="https://mathworld.wolfram.com/HarshadNumber.html" target="_blank">Harshad</a> or Niven numbers are positive integers ≥ 1 that are divisible by the sum of their digits. <add>For example, <b>42</b> is a <a href="https://rosettacode.org/wiki/oeis:A005349" title="oeis:A005349" target="_blank">Harshad number</a> as <b>42</b> is divisible by <b>(4 + 2)</b> without remainder. <ide> Assume that the series is defined as the numbers in increasing order. <ide> </section> <ide> <ide><path>curriculum/challenges/english/08-coding-interview-prep/rosetta-code/hash-from-two-arrays.english.md <ide> challengeType: 5 <ide> Using two Arrays of equal length, create a Hash object where the elements from one array (the keys) are linked to the elements of the other (the values). <ide> <b>Related task:</b> <ide> <ul> <del> <li><a href="http://rosettacode.org/wiki/Associative arrays/Creation" title="Associative arrays/Creation" target="_blank">Associative arrays/Creation</a></li> <add> <li><a href="https://rosettacode.org/wiki/Associative arrays/Creation" title="Associative arrays/Creation" target="_blank">Associative arrays/Creation</a></li> <ide> </ul> <ide> </section> <ide> <ide><path>curriculum/challenges/english/08-coding-interview-prep/rosetta-code/heronian-triangles.english.md <ide> challengeType: 5 <ide> <span style="margin-left: 2em;"><big>$A = \sqrt{s(s-a)(s-b)(s-c)},$</big></span> <ide> where <big>s</big> is half the perimeter of the triangle; that is, <ide> <span style="margin-left: 2em;"><big>$s=\frac{a+b+c}{2}.$</big></span> <del><a href="http://www.had2know.com/academics/heronian-triangles-generator-calculator.html" title="link: http://www.had2know.com/academics/heronian-triangles-generator-calculator.html" target="_blank">Heronian triangles</a> are triangles whose sides and area are all integers. 
<add><a href="https://www.had2know.com/academics/heronian-triangles-generator-calculator.html" target="_blank">Heronian triangles</a> are triangles whose sides and area are all integers. <ide> An example is the triangle with sides <b>3, 4, 5</b> whose area is <b>6</b> (and whose perimeter is <b>12</b>). <ide> Note that any triangle whose sides are all an integer multiple of <b>3, 4, 5</b>; such as <b>6, 8, 10,</b> will also be a Heronian triangle. <ide> Define a Primitive Heronian triangle as a Heronian triangle where the greatest common divisor <ide><path>curriculum/challenges/english/08-coding-interview-prep/rosetta-code/hofstadter-figure-figure-sequences.english.md <ide> No maximum value for <b>n</b> should be assumed. <ide> <b>References</b> <ide> <ul> <ide> <li> <del> Sloane's <a href="http://oeis.org/A005228" title="link: http://oeis.org/A005228" target="_blank">A005228</a> and <a href="http://oeis.org/A030124" title="link: http://oeis.org/A030124" target="_blank">A030124</a>. <add> Sloane's <a href="https://oeis.org/A005228" target="_blank">A005228</a> and <a href="https://oeis.org/A030124" target="_blank">A030124</a>. <ide> </li> <ide> <li> <del> <a href="http://mathworld.wolfram.com/HofstadterFigure-FigureSequence.html" title="link: http://mathworld.wolfram.com/HofstadterFigure-FigureSequence.html" target="_blank">Wolfram MathWorld</a> <add> <a href="https://mathworld.wolfram.com/HofstadterFigure-FigureSequence.html" target="_blank">Wolfram MathWorld</a> <ide> </li> <ide> <li> <ide> Wikipedia: <a href="https://en.wikipedia.org/wiki/Hofstadter_sequence#Hofstadter_Figure-Figure_sequences" title="wp: Hofstadter_sequence#Hofstadter_Figure-Figure_sequences" target="_blank">Hofstadter Figure-Figure sequences</a>. 
<ide><path>curriculum/challenges/english/08-coding-interview-prep/rosetta-code/hofstadter-q-sequence.english.md <ide> challengeType: 5 <ide> <section id='description'> <ide> The <a href="https://en.wikipedia.org/wiki/Hofstadter_sequence#Hofstadter_Q_sequence" title="wp: Hofstadter_sequence#Hofstadter_Q_sequence" target="_blank">Hofstadter Q sequence</a> is defined as: <ide> <span style="left-margin: 2em;">$Q(1)=Q(2)=1, \\ Q(n)=Q\big(n-Q(n-1)\big)+Q\big(n-Q(n-2)), \quad n>2.$</span> <del>It is defined like the <a href="http://rosettacode.org/wiki/Fibonacci sequence" title="Fibonacci sequence" target="_blank">Fibonacci sequence</a>, but whereas the next term in the Fibonacci sequence is the sum of the previous two terms, in the Q sequence the previous two terms tell you how far to go back in the Q sequence to find the two numbers to sum to make the next term of the sequence. <add>It is defined like the <a href="https://rosettacode.org/wiki/Fibonacci sequence" title="Fibonacci sequence" target="_blank">Fibonacci sequence</a>, but whereas the next term in the Fibonacci sequence is the sum of the previous two terms, in the Q sequence the previous two terms tell you how far to go back in the Q sequence to find the two numbers to sum to make the next term of the sequence. <ide> </section> <ide> <ide> ## Instructions <ide><path>curriculum/challenges/english/08-coding-interview-prep/rosetta-code/k-d-tree.md <ide> Otherwise, when k-d trees are used with high-dimensional data, most of the point <ide> <ide> ## Instructions <ide> <section id='instructions'> <del>Write a function to perform a nearest neighbour search using k-d tree. The function takes two parameters: an array of k-dimensional points, and a single k-dimensional point whose nearest neighbour should be returned by the function. A k-dimensional point will be given as an array of k elements. <add>Write a function to perform a nearest neighbor search using k-d tree. 
The function takes two parameters: an array of k-dimensional points, and a single k-dimensional point whose nearest neighbor should be returned by the function. A k-dimensional point will be given as an array of k elements. <ide> </section> <ide> <ide> ## Tests <ide><path>curriculum/challenges/english/08-coding-interview-prep/rosetta-code/kaprekar-numbers.md <ide> Note that a split resulting in a part consisting purely of 0s is not valid, as 0 <ide> Kaprekar numbers: <ide> <ul> <ide> <li> <code>2223</code> is a Kaprekar number, as <code>2223 * 2223 = 4941729</code>, <code>4941729</code> may be split to <code>494</code> and <code>1729</code>, and <code>494 + 1729 = 2223</code></li> <del><li>The series of Kaprekar numbers is known as <a href="http://rosettacode.org/wiki/oeis:A006886">A006886</a>, and begins as <code>1, 9, 45, 55, ...</code></li> <add><li>The series of Kaprekar numbers is known as <a href="https://rosettacode.org/wiki/oeis:A006886">A006886</a>, and begins as <code>1, 9, 45, 55, ...</code></li> <ide> </ul> <ide> </section> <ide> <ide><path>curriculum/challenges/english/08-coding-interview-prep/rosetta-code/least-common-multiple.md <ide> challengeType: 5 <ide> <section id='description'> <ide> The least common multiple of 12 and 18 is 36, because 12 is a factor (12 &times; 3 = 36), and 18 is a factor (18 &times; 2 = 36), and there is no positive integer less than 36 that has both factors. As a special case, if either <i>m</i> or <i>n</i> is zero, then the least common multiple is zero. <ide> One way to calculate the least common multiple is to iterate all the multiples of <i>m</i>, until you find one that is also a multiple of <i>n</i>. <del>If you already have <i>gcd</i> for <a href="http://rosettacode.org/wiki/greatest common divisor">greatest common divisor</a>, then this formula calculates <i>lcm</i>. 
<add>If you already have <i>gcd</i> for <a href="https://rosettacode.org/wiki/greatest common divisor">greatest common divisor</a>, then this formula calculates <i>lcm</i>. <ide> \( \operatorname{lcm}(m, n) = \frac{|m \times n|}{\operatorname{gcd}(m, n)} \) <ide> </section> <ide> <ide><path>curriculum/challenges/english/08-coding-interview-prep/rosetta-code/sailors-coconuts-and-a-monkey-problem.english.md <ide> Create a function that returns the minimum possible size of the initial pile of <ide> Of course the tale is told in a world where the collection of any amount of coconuts in a day and multiple divisions of the pile, etc. can occur in time fitting the story line, so as not to affect the mathematics. <ide> <b>C.f:</b> <ide> <ul> <del> <li><a href="https://www.youtube.com/watch?v=U9qU20VmvaU" title="link: https://www.youtube.com/watch?v=U9qU20VmvaU" target="_blank"> Monkeys and Coconuts - Numberphile</a> (Video) Analytical solution.</li> <del> <li><a href="http://oeis.org/A002021" title="link: http://oeis.org/A002021" target="_blank">A002021 Pile of coconuts problem</a> The On-Line Encyclopedia of Integer Sequences. (Although some of its references may use the alternate form of the tale).</li> <add> <li><a href="https://www.youtube.com/watch?v=U9qU20VmvaU" target="_blank"> Monkeys and Coconuts - Numberphile</a> (Video) Analytical solution.</li> <add> <li><a href="https://oeis.org/A002021" target="_blank">A002021 Pile of coconuts problem</a> The On-Line Encyclopedia of Integer Sequences. (Although some of its references may use the alternate form of the tale).</li> <ide> </ul> <ide> </section> <ide> <ide><path>curriculum/challenges/english/08-coding-interview-prep/rosetta-code/sorting-algorithms-cocktail-sort.md <ide> challengeType: 5 <ide> <ide> ## Description <ide> <section id='description'> <del>The cocktail shaker sort is an improvement on the <a href="http://rosettacode.org/wiki/Bubble Sort" target="_blank">Bubble Sort</a>. 
The improvement is basically that values "bubble" both directions through the array, because on each iteration the cocktail shaker sort bubble sorts once forwards and once backwards. Pseudocode for the algorithm (from <a href="https://en.wikipedia.org/wiki/Cocktail sort" target="_blank">wikipedia</a>):</p> <add>The cocktail shaker sort is an improvement on the <a href="https://rosettacode.org/wiki/Bubble Sort" target="_blank">Bubble Sort</a>. The improvement is basically that values "bubble" both directions through the array, because on each iteration the cocktail shaker sort bubble sorts once forwards and once backwards. Pseudocode for the algorithm (from <a href="https://en.wikipedia.org/wiki/Cocktail sort" target="_blank">wikipedia</a>):</p> <ide> <pre> <ide> <b>function</b> <i>cocktailSort</i>( A : list of sortable items ) <ide> <b>do</b> <ide><path>curriculum/challenges/english/08-coding-interview-prep/rosetta-code/sorting-algorithms-comb-sort.md <ide> challengeType: 5 <ide> ## Description <ide> <section id='description'> <ide> Implement a <i>comb sort</i>. <del>The <b>Comb Sort</b> is a variant of the <a href="http://rosettacode.org/wiki/Bubble Sort" target="_blank">Bubble Sort</a>. <del>Like the <a href="http://rosettacode.org/wiki/Shell sort" target="_blank">Shell sort</a>, the Comb Sort increases the gap used in comparisons and exchanges. <add>The <b>Comb Sort</b> is a variant of the <a href="https://rosettacode.org/wiki/Bubble Sort" target="_blank">Bubble Sort</a>. <add>Like the <a href="https://rosettacode.org/wiki/Shell sort" target="_blank">Shell sort</a>, the Comb Sort increases the gap used in comparisons and exchanges. <ide> Dividing the gap by $(1-e^{-\varphi})^{-1} \approx 1.247330950103979$ works best, but 1.3 may be more practical. <ide> Some implementations use the insertion sort once the gap is less than a certain amount. 
<ide> <b>Also see</b> <ide> Pseudocode: <ide> gap := 1 <ide> <b>end if</b> <ide> i := 0 <del> swaps := 0 <i>//see <a href="http://rosettacode.org/wiki/Bubble Sort">Bubble Sort</a> for an explanation</i> <add> swaps := 0 <i>//see <a href="https://rosettacode.org/wiki/Bubble Sort">Bubble Sort</a> for an explanation</i> <ide> <i>//a single "comb" over the input list</i> <del> <b>loop until</b> i + gap >= input<b>.size</b> <i>//see <a href="http://rosettacode.org/wiki/Shell sort">Shell sort</a> for similar idea</i> <add> <b>loop until</b> i + gap >= input<b>.size</b> <i>//see <a href="https://rosettacode.org/wiki/Shell sort">Shell sort</a> for similar idea</i> <ide> <b>if</b> input[i] > input[i+gap] <ide> <b>swap</b>(input[i], input[i+gap]) <ide> swaps := 1 <i>// Flag a swap has occurred, so the</i> <ide><path>curriculum/challenges/english/08-coding-interview-prep/rosetta-code/sorting-algorithms-gnome-sort.md <ide> challengeType: 5 <ide> <ide> ## Description <ide> <section id='description'> <del>Gnome sort is a sorting algorithm which is similar to <a href="http://rosettacode.org/wiki/Insertion sort" target="_blank">Insertion sort</a>, except that moving an element to its proper place is accomplished by a series of swaps, as in <a href="http://rosettacode.org/wiki/Bubble Sort" target="_blank">Bubble Sort</a>. <add>Gnome sort is a sorting algorithm which is similar to <a href="https://rosettacode.org/wiki/Insertion sort" target="_blank">Insertion sort</a>, except that moving an element to its proper place is accomplished by a series of swaps, as in <a href="https://rosettacode.org/wiki/Bubble Sort" target="_blank">Bubble Sort</a>. <ide> The pseudocode for the algorithm is: <ide> <pre> <ide> <b>function</b> <i>gnomeSort</i>(a[0..size-1]) <ide><path>curriculum/challenges/english/08-coding-interview-prep/rosetta-code/soundex.md <ide> challengeType: 5 <ide> <section id='description'> <ide> Soundex is an algorithm for creating indices for words based on their pronunciation. 
<ide>
<ide> The goal is for homophones to be encoded to the same representation so that they can be matched despite minor differences in spelling (from <a href="https://en.wikipedia.org/wiki/soundex" target="_blank">the WP article</a>).
<del>There is a major issue in many of the implementations concerning the separation of two consonants that have the same soundex code! According to the official Rules <a href="http://rosettacode.org/wiki/https://www.archives.gov/research/census/soundex.html" target="_blank">https://www.archives.gov/research/census/soundex.html</a>. So check for instance if <b>Ashcraft</b> is coded to <b>A-261</b>.
<add>There is a major issue in many of the implementations concerning the separation of two consonants that have the same soundex code! According to the <a href="https://www.archives.gov/research/census/soundex.html" target="_blank">official Rules</a>, check for instance if <b>Ashcraft</b> is coded to <b>A-261</b>.
<ide> <ul>
<ide> <li>If a vowel (A, E, I, O, U) separates two consonants that have the same soundex code, the consonant to the right of the vowel is coded. Tymczak is coded as T-522 (T, 5 for the M, 2 for the C, Z ignored (see "Side-by-Side" rule above), 2 for the K). Since the vowel "A" separates the Z and K, the K is coded.</li>
<ide> <li>If "H" or "W" separate two consonants that have the same soundex code, the consonant to the right of the vowel is not coded. Example: Ashcraft is coded A-261 (A, 2 for the S, C ignored, 6 for the R, 1 for the F). 
It is not coded A-226.</li> <ide><path>curriculum/challenges/english/08-coding-interview-prep/rosetta-code/state-name-puzzle.md <ide> challengeType: 5 <ide> <ide> ## Description <ide> <section id='description'> <del>This task is inspired by <a href="http://drdobbs.com/windows/198701685" target="_blank">Mark Nelson's DDJ Column "Wordplay"</a> and one of the weekly puzzle challenges from Will Shortz on NPR Weekend Edition <a href="http://www.npr.org/templates/story/story.php?storyId=9264290"target="_blank">[1]</a> and originally attributed to David Edelheit. <add>This task is inspired by <a href="https://drdobbs.com/windows/198701685" target="_blank">Mark Nelson's DDJ Column "Wordplay"</a> and one of the weekly puzzle challenges from Will Shortz on NPR Weekend Edition <a href="https://www.npr.org/templates/story/story.php?storyId=9264290"target="_blank">[1]</a> and originally attributed to David Edelheit. <ide> The challenge was to take the names of two U.S. States, mix them all together, then rearrange the letters to form the names of two <i>different</i> U.S. States (so that all four state names differ from one another). <ide> What states are these? <ide> The problem was reissued on <a href="https://tapestry.tucson.az.us/twiki/bin/view/Main/StateNamesPuzzle" target="_blank">the Unicon Discussion Web</a> which includes several solutions with analysis. Several techniques may be helpful and you may wish to refer to <a href="https://en.wikipedia.org/wiki/Goedel_numbering">Gödel numbering</a>, <a href="https://en.wikipedia.org/wiki/Equivalence_relation" target="_blank">equivalence relations</a>, and <a href="https://en.wikipedia.org/wiki/Equivalence_classes" target="_blank">equivalence classes</a>. The basic merits of these were discussed in the Unicon Discussion Web. 
<ide><path>curriculum/challenges/english/08-coding-interview-prep/rosetta-code/stern-brocot-sequence.md <ide> challengeType: 5 <ide> <ide> ## Description <ide> <section id='description'> <del>For this task, the Stern-Brocot sequence is to be generated by an algorithm similar to that employed in generating the <a href="http://rosettacode.org/wiki/Fibonacci sequence" target="_blank">Fibonacci sequence</a>. <add>For this task, the Stern-Brocot sequence is to be generated by an algorithm similar to that employed in generating the <a href="https://rosettacode.org/wiki/Fibonacci sequence" target="_blank">Fibonacci sequence</a>. <ide> <ol> <ide> <li>The first and second members of the sequence are both 1:</li> <ide> <ul><li>1, 1</li></ul> <ide><path>curriculum/challenges/english/08-coding-interview-prep/rosetta-code/subleq.md <ide> challengeType: 5 <ide> <ide> ## Description <ide> <section id='description'> <del><a href="http://rosettacode.org/wiki/eso:Subleq" target="_blank">Subleq</a> is an example of a <a href="https://en.wikipedia.org/wiki/One_instruction_set_computer" target="_blank">One-Instruction Set Computer (OISC)</a>. <add><a href="https://rosettacode.org/wiki/eso:Subleq" target="_blank">Subleq</a> is an example of a <a href="https://en.wikipedia.org/wiki/One_instruction_set_computer" target="_blank">One-Instruction Set Computer (OISC)</a>. <ide> It is named after its only instruction, which is <b>SU</b>btract and <b>B</b>ranch if <b>L</b>ess than or <b>EQ</b>ual <ide> to zero. <ide> Your task is to create an interpreter which emulates such a machine. 
<ide><path>curriculum/challenges/english/08-coding-interview-prep/rosetta-code/symmetric-difference.md <ide> challengeType: 5 <ide> <ide> ## Description <ide> <section id='description'> <del>Given two <a href="http://rosettacode.org/wiki/set" target="_blank">set</a>s <i>A</i> and <i>B</i>, compute $(A \setminus B) \cup (B \setminus A).$ <add>Given two <a href="https://rosettacode.org/wiki/set" target="_blank">set</a>s <i>A</i> and <i>B</i>, compute $(A \setminus B) \cup (B \setminus A).$ <ide> That is, enumerate the items that are in <i>A</i> or <i>B</i> but not both. This set is called the <a href="https://en.wikipedia.org/wiki/Symmetric difference" target="_blank">symmetric difference</a> of <i>A</i> and <i>B</i>. <ide> In other words: $(A \cup B) \setminus (A \cap B)$ (the set of items that are in at least one of <i>A</i> or <i>B</i> minus the set of items that are in both <i>A</i> and <i>B</i>). <ide> </section> <ide><path>curriculum/challenges/english/08-coding-interview-prep/rosetta-code/taxicab-numbers.english.md <ide> Taxicab numbers are also known as: <ide> Write a function that returns the lowest <code>n</code> taxicab numbers. For each of the taxicab numbers, show the number as well as its constituent cubes. 
<ide> <b>See also:</b> <ide> <ul> <del> <li><a href="http://oeis.org/A001235" target="_blank">A001235 taxicab numbers</a> on The On-Line Encyclopedia of Integer Sequences.</li> <del> <li><a href="http://mathworld.wolfram.com/Hardy-RamanujanNumber.html" target="_blank">Hardy-Ramanujan Number</a> on MathWorld.</li> <del> <li><a href="http://mathworld.wolfram.com/TaxicabNumber.html" target="_blank">taxicab number</a> on MathWorld.</li> <add> <li><a href="https://oeis.org/A001235" target="_blank">A001235 taxicab numbers</a> on The On-Line Encyclopedia of Integer Sequences.</li> <add> <li><a href="https://mathworld.wolfram.com/Hardy-RamanujanNumber.html" target="_blank">Hardy-Ramanujan Number</a> on MathWorld.</li> <add> <li><a href="https://mathworld.wolfram.com/TaxicabNumber.html" target="_blank">taxicab number</a> on MathWorld.</li> <ide> <li><a href="https://en.wikipedia.org/wiki/Taxicab_number" target="_blank">taxicab number</a> on Wikipedia.</li> <ide> </ul> <ide> </section> <ide><path>curriculum/challenges/english/08-coding-interview-prep/rosetta-code/topological-sort.english.md <ide> synopsys <ide> <small>Note: the above data would be un-orderable if, for example, <code>dw04</code> is added to the list of dependencies of <code>dw01</code>.</small> <ide> <b>C.f.:</b> <ide> <ul> <del> <li><a href="http://rosettacode.org/wiki/Topological sort/Extracted top item" title="Topological sort/Extracted top item" target="_blank">Topological sort/Extracted top item</a>.</li> <add> <li><a href="https://rosettacode.org/wiki/Topological sort/Extracted top item" title="Topological sort/Extracted top item" target="_blank">Topological sort/Extracted top item</a>.</li> <ide> </ul> <ide> There are two popular algorithms for topological sorting: <ide> <ul> <ide> <li><a href="https://en.wikipedia.org/wiki/Topological sorting" title="wp: Topological sorting" target="_blank">Kahn's 1962 topological sort</a></li> <del> <li><a href="http://www.embeddedrelated.com/showarticle/799.php" 
title="link: http://www.embeddedrelated.com/showarticle/799.php" target="_blank">depth-first search</a></li> <add> <li><a href="https://www.embeddedrelated.com/showarticle/799.php" target="_blank">depth-first search</a></li> <ide> </ul> <ide> </section> <ide> <ide><path>curriculum/challenges/english/08-coding-interview-prep/rosetta-code/y-combinator.english.md <ide> challengeType: 5 <ide> ## Description <ide> <section id='description'> <ide> In strict <a href="https://en.wikipedia.org/wiki/Functional programming" title="wp: functional programming" target="_blank">functional programming</a> and the <a href="https://en.wikipedia.org/wiki/lambda calculus" title="wp: lambda calculus" target="_blank">lambda calculus</a>, functions (lambda expressions) don't have state and are only allowed to refer to arguments of enclosing functions. This rules out the usual definition of a recursive function wherein a function is associated with the state of a variable and this variable's state is used in the body of the function. <del>The <a href="http://mvanier.livejournal.com/2897.html" target="_blank">Y combinator</a> is itself a stateless function that, when applied to another stateless function, returns a recursive version of the function. The Y combinator is the simplest of the class of such functions, called <a href="https://en.wikipedia.org/wiki/Fixed-point combinator" title="wp: fixed-point combinator" target="_blank">fixed-point combinators</a>. <add>The <a href="https://mvanier.livejournal.com/2897.html" target="_blank">Y combinator</a> is itself a stateless function that, when applied to another stateless function, returns a recursive version of the function. The Y combinator is the simplest of the class of such functions, called <a href="https://en.wikipedia.org/wiki/Fixed-point combinator" title="wp: fixed-point combinator" target="_blank">fixed-point combinators</a>. 
<ide> </section> <ide> <ide> ## Instructions <ide> <section id='instructions'> <ide> Define the stateless Y combinator function and use it to compute <a href="https://en.wikipedia.org/wiki/Factorial" title="wp: factorial">factorial</a>. The <code>factorial(N)</code> function is already given to you. <ide> <b>See also:</b> <ide> <ul> <del> <li><a href="http://vimeo.com/45140590" target="_blank">Jim Weirich: Adventures in Functional Programming</a>.</li> <add> <li><a href="https://vimeo.com/45140590" target="_blank">Jim Weirich: Adventures in Functional Programming</a>.</li> <ide> </ul> <ide> </section> <ide>
38
PHP
PHP
remove unneeded order lines
85c1081b26b7cd2411f5ab9b2b5c734e22931933
<ide><path>tests/Integration/Queue/ModelSerializationTest.php <ide> public function it_reloads_relationships() <ide> <ide> Line::create(['order_id' => $order->id, 'product_id' => $product1->id]); <ide> Line::create(['order_id' => $order->id, 'product_id' => $product2->id]); <del> Line::create(['order_id' => $order->id, 'product_id' => $product1->id]); <ide> <ide> $order->load('lines'); <ide> <ide> public function it_reloads_nested_relationships() <ide> <ide> Line::create(['order_id' => $order->id, 'product_id' => $product1->id]); <ide> Line::create(['order_id' => $order->id, 'product_id' => $product2->id]); <del> Line::create(['order_id' => $order->id, 'product_id' => $product1->id]); <ide> <ide> $order->load('lines', 'lines.product'); <ide>
1
Ruby
Ruby
add small dsl for defining comparators
158b7047e516b6dda176cb1745e23c7109d049e9
<ide><path>Library/Homebrew/version.rb <ide> def self._parse spec <ide> m = /_([^_]+)/.match(stem) <ide> return m.captures.first unless m.nil? <ide> end <add> <add> # DSL for defining comparators <add> class << self <add> def compare &blk <add> send(:define_method, '<=>', &blk) <add> end <add> end <ide> end <ide> <ide> class VersionSchemeDetector
1
Text
Text
remove broken 404 links
82e63e3d74b3f0d69b11f4c170e48ffd6f5db886
<ide><path>packages/next/README.md <ide> We have a list of [good first issues](https://github.com/vercel/next.js/labels/g <ide> <ide> ## Authors <ide> <del>- Tim Neutkens ([@timneutkens](https://twitter.com/timneutkens)) – [Vercel](https://vercel.com/about/timneutkens) <del>- Naoyuki Kanezawa ([@nkzawa](https://twitter.com/nkzawa)) – [Vercel](https://vercel.com/about/nkzawa) <del>- Guillermo Rauch ([@rauchg](https://twitter.com/rauchg)) – [Vercel](https://vercel.com/about/rauchg) <add>- Tim Neutkens ([@timneutkens](https://twitter.com/timneutkens)) <add>- Naoyuki Kanezawa ([@nkzawa](https://twitter.com/nkzawa)) <add>- Guillermo Rauch ([@rauchg](https://twitter.com/rauchg)) <ide> - Arunoda Susiripala ([@arunoda](https://twitter.com/arunoda)) <ide> - Tony Kovanen ([@tonykovanen](https://twitter.com/tonykovanen)) <ide> - Dan Zajdband ([@impronunciable](https://twitter.com/impronunciable))
1
Text
Text
use a descriptive text for the wikipedia link
20dc65f6cedf835fb66990749ee2de7b0990d7bf
<ide><path>guide/english/algorithms/algorithm-design-patterns/behavioral-patterns/index.md <ide> Examples of this type of design pattern include: <ide> 17. **Visitor pattern**: A way to separate an algorithm from an object. <ide> <ide> ### Sources <del>[https://en.wikipedia.org/wiki/Behavioral_pattern](https://en.wikipedia.org/wiki/Behavioral_pattern) <add>[Behavioral patterns - Wikipedia](https://en.wikipedia.org/wiki/Behavioral_pattern)
1
PHP
PHP
add test for disabled modulus
00735936dd29606fd088f3288dd51dd59dfd76d2
<ide><path>tests/TestCase/View/Helper/PaginatorHelperTest.php <ide> public function testNumbersModulus() <ide> 'pageCount' => 3, <ide> ] <ide> ]; <del> $options = ['modulus' => 10]; <del> $result = $this->Paginator->numbers($options); <add> <add> $result = $this->Paginator->numbers(['modulus' => 10]); <ide> $expected = [ <ide> ['li' => ['class' => 'active']], '<a href=""', '1', '/a', '/li', <ide> ['li' => []], ['a' => ['href' => '/index?page=2']], '2', '/a', '/li', <ide> public function testNumbersModulus() <ide> $this->assertHtml($expected, $result); <ide> } <ide> <add> /** <add> * Tests that disabling modulus displays all page links. <add> * <add> * @return void <add> */ <add> public function testModulusDisabled() <add> { <add> $this->Paginator->request->params['paging'] = [ <add> 'Client' => [ <add> 'page' => 4, <add> 'current' => 2, <add> 'count' => 30, <add> 'prevPage' => 1, <add> 'nextPage' => 1, <add> 'pageCount' => 6, <add> ] <add> ]; <add> <add> $result = $this->Paginator->numbers(['modulus' => false]); <add> $expected = [ <add> ['li' => []], '<a href="/index"', '1', '/a', '/li', <add> ['li' => []], ['a' => ['href' => '/index?page=2']], '2', '/a', '/li', <add> ['li' => []], ['a' => ['href' => '/index?page=3']], '3', '/a', '/li', <add> ['li' => ['class' => 'active']], ['a' => ['href' => '']], '4', '/a', '/li', <add> ['li' => []], ['a' => ['href' => '/index?page=5']], '5', '/a', '/li', <add> ['li' => []], ['a' => ['href' => '/index?page=6']], '6', '/a', '/li', <add> ]; <add> $this->assertHtml($expected, $result); <add> } <add> <ide> /** <ide> * Test that numbers() with url options. <ide> *
1
Mixed
Javascript
add support for vibration patterns
e20e8a3cc87cbb624add4de00149b1f55bbdcfba
<ide><path>Examples/UIExplorer/VibrationExample.js <ide> var { <ide> exports.framework = 'React'; <ide> exports.title = 'Vibration'; <ide> exports.description = 'Vibration API'; <del>exports.examples = [{ <del> title: 'Vibration.vibrate()', <del> render() { <del> return ( <del> <TouchableHighlight <del> style={styles.wrapper} <del> onPress={() => Vibration.vibrate()}> <del> <View style={styles.button}> <del> <Text>Vibrate</Text> <del> </View> <del> </TouchableHighlight> <del> ); <add>exports.examples = [ <add> { <add> title: 'Vibration.vibrate()', <add> render() { <add> return ( <add> <TouchableHighlight <add> style={styles.wrapper} <add> onPress={() => Vibration.vibrate()}> <add> <View style={styles.button}> <add> <Text>Vibrate</Text> <add> </View> <add> </TouchableHighlight> <add> ); <add> }, <ide> }, <del>}]; <add> { <add> title: 'Vibration.vibrate([0, 500, 200, 500])', <add> render() { <add> return ( <add> <TouchableHighlight <add> style={styles.wrapper} <add> onPress={() => Vibration.vibrate([0, 500, 200, 500])}> <add> <View style={styles.button}> <add> <Text>Vibrate once</Text> <add> </View> <add> </TouchableHighlight> <add> ); <add> }, <add> }, <add> { <add> title: 'Vibration.vibrate([0, 500, 200, 500], true)', <add> render() { <add> return ( <add> <TouchableHighlight <add> style={styles.wrapper} <add> onPress={() => Vibration.vibrate([0, 500, 200, 500], true)}> <add> <View style={styles.button}> <add> <Text>Vibrate until cancel</Text> <add> </View> <add> </TouchableHighlight> <add> ); <add> }, <add> }, <add> { <add> title: 'Vibration.cancel()', <add> render() { <add> return ( <add> <TouchableHighlight <add> style={styles.wrapper} <add> onPress={() => Vibration.cancel()}> <add> <View style={styles.button}> <add> <Text>Cancel</Text> <add> </View> <add> </TouchableHighlight> <add> ); <add> }, <add> }, <add>]; <ide> <ide> var styles = StyleSheet.create({ <ide> wrapper: { <ide><path>Libraries/Vibration/Vibration.js <ide> var Platform = require('Platform'); <ide> 
*/ <ide> <ide> var Vibration = { <del> vibrate: function(duration: number = 400) { <add> vibrate: function(pattern: number | Array<number> = 400, repeat: boolean = false) { <ide> if (Platform.OS === 'android') { <del> RCTVibration.vibrate(duration); <add> if (typeof pattern === 'number') { <add> RCTVibration.vibrate(pattern); <add> } else if (Array.isArray(pattern)) { <add> RCTVibration.vibrateByPattern(pattern, repeat ? 0 : -1); <add> } else { <add> throw new Error('Vibration pattern should be a number or array'); <add> } <ide> } else { <del> RCTVibration.vibrate(); <add> if (typeof pattern === 'number') { <add> RCTVibration.vibrate(); <add> } else if (Array.isArray(pattern)) { <add> console.warn('Vibration patterns are not supported on iOS'); <add> } else { <add> throw new Error('Vibration pattern should be a number or array'); <add> } <add> } <add> }, <add> /** <add> * Stop vibration <add> * <add> * @platform android <add> */ <add> cancel: function() { <add> if (Platform.OS === 'ios') { <add> console.warn('Vibration.cancel is not supported on iOS'); <add> } else { <add> RCTVibration.cancel(); <ide> } <ide> } <ide> }; <ide><path>ReactAndroid/src/main/java/com/facebook/react/modules/vibration/VibrationModule.java <ide> import com.facebook.react.bridge.ReactApplicationContext; <ide> import com.facebook.react.bridge.ReactContextBaseJavaModule; <ide> import com.facebook.react.bridge.ReactMethod; <add>import com.facebook.react.bridge.ReadableArray; <ide> <ide> public class VibrationModule extends ReactContextBaseJavaModule { <ide> <ide> public void vibrate(int duration) { <ide> v.vibrate(duration); <ide> } <ide> } <add> <add> @ReactMethod <add> public void vibrateByPattern(ReadableArray pattern, int repeat) { <add> long[] patternLong = new long[pattern.size()]; <add> for (int i = 0; i < pattern.size(); i++) { <add> patternLong[i] = pattern.getInt(i); <add> } <add> <add> Vibrator v = (Vibrator) getReactApplicationContext().getSystemService(Context.VIBRATOR_SERVICE); 
<add> if (v != null) { <add> v.vibrate(patternLong, repeat); <add> } <add> } <add> <add> @ReactMethod <add> public void cancel() { <add> Vibrator v = (Vibrator) getReactApplicationContext().getSystemService(Context.VIBRATOR_SERVICE); <add> if (v != null) { <add> v.cancel(); <add> } <add> } <ide> }
3
Text
Text
update broken links on local setup guides.
3e7dcee0baf07a709aa7b0678ed75cbddbc5e516
<ide><path>docs/how-to-setup-freecodecamp-locally-using-docker.md <ide> Make sure the command line tool (Cmd, PowerShell or Git Bash for Windows, etc.) <ide> <ide> We regularly develop on popular and latest operating systems like macOS 10.12 or later, Ubuntu 16.04 or later and Windows 10. Its recommended to lookup your specific issue on resources like: Google, Stack Overflow or Stack Exchange. Chances are that someone has faced the same issue and there is already an answer to your specific query. <ide> <del>If you are on a different OS, and/or are still running into issues, reach out to [contributors community on our public forum](https://www.freeCodeCamp.org/c/contributors) or the [contributor's chat room](https://gitter.im/freeCodeCamp/Contributors). <add>If you are on a different OS, and/or are still running into issues, reach out to [contributors community on our public forum](https://www.freeCodeCamp.org/forum/c/contributors) or the [contributor's chat room](https://gitter.im/freeCodeCamp/Contributors). <ide> <ide> Please avoid creating GitHub issues for pre-requisite software and use the forum and chat room instead. <ide> <ide><path>docs/how-to-setup-freecodecamp-locally.md <ide> Make sure the command line tool you use (Cmd, PowerShell or Git Bash for Windows <ide> #### I am having issues with installing the recommended prerequisites. What should I do? <ide> We regularly develop on the latest or most popular operating systems like macOS 10.12 or later, Ubuntu 16.04 or later and Windows 10. It is recommended to lookup your specific issue on resources such as Google, Stack Overflow and Stack Exchange. Chances are good that someone has faced the same issue and there is already an answer to your specific query. 
<ide> <del>If you are on a different OS and/or are still running into issues, reach out to the [contributors community on our public forum](https://www.freeCodeCamp.org/c/contributors) or the [contributor's chat room](https://gitter.im/freeCodeCamp/Contributors). <add>If you are on a different OS and/or are still running into issues, reach out to the [contributors community on our public forum](https://www.freeCodeCamp.org/forum/c/contributors) or the [contributor's chat room](https://gitter.im/freeCodeCamp/Contributors). <ide> <ide> Please avoid creating GitHub issues for prerequisite issues. They are out of the scope of this project. <ide>
2
Python
Python
update bundled constants
79c4b44bbb51549c558d05a61a0d6b822409b14d
<ide><path>libcloud/compute/constants.py <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "a1.2xlarge", <add> "instancesku": "PXF95N47CM7CEGME", <ide> "memory": "16 GiB", <ide> "networkPerformance": "Up to 10 Gigabit", <ide> "normalizationSizeFactor": "16", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "a1.4xlarge", <del> "instancesku": "W36FGJNN3ZBPWBFR", <add> "instancesku": "YGQD7BUTEKQ69RUF", <ide> "memory": "32 GiB", <ide> "networkPerformance": "Up to 10 Gigabit", <ide> "normalizationSizeFactor": "32", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "a1.medium", <del> "instancesku": "H2THDDW6B7P8U78G", <ide> "memory": "2 GiB", <ide> "networkPerformance": "Up to 10 Gigabit", <ide> "normalizationSizeFactor": "2", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "a1.xlarge", <add> "instancesku": "CXU5MVU4CK2AJRKJ", <ide> "memory": "8 GiB", <ide> "networkPerformance": "Up to 10 Gigabit", <ide> "normalizationSizeFactor": "8", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Compute optimized", <ide> "instanceType": "c3.2xlarge", <del> "instancesku": "YCPWPFV7C7FUPCBE", <add> "instancesku": "M7H5YZEQ9NGEV8X6", <ide> "memory": "15 GiB", <ide> "networkPerformance": "High", <ide> "normalizationSizeFactor": "16", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Compute optimized", <ide> "instanceType": "c3.4xlarge", <del> "instancesku": "HAMHHG546XFH7W8W", <ide> "memory": "30 GiB", <ide> "networkPerformance": "High", <ide> "normalizationSizeFactor": "32", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Compute optimized", <ide> "instanceType": "c3.8xlarge", <del> "instancesku": "N9DRUZYNWT9YS8PF", <add> "instancesku": "QUKUYT3MDRYSQE84", <ide> "memory": "60 GiB", <ide> 
"networkPerformance": "10 Gigabit", <ide> "normalizationSizeFactor": "64", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Compute optimized", <ide> "instanceType": "c3.large", <del> "instancesku": "QG8MPRPSA2WNM57J", <ide> "memory": "3.75 GiB", <ide> "networkPerformance": "Moderate", <ide> "normalizationSizeFactor": "4", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Compute optimized", <ide> "instanceType": "c3.xlarge", <del> "instancesku": "EX3GSFDZWRFE8RYG", <ide> "memory": "7.5 GiB", <ide> "networkPerformance": "Moderate", <ide> "normalizationSizeFactor": "8", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Compute optimized", <ide> "instanceType": "c4.8xlarge", <del> "instancesku": "9X896ABXDJYHXTFC", <add> "instancesku": "YVPPWZNF3F8UKWV6", <ide> "memory": "60 GiB", <ide> "networkPerformance": "10 Gigabit", <ide> "normalizationSizeFactor": "64", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Compute optimized", <ide> "instanceType": "c4.large", <del> "instancesku": "FGZHTUSVJ75M372Z", <add> "instancesku": "H8WWM9PCCPXYDQK7", <ide> "memory": "3.75 GiB", <ide> "networkPerformance": "Moderate", <ide> "normalizationSizeFactor": "4", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Compute optimized", <ide> "instanceType": "c4.xlarge", <add> "instancesku": "V6VKYTN24UKH28U4", <ide> "memory": "7.5 GiB", <ide> "networkPerformance": "High", <ide> "normalizationSizeFactor": "8", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Compute optimized", <ide> "instanceType": "c5.12xlarge", <del> "instancesku": "3GR44UA2Q6S9ZYFV", <ide> "memory": "96 GiB", <ide> "networkPerformance": "12 Gigabit", <ide> "normalizationSizeFactor": "96", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Compute optimized", <ide> "instanceType": "c5.18xlarge", <del> "instancesku": "Q6EKQFYYFRDQ7KMN", <ide> "memory": "144 GiB", <ide> 
"networkPerformance": "25 Gigabit", <ide> "normalizationSizeFactor": "144", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Compute optimized", <ide> "instanceType": "c5.24xlarge", <del> "instancesku": "36NHB28B4QY925PB", <ide> "memory": "192 GiB", <ide> "networkPerformance": "25 Gigabit", <ide> "normalizationSizeFactor": "192", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Compute optimized", <ide> "instanceType": "c5.2xlarge", <del> "instancesku": "88X3R5DH2EPD3MRG", <add> "instancesku": "VPCR3S8GZ7VCX6CW", <ide> "memory": "16 GiB", <ide> "networkPerformance": "Up to 10 Gigabit", <ide> "normalizationSizeFactor": "16", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Compute optimized", <ide> "instanceType": "c5.4xlarge", <del> "instancesku": "MUCENET3CTBK38ZU", <add> "instancesku": "NXGGFHBZBMWKZB9H", <ide> "memory": "32 GiB", <ide> "networkPerformance": "Up to 10 Gigabit", <ide> "normalizationSizeFactor": "32", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Compute optimized", <ide> "instanceType": "c5.9xlarge", <del> "instancesku": "PD3BV6YJ4J7FSW3E", <ide> "memory": "72 GiB", <ide> "networkPerformance": "10 Gigabit", <ide> "normalizationSizeFactor": "72", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Compute optimized", <ide> "instanceType": "c5.large", <del> "instancesku": "AXHN7Q3J6Z8F5UH7", <add> "instancesku": "VJADNCQ75QDGMBEP", <ide> "memory": "4 GiB", <ide> "networkPerformance": "Up to 10 Gigabit", <ide> "normalizationSizeFactor": "4", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Compute optimized", <ide> "instanceType": "c5.metal", <add> "instancesku": "B3ET2CZBPE7AJE9E", <ide> "memory": "192 GiB", <ide> "networkPerformance": "25 Gigabit", <ide> "normalizationSizeFactor": "192", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Compute optimized", <ide> "instanceType": "c5.xlarge", <del> "instancesku": 
"VN5XWNSN2W3F6TAC", <ide> "memory": "8 GiB", <ide> "networkPerformance": "Up to 10 Gigabit", <ide> "normalizationSizeFactor": "8", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Compute optimized", <ide> "instanceType": "c5d.12xlarge", <add> "instancesku": "DTTZK2J7TPS675MT", <ide> "memory": "96 GiB", <ide> "networkPerformance": "12 Gigabit", <ide> "normalizationSizeFactor": "96", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Compute optimized", <ide> "instanceType": "c5d.18xlarge", <add> "instancesku": "S248AHS9VKXTENEV", <ide> "memory": "144 GiB", <ide> "networkPerformance": "25 Gigabit", <ide> "normalizationSizeFactor": "144", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Compute optimized", <ide> "instanceType": "c5d.24xlarge", <del> "instancesku": "ZUY35VHVWK72A3MX", <add> "instancesku": "XWYCW4M9VXM76YFS", <ide> "memory": "192 GiB", <ide> "networkPerformance": "25 Gigabit", <ide> "normalizationSizeFactor": "192", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Compute optimized", <ide> "instanceType": "c5d.2xlarge", <add> "instancesku": "DFTSKTFNKWM4G5WC", <ide> "memory": "16 GiB", <ide> "networkPerformance": "Up to 10 Gigabit", <ide> "normalizationSizeFactor": "16", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Compute optimized", <ide> "instanceType": "c5d.4xlarge", <del> "instancesku": "E3J2WNEP4FB5RBG4", <add> "instancesku": "H59GHPATMU8YJBZS", <ide> "memory": "32 GiB", <ide> "networkPerformance": "Up to 10 Gigabit", <ide> "normalizationSizeFactor": "32", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Compute optimized", <ide> "instanceType": "c5d.large", <add> "instancesku": "GMJQENMMHSTFC9E3", <ide> "memory": "4 GiB", <ide> "networkPerformance": "Up to 10 Gigabit", <ide> "normalizationSizeFactor": "4", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Compute optimized", <ide> "instanceType": 
"c5d.metal", <add> "instancesku": "C4EZ2698HTGJC68C", <ide> "memory": "192 GiB", <ide> "networkPerformance": "25 Gigabit", <ide> "normalizationSizeFactor": "192", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Compute optimized", <ide> "instanceType": "c5n.18xlarge", <del> "instancesku": "ZTJT2BWK6BH4B5PM", <ide> "memory": "192 GiB", <ide> "networkPerformance": "100 Gigabit", <ide> "normalizationSizeFactor": "144", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Compute optimized", <ide> "instanceType": "c5n.2xlarge", <del> "instancesku": "WCK6YA4JVWWRTURJ", <add> "instancesku": "G22JK6QC3PVQE6RB", <ide> "memory": "21 GiB", <ide> "networkPerformance": "Up to 25 Gigabit", <ide> "normalizationSizeFactor": "16", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Compute optimized", <ide> "instanceType": "c5n.4xlarge", <del> "instancesku": "G4EM2YEU83PCCBNV", <ide> "memory": "42 GiB", <ide> "networkPerformance": "Up to 25 Gigabit", <ide> "normalizationSizeFactor": "32", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Compute optimized", <ide> "instanceType": "c5n.9xlarge", <del> "instancesku": "3KXARKANQ8NUSJJX", <add> "instancesku": "ZMPRVNHGMD9YGMKQ", <ide> "memory": "96 GiB", <ide> "networkPerformance": "50 Gigabit", <ide> "normalizationSizeFactor": "72", <ide> "ecu": "NA", <ide> "instanceFamily": "Compute optimized", <ide> "instanceType": "c5n.metal", <add> "instancesku": "EGRB9Q3E8UHAKJVM", <ide> "memory": "192 GiB", <ide> "networkPerformance": "100 Gigabit", <ide> "normalizationSizeFactor": "144", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Compute optimized", <ide> "instanceType": "c5n.xlarge", <del> "instancesku": "CA4QNES7PTHE2XB6", <add> "instancesku": "6H8KZGH4BJETFXUA", <ide> "memory": "10.5 GiB", <ide> "networkPerformance": "Up to 25 Gigabit", <ide> "normalizationSizeFactor": "8", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": 
"Storage optimized", <ide> "instanceType": "d2.2xlarge", <del> "instancesku": "G849FZ42MCE9EBVZ", <add> "instancesku": "RBTTMD5AP4CP4TEB", <ide> "memory": "61 GiB", <ide> "networkPerformance": "High", <ide> "normalizationSizeFactor": "16", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Storage optimized", <ide> "instanceType": "d2.4xlarge", <del> "instancesku": "NYS64V9KFDU7WVMA", <ide> "memory": "122 GiB", <ide> "networkPerformance": "High", <ide> "normalizationSizeFactor": "32", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Storage optimized", <ide> "instanceType": "d2.8xlarge", <del> "instancesku": "N2MNBRAGGBXSYM55", <add> "instancesku": "XHTGRKAAUAXJEMXS", <ide> "memory": "244 GiB", <ide> "networkPerformance": "10 Gigabit", <ide> "normalizationSizeFactor": "64", <ide> "ecu": "26", <ide> "instanceFamily": "FPGA Instances", <ide> "instanceType": "f1.2xlarge", <add> "instancesku": "YQTVPR6CQNPZSMKF", <ide> "memory": "122 GiB", <ide> "networkPerformance": "Up to 10 Gigabit", <ide> "normalizationSizeFactor": "16", <ide> "gpu": "1", <ide> "instanceFamily": "GPU instance", <ide> "instanceType": "g2.2xlarge", <del> "instancesku": "YBFPE73HS26KSZV4", <ide> "memory": "15 GiB", <ide> "networkPerformance": "High", <ide> "normalizationSizeFactor": "16", <ide> "gpu": "4", <ide> "instanceFamily": "GPU instance", <ide> "instanceType": "g2.8xlarge", <add> "instancesku": "KRSYZR6MVXE9P7C4", <ide> "memory": "60 GiB", <ide> "networkPerformance": "10 Gigabit", <ide> "normalizationSizeFactor": "64", <ide> "gpu": "4", <ide> "instanceFamily": "GPU instance", <ide> "instanceType": "g3.16xlarge", <del> "instancesku": "AGUP76HYFFKHT5EX", <ide> "intelAvx2Available": "Yes", <ide> "intelAvxAvailable": "Yes", <ide> "intelTurboAvailable": "Yes", <ide> "gpu": "1", <ide> "instanceFamily": "GPU instance", <ide> "instanceType": "g3.4xlarge", <del> "instancesku": "28RUQ9PS5YD6PR57", <add> "instancesku": "YEFTEAFSUA38NK4A", <ide> "intelAvx2Available": 
"Yes", <ide> "intelAvxAvailable": "Yes", <ide> "intelTurboAvailable": "Yes", <ide> "gpu": "2", <ide> "instanceFamily": "GPU instance", <ide> "instanceType": "g3.8xlarge", <del> "instancesku": "8M2K822NMB3CT72E", <ide> "intelAvx2Available": "Yes", <ide> "intelAvxAvailable": "Yes", <ide> "intelTurboAvailable": "Yes", <ide> "gpu": "1", <ide> "instanceFamily": "GPU instance", <ide> "instanceType": "g3s.xlarge", <add> "instancesku": "4JCEMPTA6ZG8REPE", <ide> "memory": "30.5 GiB", <ide> "networkPerformance": "10 Gigabit", <ide> "normalizationSizeFactor": "8", <ide> "gpu": "4", <ide> "instanceFamily": "GPU instance", <ide> "instanceType": "g4dn.12xlarge", <del> "instancesku": "XDV6NVR8PQRQYASE", <ide> "memory": "192 GiB", <ide> "networkPerformance": "50 Gigabit", <ide> "normalizationSizeFactor": "NA", <ide> "gpu": "1", <ide> "instanceFamily": "GPU instance", <ide> "instanceType": "g4dn.16xlarge", <add> "instancesku": "G3VSWQ88ZFAVQZFG", <ide> "memory": "256 GiB", <ide> "networkPerformance": "50 Gigabit", <ide> "normalizationSizeFactor": "NA", <ide> "gpu": "1", <ide> "instanceFamily": "GPU instance", <ide> "instanceType": "g4dn.2xlarge", <add> "instancesku": "WBQ84N4TGPV8868R", <ide> "memory": "32 GiB", <ide> "networkPerformance": "Up to 25 Gigabit", <ide> "normalizationSizeFactor": "NA", <ide> "gpu": "1", <ide> "instanceFamily": "GPU instance", <ide> "instanceType": "g4dn.4xlarge", <add> "instancesku": "CBNPETASHUK9VAAG", <ide> "memory": "64 GiB", <ide> "networkPerformance": "Up to 25 Gigabit", <ide> "normalizationSizeFactor": "NA", <ide> "gpu": "1", <ide> "instanceFamily": "GPU instance", <ide> "instanceType": "g4dn.8xlarge", <del> "instancesku": "E2KBVHZAUMXQ8PPN", <add> "instancesku": "5U3QZD9PAJ7H9NR2", <ide> "memory": "128 GiB", <ide> "networkPerformance": "50 Gigabit", <ide> "normalizationSizeFactor": "NA", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Storage optimized", <ide> "instanceType": "h1.8xlarge", <del> "instancesku": 
"KXZVS9AJBTN5NBSE", <ide> "memory": "128 GiB", <ide> "networkPerformance": "10 Gigabit", <ide> "normalizationSizeFactor": "64", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Storage optimized", <ide> "instanceType": "i2.8xlarge", <add> "instancesku": "5D53HDM83GKREZNX", <ide> "memory": "244 GiB", <ide> "networkPerformance": "10 Gigabit", <ide> "normalizationSizeFactor": "64", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Storage optimized", <ide> "instanceType": "i3.16xlarge", <del> "instancesku": "5YNBUXM52P4MX56V", <ide> "memory": "488 GiB", <ide> "networkPerformance": "20 Gigabit", <ide> "normalizationSizeFactor": "128", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Storage optimized", <ide> "instanceType": "i3.4xlarge", <del> "instancesku": "UXX64BVQZGDDAYH4", <add> "instancesku": "5TYYNYSY7E4RM6S7", <ide> "memory": "122 GiB", <ide> "networkPerformance": "Up to 10 Gigabit", <ide> "normalizationSizeFactor": "32", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Storage optimized", <ide> "instanceType": "i3.8xlarge", <del> "instancesku": "QYM6MFKVAX2HVNZ7", <ide> "memory": "244 GiB", <ide> "networkPerformance": "10 Gigabit", <ide> "normalizationSizeFactor": "64", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Storage optimized", <ide> "instanceType": "i3.large", <del> "instancesku": "7AJJ9AVBD4XPHVGG", <add> "instancesku": "VG6CXQR4KKMNWZF7", <ide> "memory": "15.25 GiB", <ide> "networkPerformance": "Up to 10 Gigabit", <ide> "normalizationSizeFactor": "4", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Storage optimized", <ide> "instanceType": "i3.xlarge", <add> "instancesku": "VNMNCDMHG3ZNYGZK", <ide> "memory": "30.5 GiB", <ide> "networkPerformance": "Up to 10 Gigabit", <ide> "normalizationSizeFactor": "8", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Storage optimized", <ide> "instanceType": "i3en.24xlarge", 
<del> "instancesku": "BDHZM4REJAGAW8M9", <ide> "intelAvx2Available": "Yes", <ide> "intelAvxAvailable": "Yes", <ide> "intelTurboAvailable": "Yes", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Storage optimized", <ide> "instanceType": "i3en.2xlarge", <del> "instancesku": "8VTDFU6DK2DEYP4B", <ide> "intelAvx2Available": "Yes", <ide> "intelAvxAvailable": "Yes", <ide> "intelTurboAvailable": "Yes", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Storage optimized", <ide> "instanceType": "i3en.3xlarge", <del> "instancesku": "A8KEEFUZKFYD2HWX", <ide> "intelAvx2Available": "Yes", <ide> "intelAvxAvailable": "Yes", <ide> "intelTurboAvailable": "Yes", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Storage optimized", <ide> "instanceType": "i3en.6xlarge", <add> "instancesku": "YFHT2PN4G9PER4PP", <ide> "intelAvx2Available": "Yes", <ide> "intelAvxAvailable": "Yes", <ide> "intelTurboAvailable": "Yes", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Storage optimized", <ide> "instanceType": "i3en.large", <del> "instancesku": "AJZY6HSPNN38FGVN", <ide> "intelAvx2Available": "Yes", <ide> "intelAvxAvailable": "Yes", <ide> "intelTurboAvailable": "Yes", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Storage optimized", <ide> "instanceType": "i3en.xlarge", <del> "instancesku": "WPFEXAJ5SY5J52CA", <add> "instancesku": "W27NUGCZPBTDP8QZ", <ide> "intelAvx2Available": "Yes", <ide> "intelAvxAvailable": "Yes", <ide> "intelTurboAvailable": "Yes", <ide> "name": "i3en.xlarge", <ide> "ram": 32768 <ide> }, <add> "inf1.24xlarge": { <add> "bandwidth": 100, <add> "disk": 0, <add> "extra": { <add> "currentGeneration": "Yes", <add> "ecu": "NA", <add> "instanceFamily": "Machine Learning ASIC Instances", <add> "instanceType": "inf1.24xlarge", <add> "instancesku": "M2ADJHDGNEW2FNRX", <add> "memory": "192 GiB", <add> "networkPerformance": "100 Gigabit", <add> "normalizationSizeFactor": "NA", <add> 
"physicalProcessor": "Intel Xeon Platinum 8275CL (Cascade Lake)", <add> "processorArchitecture": "64-bit", <add> "servicecode": "AmazonEC2", <add> "servicename": "Amazon Elastic Compute Cloud", <add> "storage": "EBS only", <add> "vcpu": "96" <add> }, <add> "id": "inf1.24xlarge", <add> "name": "inf1.24xlarge", <add> "ram": 196608 <add> }, <add> "inf1.2xlarge": { <add> "bandwidth": 25, <add> "disk": 0, <add> "extra": { <add> "currentGeneration": "Yes", <add> "ecu": "NA", <add> "instanceFamily": "Machine Learning ASIC Instances", <add> "instanceType": "inf1.2xlarge", <add> "instancesku": "N9WZQPSM7FWEMU5G", <add> "memory": "16 GiB", <add> "networkPerformance": "Up to 25 Gigabit", <add> "normalizationSizeFactor": "NA", <add> "physicalProcessor": "Intel Xeon Platinum 8275CL (Cascade Lake)", <add> "processorArchitecture": "64-bit", <add> "servicecode": "AmazonEC2", <add> "servicename": "Amazon Elastic Compute Cloud", <add> "storage": "EBS only", <add> "vcpu": "8" <add> }, <add> "id": "inf1.2xlarge", <add> "name": "inf1.2xlarge", <add> "ram": 16384 <add> }, <add> "inf1.6xlarge": { <add> "bandwidth": 25, <add> "disk": 0, <add> "extra": { <add> "currentGeneration": "Yes", <add> "ecu": "NA", <add> "instanceFamily": "Machine Learning ASIC Instances", <add> "instanceType": "inf1.6xlarge", <add> "instancesku": "9JUMD6WZN9Q26XBF", <add> "memory": "48 GiB", <add> "networkPerformance": "25 Gigabit", <add> "normalizationSizeFactor": "NA", <add> "physicalProcessor": "Intel Xeon Platinum 8275CL (Cascade Lake)", <add> "processorArchitecture": "64-bit", <add> "servicecode": "AmazonEC2", <add> "servicename": "Amazon Elastic Compute Cloud", <add> "storage": "EBS only", <add> "vcpu": "24" <add> }, <add> "id": "inf1.6xlarge", <add> "name": "inf1.6xlarge", <add> "ram": 49152 <add> }, <add> "inf1.xlarge": { <add> "bandwidth": 25, <add> "disk": 0, <add> "extra": { <add> "currentGeneration": "Yes", <add> "ecu": "NA", <add> "instanceFamily": "Machine Learning ASIC Instances", <add> 
"instanceType": "inf1.xlarge", <add> "memory": "8 GiB", <add> "networkPerformance": "Up to 25 Gigabit", <add> "normalizationSizeFactor": "NA", <add> "physicalProcessor": "Intel Xeon Platinum 8275CL (Cascade Lake)", <add> "processorArchitecture": "64-bit", <add> "servicecode": "AmazonEC2", <add> "servicename": "Amazon Elastic Compute Cloud", <add> "storage": "EBS only", <add> "vcpu": "4" <add> }, <add> "id": "inf1.xlarge", <add> "name": "inf1.xlarge", <add> "ram": 8192 <add> }, <ide> "m1.large": { <ide> "bandwidth": None, <ide> "disk": 840, <ide> "ecu": "26", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m3.2xlarge", <del> "instancesku": "TQV69AZUDVU7GUKY", <ide> "memory": "30 GiB", <ide> "networkPerformance": "High", <ide> "normalizationSizeFactor": "16", <ide> "ecu": "6.5", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m3.large", <del> "instancesku": "RR4F5JD777HAHSYH", <ide> "memory": "7.5 GiB", <ide> "networkPerformance": "Moderate", <ide> "normalizationSizeFactor": "4", <ide> "ecu": "13", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m3.xlarge", <del> "instancesku": "3BJTRXMEV7KYHD38", <ide> "memory": "15 GiB", <ide> "networkPerformance": "High", <ide> "normalizationSizeFactor": "8", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m4.16xlarge", <del> "instancesku": "X2X24YX27R2KBUUW", <ide> "memory": "256 GiB", <ide> "networkPerformance": "20 Gigabit", <ide> "normalizationSizeFactor": "128", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m4.2xlarge", <add> "instancesku": "Y5C5MDBJXUNFSVUV", <ide> "memory": "32 GiB", <ide> "networkPerformance": "High", <ide> "normalizationSizeFactor": "16", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m4.large", <del> "instancesku": "5JBYKNTWNAS9ZFFH", <add> "instancesku": 
"ZTRYHCJDHUC65SBA", <ide> "memory": "8 GiB", <ide> "networkPerformance": "Moderate", <ide> "normalizationSizeFactor": "4", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m4.xlarge", <add> "instancesku": "C77QEBMPYEYRVR72", <ide> "memory": "16 GiB", <ide> "networkPerformance": "High", <ide> "normalizationSizeFactor": "8", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m5.16xlarge", <del> "instancesku": "SBFN2XK3Y5BG7EXF", <ide> "memory": "256 GiB", <ide> "networkPerformance": "20 Gigabit", <ide> "normalizationSizeFactor": "128", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m5.24xlarge", <del> "instancesku": "P4KSJG44UZK55VM6", <add> "instancesku": "TE59NSHRJZP5DUNQ", <ide> "memory": "384 GiB", <ide> "networkPerformance": "25 Gigabit", <ide> "normalizationSizeFactor": "192", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m5.8xlarge", <del> "instancesku": "ZR8YQCJPBVPHYKFP", <ide> "memory": "128 GiB", <ide> "networkPerformance": "10 Gigabit", <ide> "normalizationSizeFactor": "64", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m5.large", <add> "instancesku": "VY7MYHUCD3HXH59H", <ide> "memory": "8 GiB", <ide> "networkPerformance": "Up to 10 Gigabit", <ide> "normalizationSizeFactor": "4", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m5.metal", <del> "instancesku": "66YYQZC96S8EX4ZK", <add> "instancesku": "ZPNARMXP87XD5V8K", <ide> "memory": "384 GiB", <ide> "networkPerformance": "25 Gigabit", <ide> "normalizationSizeFactor": "192", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m5.xlarge", <del> "instancesku": "KECMHH6X4HWTHXK2", 
<ide> "memory": "16 GiB", <ide> "networkPerformance": "Up to 10 Gigabit", <ide> "normalizationSizeFactor": "8", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m5a.12xlarge", <add> "instancesku": "ZKS6CMH399KJB2B7", <ide> "memory": "192 GiB", <ide> "networkPerformance": "10 Gigabit", <ide> "normalizationSizeFactor": "96", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m5a.16xlarge", <add> "instancesku": "8DNRCXVBRG7F4MSH", <ide> "memory": "256 GiB", <ide> "networkPerformance": "12 Gigabit", <ide> "normalizationSizeFactor": "128", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m5a.24xlarge", <del> "instancesku": "STFBNQMSZYS6MD2H", <ide> "memory": "384 GiB", <ide> "networkPerformance": "20 Gigabit", <ide> "normalizationSizeFactor": "192", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m5a.2xlarge", <add> "instancesku": "7UXUFAXY2RPS8HHZ", <ide> "memory": "32 GiB", <ide> "networkPerformance": "Up to 10 Gigabit", <ide> "normalizationSizeFactor": "16", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m5a.4xlarge", <del> "instancesku": "W972UYZ8QMMHJJX6", <add> "instancesku": "WEZTMEFQKVPD5VT9", <ide> "memory": "64 GiB", <ide> "networkPerformance": "Up to 10 Gigabit", <ide> "normalizationSizeFactor": "32", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m5a.8xlarge", <del> "instancesku": "XFGHXE8V3UANKE74", <add> "instancesku": "Z5XC8PP48PT53DD4", <ide> "memory": "128 GiB", <ide> "networkPerformance": "Up to 10 Gigabit", <ide> "normalizationSizeFactor": "64", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m5a.large", <add> "instancesku": 
"HPBR5QJBN8QZ2Y2F", <ide> "memory": "8 GiB", <ide> "networkPerformance": "Up to 10 Gigabit", <ide> "normalizationSizeFactor": "4", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m5a.xlarge", <del> "instancesku": "M9CQ62ACEAGYAAXG", <ide> "memory": "16 GiB", <ide> "networkPerformance": "Up to 10 Gigabit", <ide> "normalizationSizeFactor": "8", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m5ad.12xlarge", <add> "instancesku": "ZQX6BBMVYUHNFTPS", <ide> "memory": "192 GiB", <ide> "networkPerformance": "10 Gigabit", <ide> "normalizationSizeFactor": "96", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m5ad.16xlarge", <add> "instancesku": "XC3AVTWJS88Y8KEE", <ide> "memory": "256 GiB", <ide> "networkPerformance": "12 Gigabit", <ide> "normalizationSizeFactor": "128", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m5ad.24xlarge", <add> "instancesku": "ZBFUWBTJHVFF7EB5", <ide> "memory": "384 GiB", <ide> "networkPerformance": "20 Gigabit", <ide> "normalizationSizeFactor": "192", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m5ad.2xlarge", <del> "instancesku": "TKUSAWKQ4TZ3SG7Z", <add> "instancesku": "3CZ42QWHYUZ7RB38", <ide> "memory": "32 GiB", <ide> "networkPerformance": "Up to 10 Gigabit", <ide> "normalizationSizeFactor": "16", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m5ad.4xlarge", <del> "instancesku": "SKWWRATAKFV3VHPD", <ide> "memory": "64 GiB", <ide> "networkPerformance": "Up to 10 Gigabit", <ide> "normalizationSizeFactor": "32", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m5ad.8xlarge", <del> "instancesku": "R2SVZ2KACPAPRG4Q", 
<add> "instancesku": "QYFJ9Z87HETGGS68", <ide> "memory": "128 GiB", <ide> "networkPerformance": "Up to 10 Gigabit", <ide> "normalizationSizeFactor": "64", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m5ad.large", <del> "instancesku": "WE4T4UPYM8K6QPAX", <add> "instancesku": "B4RUKMBYKEBBGHE8", <ide> "memory": "8 GiB", <ide> "networkPerformance": "Up to 10 Gigabit", <ide> "normalizationSizeFactor": "4", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m5ad.xlarge", <del> "instancesku": "7ZFGKJH2SKTXNXUS", <add> "instancesku": "B7P4C2M94TNDSCXT", <ide> "memory": "16 GiB", <ide> "networkPerformance": "Up to 10 Gigabit", <ide> "normalizationSizeFactor": "8", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m5d.12xlarge", <add> "instancesku": "PANNPQGG6NQMG9ZE", <ide> "memory": "192 GiB", <ide> "networkPerformance": "10 Gigabit", <ide> "normalizationSizeFactor": "96", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m5d.16xlarge", <del> "instancesku": "EK9X9WR5G9DKJG22", <add> "instancesku": "48B3N7VQAWGK5TPT", <ide> "memory": "256 GiB", <ide> "networkPerformance": "20 Gigabit", <ide> "normalizationSizeFactor": "128", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m5d.8xlarge", <del> "instancesku": "7R9VZQR8QYY72SSD", <ide> "memory": "128 GiB", <ide> "networkPerformance": "10 Gigabit", <ide> "normalizationSizeFactor": "64", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m5d.large", <del> "instancesku": "39F2ZK2XQ6APYVCQ", <add> "instancesku": "HWFFPM96N6XXNF49", <ide> "memory": "8 GiB", <ide> "networkPerformance": "Up to 10 Gigabit", <ide> "normalizationSizeFactor": "4", <ide> 
"enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m5d.metal", <del> "instancesku": "J8SWUH8ESMKRCSUM", <add> "instancesku": "CZBVP7YKTQ4QE49D", <ide> "memory": "384 GiB", <ide> "networkPerformance": "25 Gigabit", <ide> "normalizationSizeFactor": "192", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m5d.xlarge", <del> "instancesku": "7D7GHDJPW28VS9Q6", <ide> "memory": "16 GiB", <ide> "networkPerformance": "Up to 10 Gigabit", <ide> "normalizationSizeFactor": "8", <ide> "ecu": "NA", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m5dn.24xlarge", <del> "instancesku": "VMCNTW9AFPF6G9SR", <add> "instancesku": "ES7UA8VJBKH4JAYD", <ide> "memory": "384 GiB", <ide> "networkPerformance": "100 Gigabit", <ide> "normalizationSizeFactor": "192", <ide> "ecu": "NA", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m5dn.2xlarge", <del> "instancesku": "A9JSU2TJ3WG95YFE", <add> "instancesku": "47VYCAQ72FTCJXNS", <ide> "memory": "32 GiB", <ide> "networkPerformance": "Up to 25 Gigabit", <ide> "normalizationSizeFactor": "16", <ide> "ecu": "NA", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m5dn.4xlarge", <del> "instancesku": "8PK6TTPSU6M3BEYW", <ide> "memory": "64 GiB", <ide> "networkPerformance": "Up to 25 Gigabit", <ide> "normalizationSizeFactor": "32", <ide> "ecu": "NA", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m5dn.8xlarge", <add> "instancesku": "BR6WTCKQBQZZ8Q5J", <ide> "memory": "128 GiB", <ide> "networkPerformance": "25 Gigabit", <ide> "normalizationSizeFactor": "64", <ide> "ecu": "NA", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m5dn.large", <add> "instancesku": "SUQDDWTD96359XRS", <ide> "memory": "8 GiB", <ide> "networkPerformance": "Up to 25 Gigabit", <ide> "normalizationSizeFactor": "4", <ide> "ecu": "NA", <ide> "instanceFamily": "General purpose", <ide> 
"instanceType": "m5dn.xlarge", <del> "instancesku": "WFFXKEJD37RBUZ7N", <ide> "memory": "16 GiB", <ide> "networkPerformance": "Up to 25 Gigabit", <ide> "normalizationSizeFactor": "8", <ide> "ecu": "NA", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m5n.12xlarge", <del> "instancesku": "Y8E5X9WCMT4DSRR4", <ide> "memory": "192 GiB", <ide> "networkPerformance": "50 Gigabit", <ide> "normalizationSizeFactor": "96", <ide> "ecu": "NA", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m5n.16xlarge", <del> "instancesku": "8FU6NVSYBEFDYARH", <add> "instancesku": "F53Q533RRE4RQ4NA", <ide> "memory": "256 GiB", <ide> "networkPerformance": "75 Gigabit", <ide> "normalizationSizeFactor": "128", <ide> "ecu": "NA", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m5n.24xlarge", <del> "instancesku": "RKRFJ587XABVAPJG", <ide> "memory": "384 GiB", <ide> "networkPerformance": "100 Gigabit", <ide> "normalizationSizeFactor": "192", <ide> "ecu": "NA", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m5n.2xlarge", <add> "instancesku": "8HNQ8E28W85FVT8G", <ide> "memory": "32 GiB", <ide> "networkPerformance": "Up to 25 Gigabit", <ide> "normalizationSizeFactor": "16", <ide> "ecu": "NA", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m5n.4xlarge", <del> "instancesku": "B4XTFHJSPKUU45V5", <ide> "memory": "64 GiB", <ide> "networkPerformance": "Up to 25 Gigabit", <ide> "normalizationSizeFactor": "32", <ide> "ecu": "NA", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m5n.8xlarge", <del> "instancesku": "FWW6UQG8RRHQGKH2", <ide> "memory": "128 GiB", <ide> "networkPerformance": "25 Gigabit", <ide> "normalizationSizeFactor": "64", <ide> "ecu": "NA", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "m5n.large", <del> "instancesku": "4HTQRGZ7UAN6CVDT", <ide> "memory": "8 GiB", <ide> "networkPerformance": "Up to 25 Gigabit", <ide> "normalizationSizeFactor": "4", <ide> "name": 
"m5n.xlarge", <ide> "ram": 16384 <ide> }, <add> "m6g.12xlarge": { <add> "bandwidth": 12, <add> "disk": 0, <add> "extra": { <add> "currentGeneration": "Yes", <add> "ecu": "NA", <add> "instanceFamily": "General purpose", <add> "instanceType": "m6g.12xlarge", <add> "instancesku": "WXS95TSP9GYK5XM3", <add> "memory": "192 GiB", <add> "networkPerformance": "12 Gigabit", <add> "normalizationSizeFactor": "96", <add> "physicalProcessor": "AWS Graviton2 Processor", <add> "processorArchitecture": "64-bit", <add> "servicecode": "AmazonEC2", <add> "servicename": "Amazon Elastic Compute Cloud", <add> "storage": "EBS only", <add> "vcpu": "48" <add> }, <add> "id": "m6g.12xlarge", <add> "name": "m6g.12xlarge", <add> "ram": 196608 <add> }, <add> "m6g.16xlarge": { <add> "bandwidth": 25, <add> "disk": 0, <add> "extra": { <add> "currentGeneration": "Yes", <add> "ecu": "NA", <add> "instanceFamily": "General purpose", <add> "instanceType": "m6g.16xlarge", <add> "instancesku": "ZWBXRUA2ZJGEWZK8", <add> "memory": "256 GiB", <add> "networkPerformance": "25 Gigabit", <add> "normalizationSizeFactor": "128", <add> "physicalProcessor": "AWS Graviton2 Processor", <add> "processorArchitecture": "64-bit", <add> "servicecode": "AmazonEC2", <add> "servicename": "Amazon Elastic Compute Cloud", <add> "storage": "EBS only", <add> "vcpu": "64" <add> }, <add> "id": "m6g.16xlarge", <add> "name": "m6g.16xlarge", <add> "ram": 262144 <add> }, <add> "m6g.2xlarge": { <add> "bandwidth": 10, <add> "disk": 0, <add> "extra": { <add> "currentGeneration": "Yes", <add> "ecu": "NA", <add> "instanceFamily": "General purpose", <add> "instanceType": "m6g.2xlarge", <add> "instancesku": "X64NQNC7CU8T4Q9N", <add> "memory": "32 GiB", <add> "networkPerformance": "Up to 10 Gigabit", <add> "normalizationSizeFactor": "16", <add> "physicalProcessor": "AWS Graviton2 Processor", <add> "processorArchitecture": "64-bit", <add> "servicecode": "AmazonEC2", <add> "servicename": "Amazon Elastic Compute Cloud", <add> "storage": "EBS 
only", <add> "vcpu": "8" <add> }, <add> "id": "m6g.2xlarge", <add> "name": "m6g.2xlarge", <add> "ram": 32768 <add> }, <add> "m6g.4xlarge": { <add> "bandwidth": 10, <add> "disk": 0, <add> "extra": { <add> "currentGeneration": "Yes", <add> "ecu": "NA", <add> "instanceFamily": "General purpose", <add> "instanceType": "m6g.4xlarge", <add> "instancesku": "3AMNDVWUPXYDQW8Q", <add> "memory": "64 GiB", <add> "networkPerformance": "Up to 10 Gigabit", <add> "normalizationSizeFactor": "32", <add> "physicalProcessor": "AWS Graviton2 Processor", <add> "processorArchitecture": "64-bit", <add> "servicecode": "AmazonEC2", <add> "servicename": "Amazon Elastic Compute Cloud", <add> "storage": "EBS only", <add> "vcpu": "16" <add> }, <add> "id": "m6g.4xlarge", <add> "name": "m6g.4xlarge", <add> "ram": 65536 <add> }, <add> "m6g.8xlarge": { <add> "bandwidth": 10, <add> "disk": 0, <add> "extra": { <add> "currentGeneration": "Yes", <add> "ecu": "NA", <add> "instanceFamily": "General purpose", <add> "instanceType": "m6g.8xlarge", <add> "memory": "128 GiB", <add> "networkPerformance": "10 Gigabit", <add> "normalizationSizeFactor": "64", <add> "physicalProcessor": "AWS Graviton2 Processor", <add> "processorArchitecture": "64-bit", <add> "servicecode": "AmazonEC2", <add> "servicename": "Amazon Elastic Compute Cloud", <add> "storage": "EBS only", <add> "vcpu": "32" <add> }, <add> "id": "m6g.8xlarge", <add> "name": "m6g.8xlarge", <add> "ram": 131072 <add> }, <add> "m6g.large": { <add> "bandwidth": 10, <add> "disk": 0, <add> "extra": { <add> "currentGeneration": "Yes", <add> "ecu": "NA", <add> "instanceFamily": "General purpose", <add> "instanceType": "m6g.large", <add> "instancesku": "6QMSUE84AB5NCJA6", <add> "memory": "8 GiB", <add> "networkPerformance": "Up to 10 Gigabit", <add> "normalizationSizeFactor": "4", <add> "physicalProcessor": "AWS Graviton2 Processor", <add> "processorArchitecture": "64-bit", <add> "servicecode": "AmazonEC2", <add> "servicename": "Amazon Elastic Compute Cloud", 
<add> "storage": "EBS only", <add> "vcpu": "2" <add> }, <add> "id": "m6g.large", <add> "name": "m6g.large", <add> "ram": 8192 <add> }, <add> "m6g.medium": { <add> "bandwidth": 10, <add> "disk": 0, <add> "extra": { <add> "currentGeneration": "Yes", <add> "ecu": "NA", <add> "instanceFamily": "General purpose", <add> "instanceType": "m6g.medium", <add> "instancesku": "5UHWBR46QJZ28H8Z", <add> "memory": "4 GiB", <add> "networkPerformance": "Up to 10 Gigabit", <add> "normalizationSizeFactor": "2", <add> "physicalProcessor": "AWS Graviton2 Processor", <add> "processorArchitecture": "64-bit", <add> "servicecode": "AmazonEC2", <add> "servicename": "Amazon Elastic Compute Cloud", <add> "storage": "EBS only", <add> "vcpu": "1" <add> }, <add> "id": "m6g.medium", <add> "name": "m6g.medium", <add> "ram": 4096 <add> }, <add> "m6g.xlarge": { <add> "bandwidth": 10, <add> "disk": 0, <add> "extra": { <add> "currentGeneration": "Yes", <add> "ecu": "NA", <add> "instanceFamily": "General purpose", <add> "instanceType": "m6g.xlarge", <add> "instancesku": "XHX3DRZGR8M3TWTH", <add> "memory": "16 GiB", <add> "networkPerformance": "Up to 10 Gigabit", <add> "normalizationSizeFactor": "8", <add> "physicalProcessor": "AWS Graviton2 Processor", <add> "processorArchitecture": "64-bit", <add> "servicecode": "AmazonEC2", <add> "servicename": "Amazon Elastic Compute Cloud", <add> "storage": "EBS only", <add> "vcpu": "4" <add> }, <add> "id": "m6g.xlarge", <add> "name": "m6g.xlarge", <add> "ram": 16384 <add> }, <ide> "p2.16xlarge": { <ide> "bandwidth": 20, <ide> "disk": 0, <ide> "gpu": "16", <ide> "instanceFamily": "GPU instance", <ide> "instanceType": "p2.16xlarge", <del> "instancesku": "HQ9UDBT567DHB26B", <add> "instancesku": "QDNG8WDAUR3FP7H9", <ide> "memory": "768 GiB", <ide> "networkPerformance": "20 Gigabit", <ide> "normalizationSizeFactor": "128", <ide> "gpu": "8", <ide> "instanceFamily": "GPU instance", <ide> "instanceType": "p2.8xlarge", <del> "instancesku": "PHZYKS4DKKZQTHE2", <add> 
"instancesku": "UAKHM4ASYH9KFBED", <ide> "memory": "488 GiB", <ide> "networkPerformance": "10 Gigabit", <ide> "normalizationSizeFactor": "64", <ide> "gpu": "1", <ide> "instanceFamily": "GPU instance", <ide> "instanceType": "p3.2xlarge", <del> "instancesku": "BUEPRDN9GYBNSP3F", <add> "instancesku": "WQGN888DZWVH9QNY", <ide> "memory": "61 GiB", <ide> "networkPerformance": "Up to 10 Gigabit", <ide> "normalizationSizeFactor": "16", <ide> "gpu": "8", <ide> "instanceFamily": "GPU instance", <ide> "instanceType": "p3dn.24xlarge", <add> "instancesku": "M7YPWRJ6SN5FBU9E", <ide> "memory": "768 GiB", <ide> "networkPerformance": "100 Gigabit", <ide> "normalizationSizeFactor": "192", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r3.2xlarge", <del> "instancesku": "Q38HDN9FW76EPN89", <add> "instancesku": "WNYWP7QUJ3MU8NVV", <ide> "memory": "61 GiB", <ide> "networkPerformance": "High", <ide> "normalizationSizeFactor": "16", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r3.4xlarge", <del> "instancesku": "CGFRHNKJF44GQJCF", <ide> "memory": "122 GiB", <ide> "networkPerformance": "High", <ide> "normalizationSizeFactor": "32", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r3.8xlarge", <del> "instancesku": "DPJTFM4923H5Y3HR", <ide> "memory": "244 GiB", <ide> "networkPerformance": "10 Gigabit", <ide> "normalizationSizeFactor": "64", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r3.large", <del> "instancesku": "M43GX99BWR3FHZVF", <add> "instancesku": "Y9V4TXXZGFP2AE74", <ide> "memory": "15.25 GiB", <ide> "networkPerformance": "Moderate", <ide> "normalizationSizeFactor": "4", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r4.16xlarge", <del> "instancesku": 
"68WRF68KY39ACAQ7", <ide> "memory": "488 GiB", <ide> "networkPerformance": "20 Gigabit", <ide> "normalizationSizeFactor": "128", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r4.2xlarge", <del> "instancesku": "CK9D6MQST3JZGPVU", <add> "instancesku": "UEKM65C5TGR2WQSQ", <ide> "memory": "61 GiB", <ide> "networkPerformance": "Up to 10 Gigabit", <ide> "normalizationSizeFactor": "16", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r4.4xlarge", <add> "instancesku": "2S5Y7E4PPC4BYDAQ", <ide> "memory": "122 GiB", <ide> "networkPerformance": "Up to 10 Gigabit", <ide> "normalizationSizeFactor": "32", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r4.8xlarge", <del> "instancesku": "K9MZNNCDC6CYZ8VT", <add> "instancesku": "FMSC9YRVXPCMYSS8", <ide> "memory": "244 GiB", <ide> "networkPerformance": "10 Gigabit", <ide> "normalizationSizeFactor": "64", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r4.large", <add> "instancesku": "QQ2WZEUN8QPX5PMW", <ide> "memory": "15.25 GiB", <ide> "networkPerformance": "Up to 10 Gigabit", <ide> "normalizationSizeFactor": "4", <ide> "ecu": "173", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r5.12xlarge", <add> "instancesku": "DXKWCMFPJUWSEGUR", <ide> "memory": "384 GiB", <ide> "networkPerformance": "10 Gigabit", <ide> "normalizationSizeFactor": "96", <ide> "ecu": "262", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r5.16xlarge", <del> "instancesku": "YVXVZ7PA394KP8J9", <ide> "memory": "512 GiB", <ide> "networkPerformance": "20 Gigabit", <ide> "normalizationSizeFactor": "128", <ide> "ecu": "347", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r5.24xlarge", <del> "instancesku": "VGPZNYJU4BFPGPKB", <ide> "memory": "768 GiB", <ide> 
"networkPerformance": "25 Gigabit", <ide> "normalizationSizeFactor": "192", <ide> "ecu": "71", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r5.4xlarge", <add> "instancesku": "B8XGV7V3C9Q4AYY2", <ide> "memory": "128 GiB", <ide> "networkPerformance": "10 Gigabit", <ide> "normalizationSizeFactor": "32", <ide> "ecu": "131", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r5.8xlarge", <add> "instancesku": "Z7M6NHX8E6R8B76E", <ide> "memory": "256 GiB", <ide> "networkPerformance": "10 Gigabit", <ide> "normalizationSizeFactor": "64", <ide> "ecu": "9", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r5.large", <del> "instancesku": "Q7KYRBA7JAZ27AP3", <ide> "memory": "16 GiB", <ide> "networkPerformance": "10 Gigabit", <ide> "normalizationSizeFactor": "4", <ide> "ecu": "19", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r5.xlarge", <add> "instancesku": "RMMH5HE9GS5F6GYJ", <ide> "memory": "32 GiB", <ide> "networkPerformance": "10 Gigabit", <ide> "normalizationSizeFactor": "8", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r5a.12xlarge", <del> "instancesku": "B6UKFSTPUKGQGFPF", <add> "instancesku": "X6BCRUAZXSXADAUG", <ide> "memory": "384 GiB", <ide> "networkPerformance": "10 Gigabit", <ide> "normalizationSizeFactor": "96", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r5a.16xlarge", <del> "instancesku": "B5JRBEPNNQMYV3BK", <add> "instancesku": "2FTPQY75JWEAVEEG", <ide> "memory": "512 GiB", <ide> "networkPerformance": "12 Gigabit", <ide> "normalizationSizeFactor": "128", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r5a.24xlarge", <del> "instancesku": "2JBJ68W3VPDUWJB2", <add> "instancesku": "CS73RGAQDKF8K57N", <ide> "memory": "768 GiB", <ide> "networkPerformance": "20 Gigabit", <ide> 
"normalizationSizeFactor": "192", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r5a.2xlarge", <del> "instancesku": "CCTZ4ZMDKVZC997H", <ide> "memory": "64 GiB", <ide> "networkPerformance": "10 Gigabit", <ide> "normalizationSizeFactor": "16", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r5a.4xlarge", <add> "instancesku": "BN4KDYWR63ZG4DNN", <ide> "memory": "128 GiB", <ide> "networkPerformance": "10 Gigabit", <ide> "normalizationSizeFactor": "32", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r5a.8xlarge", <del> "instancesku": "VKMKTRZRKPWPW5Z2", <add> "instancesku": "XQ965Z83JH9PQTN9", <ide> "memory": "256 GiB", <ide> "networkPerformance": "Up to 10 Gigabit", <ide> "normalizationSizeFactor": "64", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r5a.xlarge", <del> "instancesku": "42FMVC2NSPDAE7E7", <ide> "memory": "32 GiB", <ide> "networkPerformance": "10 Gigabit", <ide> "normalizationSizeFactor": "8", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r5ad.12xlarge", <add> "instancesku": "FYM25RMMBRJZD6E4", <ide> "memory": "384 GiB", <ide> "networkPerformance": "10 Gigabit", <ide> "normalizationSizeFactor": "96", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r5ad.16xlarge", <add> "instancesku": "P2Q2KBJFAH6NB3YJ", <ide> "memory": "512 GiB", <ide> "networkPerformance": "12 Gigabit", <ide> "normalizationSizeFactor": "128", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r5ad.24xlarge", <del> "instancesku": "4HHK972ZX33G4SDA", <add> "instancesku": "S45XRQBRG6SRGBB6", <ide> "memory": "768 GiB", <ide> "networkPerformance": "20 
Gigabit", <ide> "normalizationSizeFactor": "192", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r5ad.2xlarge", <add> "instancesku": "9PSQBVAW344QUBJZ", <ide> "memory": "64 GiB", <ide> "networkPerformance": "10 Gigabit", <ide> "normalizationSizeFactor": "16", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r5ad.4xlarge", <del> "instancesku": "QNE9MDEAXAKD2R9M", <ide> "memory": "128 GiB", <ide> "networkPerformance": "10 Gigabit", <ide> "normalizationSizeFactor": "32", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r5ad.8xlarge", <del> "instancesku": "4SJE5XXWMB5M8HMH", <add> "instancesku": "5BRUFFXS6DZWBU6Z", <ide> "memory": "256 GiB", <ide> "networkPerformance": "Up to 10 Gigabit", <ide> "normalizationSizeFactor": "64", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r5ad.large", <del> "instancesku": "QMXWJB935V36EHMY", <ide> "memory": "16 GiB", <ide> "networkPerformance": "10 Gigabit", <ide> "normalizationSizeFactor": "4", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r5ad.xlarge", <del> "instancesku": "3YEJAM5J2CXJHQT8", <add> "instancesku": "945KN6RZ9RHHP6SE", <ide> "memory": "32 GiB", <ide> "networkPerformance": "10 Gigabit", <ide> "normalizationSizeFactor": "8", <ide> "ecu": "173", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r5d.12xlarge", <del> "instancesku": "UGB4B7ZEFQWRXWUW", <add> "instancesku": "UZCG9B9DDS7F4RVT", <ide> "memory": "384 GiB", <ide> "networkPerformance": "10 Gigabit", <ide> "normalizationSizeFactor": "96", <ide> "ecu": "262", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r5d.16xlarge", <del> "instancesku": "PHUUU55SYPVASZ2Y", <add> "instancesku": "R3G7NYKYUB88W5F9", <ide> "memory": 
"512 GiB", <ide> "networkPerformance": "20 Gigabit", <ide> "normalizationSizeFactor": "128", <ide> "ecu": "347", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r5d.24xlarge", <del> "instancesku": "U6UN58XSFDT64ZP5", <ide> "memory": "768 GiB", <ide> "networkPerformance": "25 Gigabit", <ide> "normalizationSizeFactor": "192", <ide> "ecu": "38", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r5d.2xlarge", <del> "instancesku": "2QDMX62UJDRXJ7NJ", <ide> "memory": "64 GiB", <ide> "networkPerformance": "10 Gigabit", <ide> "normalizationSizeFactor": "16", <ide> "ecu": "131", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r5d.8xlarge", <add> "instancesku": "RM4QPZQ2J8BVXCJQ", <ide> "memory": "256 GiB", <ide> "networkPerformance": "10 Gigabit", <ide> "normalizationSizeFactor": "64", <ide> "ecu": "10", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r5d.large", <add> "instancesku": "6DGWHWNHJ9R2JD4M", <ide> "memory": "16 GiB", <ide> "networkPerformance": "10 Gigabit", <ide> "normalizationSizeFactor": "4", <ide> "ecu": "19", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r5d.xlarge", <add> "instancesku": "JJSSHRVCN23XPH2Q", <ide> "memory": "32 GiB", <ide> "networkPerformance": "10 Gigabit", <ide> "normalizationSizeFactor": "8", <ide> "ecu": "NA", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r5dn.12xlarge", <del> "instancesku": "ZHURE4JS447KMM2S", <ide> "memory": "384 GiB", <ide> "networkPerformance": "50 Gigabit", <ide> "normalizationSizeFactor": "96", <ide> "ecu": "NA", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r5dn.16xlarge", <add> "instancesku": "5CX55PVXM987JFNB", <ide> "memory": "512 GiB", <ide> "networkPerformance": "75 Gigabit", <ide> "normalizationSizeFactor": "128", <ide> "ecu": "NA", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r5dn.24xlarge", <del> "instancesku": "BAP6QNQX6PMZERRG", <add> 
"instancesku": "R5RA9YQMGU8NV4H5", <ide> "memory": "768 GiB", <ide> "networkPerformance": "100 Gigabit", <ide> "normalizationSizeFactor": "192", <ide> "ecu": "NA", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r5dn.2xlarge", <add> "instancesku": "WT5GUUC98EJXBSUN", <ide> "memory": "64 GiB", <ide> "networkPerformance": "Up to 25 Gigabit", <ide> "normalizationSizeFactor": "16", <ide> "ecu": "NA", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r5dn.8xlarge", <add> "instancesku": "TJDZYS6EDD2ENFUF", <ide> "memory": "256 GiB", <ide> "networkPerformance": "25 Gigabit", <ide> "normalizationSizeFactor": "64", <ide> "ecu": "NA", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r5dn.xlarge", <del> "instancesku": "TWPKUS5NHW7Z4NB8", <add> "instancesku": "ZVDVMR78JGJKF4SU", <ide> "memory": "32 GiB", <ide> "networkPerformance": "Up to 25 Gigabit", <ide> "normalizationSizeFactor": "8", <ide> "ecu": "NA", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r5n.16xlarge", <del> "instancesku": "UVPZAB7XTXQ9YY4Y", <add> "instancesku": "6RCKQK4ZDS4RDZMW", <ide> "memory": "512 GiB", <ide> "networkPerformance": "75 Gigabit", <ide> "normalizationSizeFactor": "128", <ide> "ecu": "NA", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r5n.24xlarge", <del> "instancesku": "MZJXBUVPSVQR5VRQ", <add> "instancesku": "9FAVMPT4X6AYYFGA", <ide> "memory": "768 GiB", <ide> "networkPerformance": "100 Gigabit", <ide> "normalizationSizeFactor": "192", <ide> "ecu": "NA", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r5n.2xlarge", <del> "instancesku": "XTVBTGKRNJDNQY2Q", <ide> "memory": "64 GiB", <ide> "networkPerformance": "Up to 25 Gigabit", <ide> "normalizationSizeFactor": "16", <ide> "ecu": "NA", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r5n.4xlarge", <add> "instancesku": "C5TVWHHUXN79MRF7", <ide> "memory": "128 GiB", <ide> "networkPerformance": "Up to 25 Gigabit", 
<ide> "normalizationSizeFactor": "32", <ide> "ecu": "NA", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r5n.8xlarge", <del> "instancesku": "5R8AMPQEDQRVVHZE", <add> "instancesku": "JZC7NQ2B2QNKCTUB", <ide> "memory": "256 GiB", <ide> "networkPerformance": "25 Gigabit", <ide> "normalizationSizeFactor": "64", <ide> "ecu": "NA", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r5n.large", <del> "instancesku": "USJDNWUBTR27X6TS", <ide> "memory": "16 GiB", <ide> "networkPerformance": "Up to 25 Gigabit", <ide> "normalizationSizeFactor": "4", <ide> "ecu": "NA", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "r5n.xlarge", <add> "instancesku": "Q2VTVZF9TSAVSVHK", <ide> "memory": "32 GiB", <ide> "networkPerformance": "Up to 25 Gigabit", <ide> "normalizationSizeFactor": "8", <ide> "ecu": "Variable", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "t2.2xlarge", <del> "instancesku": "P66QF9GGMTFWWDQX", <add> "instancesku": "2S29GABT3GMS28E4", <ide> "memory": "32 GiB", <ide> "networkPerformance": "Moderate", <ide> "normalizationSizeFactor": "16", <ide> "ecu": "Variable", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "t2.large", <del> "instancesku": "BX22DHCNGN3TH6Q4", <add> "instancesku": "GKT3FV8CG9PTDG5F", <ide> "memory": "8 GiB", <ide> "networkPerformance": "Low to Moderate", <ide> "normalizationSizeFactor": "4", <ide> "ecu": "Variable", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "t2.medium", <del> "instancesku": "HKEURG3XPD7FP4F4", <ide> "memory": "4 GiB", <ide> "networkPerformance": "Low to Moderate", <ide> "normalizationSizeFactor": "2", <ide> "ecu": "Variable", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "t2.micro", <del> "instancesku": "DZNVM5GF7B6J3K95", <ide> "memory": "1 GiB", <ide> "networkPerformance": "Low to Moderate", <ide> "normalizationSizeFactor": "0.5", <ide> "ecu": "Variable", <ide> "instanceFamily": "General purpose", <ide> 
"instanceType": "t2.nano", <del> "instancesku": "BQ8B7YSYSXKFY5Y5", <add> "instancesku": "FVTK6U2583MJNX7T", <ide> "memory": "0.5 GiB", <ide> "networkPerformance": "Low", <ide> "normalizationSizeFactor": "0.25", <ide> "ecu": "Variable", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "t2.small", <del> "instancesku": "DYK3G4TC7PX38CFA", <ide> "memory": "2 GiB", <ide> "networkPerformance": "Low to Moderate", <ide> "normalizationSizeFactor": "1", <ide> "ecu": "Variable", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "t2.xlarge", <del> "instancesku": "ZJEZ75T3VEYNWUPJ", <add> "instancesku": "6VG367TBGT66TM4N", <ide> "memory": "16 GiB", <ide> "networkPerformance": "Moderate", <ide> "normalizationSizeFactor": "8", <ide> "ecu": "Variable", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "t3.2xlarge", <del> "instancesku": "55UC8P9F84WSQ43Y", <add> "instancesku": "XM7SWUCRKTXZPGHV", <ide> "memory": "32 GiB", <ide> "networkPerformance": "Moderate", <ide> "normalizationSizeFactor": "16", <ide> "ecu": "Variable", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "t3.large", <del> "instancesku": "48RF27XU9N6AAAX3", <ide> "memory": "8 GiB", <ide> "networkPerformance": "Low to Moderate", <ide> "normalizationSizeFactor": "4", <ide> "ecu": "Variable", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "t3.medium", <add> "instancesku": "GJYPAT44A5QSM2B7", <ide> "memory": "4 GiB", <ide> "networkPerformance": "Low to Moderate", <ide> "normalizationSizeFactor": "2", <ide> "ecu": "Variable", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "t3.nano", <del> "instancesku": "K7ERD2Q28HHU97DT", <ide> "memory": "0.5 GiB", <ide> "networkPerformance": "Low", <ide> "normalizationSizeFactor": "0.25", <ide> "ecu": "Variable", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "t3.small", <add> "instancesku": "VKZYCWKA2GS726XR", <ide> "memory": "2 GiB", <ide> "networkPerformance": "Low 
to Moderate", <ide> "normalizationSizeFactor": "1", <ide> "ecu": "Variable", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "t3.xlarge", <del> "instancesku": "QHCKDW8QRJS8S924", <ide> "memory": "16 GiB", <ide> "networkPerformance": "Moderate", <ide> "normalizationSizeFactor": "8", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "t3a.2xlarge", <del> "instancesku": "W3RGR848XT7KYFX3", <ide> "memory": "32 GiB", <ide> "networkPerformance": "Moderate", <ide> "normalizationSizeFactor": "16", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "t3a.large", <del> "instancesku": "7XVJY9GMQFZ5ZJRB", <add> "instancesku": "B7EA6QRS7T5YJNMF", <ide> "memory": "8 GiB", <ide> "networkPerformance": "Low to Moderate", <ide> "normalizationSizeFactor": "4", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "t3a.medium", <del> "instancesku": "7N48GZQZNJTZ58JB", <ide> "memory": "4 GiB", <ide> "networkPerformance": "Low to Moderate", <ide> "normalizationSizeFactor": "2", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "t3a.micro", <del> "instancesku": "SGFUDXSZXY8TA5AX", <add> "instancesku": "87N7TWAE8HTWZKPU", <ide> "memory": "1 GiB", <ide> "networkPerformance": "Low to Moderate", <ide> "normalizationSizeFactor": "0.5", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "t3a.nano", <del> "instancesku": "PK2KTRY9P5937ZV9", <add> "instancesku": "5S8CWFNMNH323ENK", <ide> "memory": "0.5 GiB", <ide> "networkPerformance": "Low", <ide> "normalizationSizeFactor": "0.25", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "t3a.small", <del> "instancesku": "U86SPCA6K7UXUXQA", <add> "instancesku": "SW75JWM3MZKMRWQA", <ide> "memory": 
"2 GiB", <ide> "networkPerformance": "Low to Moderate", <ide> "normalizationSizeFactor": "1", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "General purpose", <ide> "instanceType": "t3a.xlarge", <add> "instancesku": "E5WX8TZA5VT6PA2W", <ide> "memory": "16 GiB", <ide> "networkPerformance": "Moderate", <ide> "normalizationSizeFactor": "8", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "x1.16xlarge", <del> "instancesku": "PCP7P8UCEAVWH57Z", <add> "instancesku": "PNB52N8KWWS43WKU", <ide> "intelAvx2Available": "Yes", <ide> "intelAvxAvailable": "Yes", <ide> "intelTurboAvailable": "Yes", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "x1.32xlarge", <add> "instancesku": "VZ27JH33XWY95P3Q", <ide> "intelAvx2Available": "Yes", <ide> "intelAvxAvailable": "Yes", <ide> "intelTurboAvailable": "Yes", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "x1e.16xlarge", <add> "instancesku": "TTJN9QZRH883NTME", <ide> "intelAvx2Available": "Yes", <ide> "intelAvxAvailable": "Yes", <ide> "intelTurboAvailable": "Yes", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "x1e.2xlarge", <del> "instancesku": "9VVY42SVXEGDEQN4", <add> "instancesku": "5992AUV2Z5GF7S4U", <ide> "intelAvx2Available": "Yes", <ide> "intelAvxAvailable": "Yes", <ide> "memory": "244 GiB", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "x1e.32xlarge", <del> "instancesku": "BG3VCUR74QWTZ5VC", <add> "instancesku": "5WG865KFK85P29F7", <ide> "intelAvx2Available": "Yes", <ide> "intelAvxAvailable": "Yes", <ide> "intelTurboAvailable": "Yes", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "x1e.8xlarge", <del> "instancesku": "9NSWXFN6ZG86B66F", 
<add> "instancesku": "UC54XWQVHQPRYB9K", <ide> "intelAvx2Available": "Yes", <ide> "intelAvxAvailable": "Yes", <ide> "intelTurboAvailable": "Yes", <ide> "enhancedNetworkingSupported": "Yes", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "x1e.xlarge", <del> "instancesku": "J9NXUGCPGSVTFAXU", <add> "instancesku": "HFQ8AMCHDHAYYKXJ", <ide> "intelAvx2Available": "Yes", <ide> "intelAvxAvailable": "Yes", <ide> "memory": "122 GiB", <ide> "ecu": "53", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "z1d.2xlarge", <add> "instancesku": "ATNCMJYDTUQK6GJA", <ide> "memory": "64 GiB", <ide> "networkPerformance": "Up to 10 Gigabit", <ide> "normalizationSizeFactor": "16", <ide> "ecu": "75", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "z1d.3xlarge", <add> "instancesku": "BEQQVCQEY5EUR8JX", <ide> "memory": "96 GiB", <ide> "networkPerformance": "Up to 10 Gigabit", <ide> "normalizationSizeFactor": "24", <ide> "ecu": "134", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "z1d.6xlarge", <add> "instancesku": "DCUZ2ZK4HDM58BQZ", <ide> "memory": "192 GiB", <ide> "networkPerformance": "10 Gigabit", <ide> "normalizationSizeFactor": "48", <ide> "ecu": "15", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "z1d.large", <del> "instancesku": "BE34BU8ZDTG7UA54", <ide> "memory": "16 GiB", <ide> "networkPerformance": "Up to 10 Gigabit", <ide> "normalizationSizeFactor": "4", <ide> "ecu": "28", <ide> "instanceFamily": "Memory optimized", <ide> "instanceType": "z1d.xlarge", <del> "instancesku": "64GH9ET6WYBMDNQY", <ide> "memory": "32 GiB", <ide> "networkPerformance": "Up to 10 Gigabit", <ide> "normalizationSizeFactor": "8", <ide> "endpoint": "ec2.ap-southeast-1.amazonaws.com", <ide> "id": "ap-southeast-1", <ide> "instance_types": [ <add> "a1.2xlarge", <add> "a1.4xlarge", <add> "a1.large", <add> "a1.medium", <add> "a1.metal", <add> "a1.xlarge", <ide> "c1.medium", <ide> "c1.xlarge", <ide> "c3.2xlarge", <ide> 
"t3a.small", <ide> "t3a.xlarge", <ide> "x1.16xlarge", <del> "x1.32xlarge" <add> "x1.32xlarge", <add> "x1e.16xlarge", <add> "x1e.2xlarge", <add> "x1e.32xlarge", <add> "x1e.4xlarge", <add> "x1e.8xlarge", <add> "x1e.xlarge" <ide> ], <ide> "signature_version": "4" <ide> }, <ide> "i3en.6xlarge", <ide> "i3en.large", <ide> "i3en.xlarge", <add> "inf1.24xlarge", <add> "inf1.2xlarge", <add> "inf1.6xlarge", <add> "inf1.xlarge", <ide> "m1.large", <ide> "m1.medium", <ide> "m1.small", <ide> "m5n.large", <ide> "m5n.metal", <ide> "m5n.xlarge", <add> "m6g.12xlarge", <add> "m6g.16xlarge", <add> "m6g.2xlarge", <add> "m6g.4xlarge", <add> "m6g.8xlarge", <add> "m6g.large", <add> "m6g.medium", <add> "m6g.xlarge", <ide> "p2.16xlarge", <ide> "p2.8xlarge", <ide> "p2.xlarge", <ide> "i3en.6xlarge", <ide> "i3en.large", <ide> "i3en.xlarge", <add> "inf1.24xlarge", <add> "inf1.2xlarge", <add> "inf1.6xlarge", <add> "inf1.xlarge", <ide> "m1.large", <ide> "m1.medium", <ide> "m1.small",
1
Text
Text
add es6 introduction to js certification section
f94d1ede927b43af81defe808010fd13f237c1e3
<ide><path>client/src/pages/guide/english/certifications/javascript-algorithms-and-data-structures/es6/index.md <ide> title: ES6 <ide> --- <ide> ## ES6 <ide> <del>This is a stub. <a href='https://github.com/freecodecamp/guides/tree/master/src/pages/mathematics/quadratic-equations/index.md' target='_blank' rel='nofollow'>Help our community expand it</a>. <add>ES6 also known as ECMAScript6 or ES2015, is the latest widely accepted sets of rules and conventions laid out and standardized by Ecma International. Various new features such as constant value variables and arrow functions have been introduced in this new version. Major web browsers support some features of ES6. However, it is possible to use a transpiler to convert ES6 code into ES5, which is better supported on most browsers. <ide> <del><a href='https://github.com/freecodecamp/guides/blob/master/README.md' target='_blank' rel='nofollow'>This quick style guide will help ensure your pull request gets accepted</a>. <del> <del><!-- The article goes here, in GitHub-flavored Markdown. Feel free to add YouTube videos, images, and CodePen/JSBin embeds --> <del> <del>#### More Information: <del><!-- Please add any articles you think might be helpful to read before writing the article --> <add> #### More Information: <add> <add>1. [Wikipedia | Ecma International](https://en.wikipedia.org/wiki/Ecma_International) <add>2. [Wikipedia | ECMAScript](https://en.wikipedia.org/wiki/ECMAScript)
1
Javascript
Javascript
move cssvar out of react-native-github
d815d1df9a5fef1db7666da6802fb231f1f0aa1f
<ide><path>Examples/UIExplorer/NavigationExperimental/LegacyNavigator/NavigationBarSample.js <ide> var { <ide> TouchableOpacity, <ide> } = React; <ide> <del>var cssVar = require('cssVar'); <del> <ide> var Navigator = NavigationExperimental.LegacyNavigator; <ide> <ide> class NavButton extends React.Component { <ide> var styles = StyleSheet.create({ <ide> marginVertical: 10, <ide> }, <ide> navBarTitleText: { <del> color: cssVar('fbui-bluegray-60'), <add> color: '#373E4D', <ide> fontWeight: '500', <ide> marginVertical: 9, <ide> }, <ide> var styles = StyleSheet.create({ <ide> paddingRight: 10, <ide> }, <ide> navBarButtonText: { <del> color: cssVar('fbui-accent-blue'), <add> color: '#5890FF', <ide> }, <ide> scene: { <ide> flex: 1, <ide><path>Examples/UIExplorer/Navigator/NavigationBarSample.js <ide> var { <ide> TouchableOpacity, <ide> } = React; <ide> <del>var cssVar = require('cssVar'); <del> <ide> class NavButton extends React.Component { <ide> render() { <ide> return ( <ide> var styles = StyleSheet.create({ <ide> marginVertical: 10, <ide> }, <ide> navBarTitleText: { <del> color: cssVar('fbui-bluegray-60'), <add> color: '#373E4D', <ide> fontWeight: '500', <ide> marginVertical: 9, <ide> }, <ide> var styles = StyleSheet.create({ <ide> paddingRight: 10, <ide> }, <ide> navBarButtonText: { <del> color: cssVar('fbui-accent-blue'), <add> color: '#5890FF', <ide> }, <ide> scene: { <ide> flex: 1, <ide><path>Libraries/Utilities/CSSVarConfig.js <del>/** <del> * Copyright 2004-present Facebook. All Rights Reserved. 
<del> * <del> * @providesModule CSSVarConfig <del> */ <del>'use strict'; <del> <del>module.exports = { <del> 'fbui-accent-blue': '#5890ff', <del> 'fbui-bluegray-80': '#141823', <del> 'fbui-bluegray-60': '#373e4d', <del> 'fbui-bluegray-50': '#4e5665', <del> 'fbui-bluegray-40': '#6a7180', <del> 'fbui-bluegray-30': '#9197a3', <del> 'fbui-bluegray-20': '#bdc1c9', <del> 'fbui-bluegray-10': '#dcdee3', <del> 'fbui-bluegray-5': '#e9eaed', <del> 'fbui-bluegray-2': '#f6f7f8', <del> 'fbui-gray-80': '#333333', <del> 'fbui-gray-60': '#666666', <del> 'fbui-gray-40': '#999999', <del> 'fbui-gray-20': '#cccccc', <del> 'fbui-gray-10': '#e5e5e5', <del> 'fbui-gray-5': '#f2f2f2', <del> 'fbui-gray-2': '#fafafa', <del> 'fbui-red': '#dc3847', <del> 'x-mobile-medium-text': '#6a7180', <del>}; <ide><path>Libraries/Utilities/cssVar.js <del>/** <del> * Copyright 2004-present Facebook. All Rights Reserved. <del> * <del> * @providesModule cssVar <del> * @typechecks <del> */ <del>'use strict'; <del> <del>var invariant = require('fbjs/lib/invariant'); <del>var CSSVarConfig = require('CSSVarConfig'); <del> <del>var cssVar = function(/*string*/ key) /*string*/ { <del> invariant(CSSVarConfig[key], 'invalid css variable ' + key); <del> <del> return CSSVarConfig[key]; <del>}; <del> <del>module.exports = cssVar;
4
Ruby
Ruby
use helper method to get env['content_length']
c61b3ce0f5f4203821838c9b1ca00c851192f22b
<ide><path>actionpack/lib/action_dispatch/http/request.rb <ide> def server_software <ide> def raw_post <ide> unless @env.include? 'RAW_POST_DATA' <ide> raw_post_body = body <del> @env['RAW_POST_DATA'] = raw_post_body.read(@env['CONTENT_LENGTH'].to_i) <add> @env['RAW_POST_DATA'] = raw_post_body.read(content_length) <ide> raw_post_body.rewind if raw_post_body.respond_to?(:rewind) <ide> end <ide> @env['RAW_POST_DATA']
1
Javascript
Javascript
convert global variables to local variables
9e64158db30626c7ee1ae70c4b7caecd70a37d51
<ide><path>examples/js/loaders/ColladaLoader.js <ide> THREE.ColladaLoader = function () { <ide> var bones = []; <ide> setupSkeleton( skeleton, bones, -1 ); <ide> setupSkinningMatrices( bones, skinController.skin ); <del> v = new THREE.Vector3(); <add> var v = new THREE.Vector3(); <ide> var skinned = []; <ide> <ide> for (var i = 0; i < geometry.vertices.length; i ++) { <ide> THREE.ColladaLoader = function () { <ide> <ide> if ( bones[ i ].type != 'JOINT' ) continue; <ide> <del> for ( j = 0; j < bones[ i ].weights.length; j ++ ) { <add> for ( var j = 0; j < bones[ i ].weights.length; j ++ ) { <ide> <del> w = bones[ i ].weights[ j ]; <del> vidx = w.index; <del> weight = w.weight; <add> var w = bones[ i ].weights[ j ]; <add> var vidx = w.index; <add> var weight = w.weight; <ide> <del> o = geometry.vertices[vidx]; <del> s = skinned[vidx]; <add> var o = geometry.vertices[vidx]; <add> var s = skinned[vidx]; <ide> <ide> v.x = o.x; <ide> v.y = o.y; <ide> THREE.ColladaLoader = function () { <ide> <ide> function loadTextureImage ( texture, url ) { <ide> <del> loader = new THREE.ImageLoader(); <add> var loader = new THREE.ImageLoader(); <ide> <ide> loader.load( url, function ( image ) { <ide>
1
Text
Text
use a better imperative mood example
aebca38ae6984993b712f122703fba5c14632471
<ide><path>docs/contributing.md <ide> dependencies up to date by running `apm update` after pulling upstream changes. <ide> <ide> ### Commit Message Guidelines <ide> * Use the present tense ("Add feature" not "Added feature") <del> * Use the imperative mood ("Fix bug" not "Fixes bug") <add> * Use the imperative mood ("Move cursor to..." not "Moves cursor to...") <ide> * Limit the first line to 72 characters or less <ide> * Reference issues and pull requests liberally <ide> * Consider starting the commit message with an applicable emoji:
1
Javascript
Javascript
preserve implicit method names
bda199de0440a8dd8d329f1443decd67b9792abc
<ide><path>src/browser/ui/ReactDOMComponent.js <ide> ReactDOMComponent.Mixin = { <ide> * @param {number} mountDepth number of components in the owner hierarchy <ide> * @return {string} The computed markup. <ide> */ <del> mountComponent: ReactPerf.measure( <del> 'ReactDOMComponent', <del> 'mountComponent', <del> function(rootID, transaction, mountDepth, context) { <del> invariant(context !== undefined, "Context is required parameter"); <del> ReactComponent.Mixin.mountComponent.call( <del> this, <del> rootID, <del> transaction, <del> mountDepth, <del> context <del> ); <del> this._rootNodeID = rootID; <del> assertValidProps(this._currentElement.props); <del> var closeTag = omittedCloseTags[this._tag] ? '' : '</' + this._tag + '>'; <del> return ( <del> this._createOpenTagMarkupAndPutListeners(transaction) + <del> this._createContentMarkup(transaction, context) + <del> closeTag <del> ); <del> } <del> ), <add> mountComponent: function(rootID, transaction, mountDepth, context) { <add> invariant(context !== undefined, "Context is required parameter"); <add> ReactComponent.Mixin.mountComponent.call( <add> this, <add> rootID, <add> transaction, <add> mountDepth, <add> context <add> ); <add> this._rootNodeID = rootID; <add> assertValidProps(this._currentElement.props); <add> var closeTag = omittedCloseTags[this._tag] ? '' : '</' + this._tag + '>'; <add> return ( <add> this._createOpenTagMarkupAndPutListeners(transaction) + <add> this._createContentMarkup(transaction, context) + <add> closeTag <add> ); <add> }, <ide> <ide> /** <ide> * Creates markup for the open tag and all attributes. 
<ide> ReactDOMComponent.Mixin = { <ide> * @internal <ide> * @overridable <ide> */ <del> updateComponent: ReactPerf.measure( <del> 'ReactDOMComponent', <del> 'updateComponent', <del> function(transaction, prevElement, nextElement, context) { <del> if(context === undefined) throw new Error("Context required for mounting"); <del> if(context === null) throw new Error("Assert: context is not null"); <del> assertValidProps(this._currentElement.props); <del> ReactComponent.Mixin.updateComponent.call( <del> this, <del> transaction, <del> prevElement, <del> nextElement, <del> context <del> ); <del> this._updateDOMProperties(prevElement.props, transaction); <del> this._updateDOMChildren(prevElement.props, transaction, context); <del> } <del> ), <add> updateComponent: function(transaction, prevElement, nextElement, context) { <add> if(context === undefined) throw new Error("Context required for mounting"); <add> if(context === null) throw new Error("Assert: context is not null"); <add> assertValidProps(this._currentElement.props); <add> ReactComponent.Mixin.updateComponent.call( <add> this, <add> transaction, <add> prevElement, <add> nextElement, <add> context <add> ); <add> this._updateDOMProperties(prevElement.props, transaction); <add> this._updateDOMChildren(prevElement.props, transaction, context); <add> }, <ide> <ide> /** <ide> * Reconciles the properties by detecting differences in property values and <ide> ReactDOMComponent.Mixin = { <ide> <ide> }; <ide> <add>ReactPerf.measureMethods(ReactDOMComponent, 'ReactDOMComponent', { <add> mountComponent: 'mountComponent', <add> updateComponent: 'updateComponent' <add>}); <add> <ide> assign( <ide> ReactDOMComponent.prototype, <ide> ReactComponent.Mixin, <ide><path>src/browser/ui/ReactDOMIDOperations.js <ide> var ReactDOMIDOperations = { <ide> * @param {*} value New value of the property. 
<ide> * @internal <ide> */ <del> updatePropertyByID: ReactPerf.measure( <del> 'ReactDOMIDOperations', <del> 'updatePropertyByID', <del> function(id, name, value) { <del> var node = ReactMount.getNode(id); <del> invariant( <del> !INVALID_PROPERTY_ERRORS.hasOwnProperty(name), <del> 'updatePropertyByID(...): %s', <del> INVALID_PROPERTY_ERRORS[name] <del> ); <del> <del> // If we're updating to null or undefined, we should remove the property <del> // from the DOM node instead of inadvertantly setting to a string. This <del> // brings us in line with the same behavior we have on initial render. <del> if (value != null) { <del> DOMPropertyOperations.setValueForProperty(node, name, value); <del> } else { <del> DOMPropertyOperations.deleteValueForProperty(node, name); <del> } <add> updatePropertyByID: function(id, name, value) { <add> var node = ReactMount.getNode(id); <add> invariant( <add> !INVALID_PROPERTY_ERRORS.hasOwnProperty(name), <add> 'updatePropertyByID(...): %s', <add> INVALID_PROPERTY_ERRORS[name] <add> ); <add> <add> // If we're updating to null or undefined, we should remove the property <add> // from the DOM node instead of inadvertantly setting to a string. This <add> // brings us in line with the same behavior we have on initial render. <add> if (value != null) { <add> DOMPropertyOperations.setValueForProperty(node, name, value); <add> } else { <add> DOMPropertyOperations.deleteValueForProperty(node, name); <ide> } <del> ), <add> }, <ide> <ide> /** <ide> * Updates a DOM node to remove a property. This should only be used to remove <ide> var ReactDOMIDOperations = { <ide> * @param {string} name A property name to remove, see `DOMProperty`. 
<ide> * @internal <ide> */ <del> deletePropertyByID: ReactPerf.measure( <del> 'ReactDOMIDOperations', <del> 'deletePropertyByID', <del> function(id, name, value) { <del> var node = ReactMount.getNode(id); <del> invariant( <del> !INVALID_PROPERTY_ERRORS.hasOwnProperty(name), <del> 'updatePropertyByID(...): %s', <del> INVALID_PROPERTY_ERRORS[name] <del> ); <del> DOMPropertyOperations.deleteValueForProperty(node, name, value); <del> } <del> ), <add> deletePropertyByID: function(id, name, value) { <add> var node = ReactMount.getNode(id); <add> invariant( <add> !INVALID_PROPERTY_ERRORS.hasOwnProperty(name), <add> 'updatePropertyByID(...): %s', <add> INVALID_PROPERTY_ERRORS[name] <add> ); <add> DOMPropertyOperations.deleteValueForProperty(node, name, value); <add> }, <ide> <ide> /** <ide> * Updates a DOM node with new style values. If a value is specified as '', <ide> var ReactDOMIDOperations = { <ide> * @param {object} styles Mapping from styles to values. <ide> * @internal <ide> */ <del> updateStylesByID: ReactPerf.measure( <del> 'ReactDOMIDOperations', <del> 'updateStylesByID', <del> function(id, styles) { <del> var node = ReactMount.getNode(id); <del> CSSPropertyOperations.setValueForStyles(node, styles); <del> } <del> ), <add> updateStylesByID: function(id, styles) { <add> var node = ReactMount.getNode(id); <add> CSSPropertyOperations.setValueForStyles(node, styles); <add> }, <ide> <ide> /** <ide> * Updates a DOM node's innerHTML. <ide> var ReactDOMIDOperations = { <ide> * @param {string} html An HTML string. <ide> * @internal <ide> */ <del> updateInnerHTMLByID: ReactPerf.measure( <del> 'ReactDOMIDOperations', <del> 'updateInnerHTMLByID', <del> function(id, html) { <del> var node = ReactMount.getNode(id); <del> setInnerHTML(node, html); <del> } <del> ), <add> updateInnerHTMLByID: function(id, html) { <add> var node = ReactMount.getNode(id); <add> setInnerHTML(node, html); <add> }, <ide> <ide> /** <ide> * Updates a DOM node's text content set by `props.content`. 
<ide> var ReactDOMIDOperations = { <ide> * @param {string} content Text content. <ide> * @internal <ide> */ <del> updateTextContentByID: ReactPerf.measure( <del> 'ReactDOMIDOperations', <del> 'updateTextContentByID', <del> function(id, content) { <del> var node = ReactMount.getNode(id); <del> DOMChildrenOperations.updateTextContent(node, content); <del> } <del> ), <add> updateTextContentByID: function(id, content) { <add> var node = ReactMount.getNode(id); <add> DOMChildrenOperations.updateTextContent(node, content); <add> }, <ide> <ide> /** <ide> * Replaces a DOM node that exists in the document with markup. <ide> var ReactDOMIDOperations = { <ide> * @internal <ide> * @see {Danger.dangerouslyReplaceNodeWithMarkup} <ide> */ <del> dangerouslyReplaceNodeWithMarkupByID: ReactPerf.measure( <del> 'ReactDOMIDOperations', <del> 'dangerouslyReplaceNodeWithMarkupByID', <del> function(id, markup) { <add> dangerouslyReplaceNodeWithMarkupByID: function(id, markup) { <ide> var node = ReactMount.getNode(id); <ide> DOMChildrenOperations.dangerouslyReplaceNodeWithMarkup(node, markup); <del> } <del> ), <add> }, <ide> <ide> /** <ide> * Updates a component's children by processing a series of updates. <ide> var ReactDOMIDOperations = { <ide> * @param {array<string>} markup List of markup strings. 
<ide> * @internal <ide> */ <del> dangerouslyProcessChildrenUpdates: ReactPerf.measure( <del> 'ReactDOMIDOperations', <del> 'dangerouslyProcessChildrenUpdates', <del> function(updates, markup) { <del> for (var i = 0; i < updates.length; i++) { <del> updates[i].parentNode = ReactMount.getNode(updates[i].parentID); <del> } <del> DOMChildrenOperations.processUpdates(updates, markup); <add> dangerouslyProcessChildrenUpdates: function(updates, markup) { <add> for (var i = 0; i < updates.length; i++) { <add> updates[i].parentNode = ReactMount.getNode(updates[i].parentID); <ide> } <del> ) <add> DOMChildrenOperations.processUpdates(updates, markup); <add> } <ide> }; <ide> <add>ReactPerf.measureMethods(ReactDOMIDOperations, 'ReactDOMIDOperations', { <add> updatePropertyByID: 'updatePropertyByID', <add> deletePropertyByID: 'deletePropertyByID', <add> updateStylesByID: 'updateStylesByID', <add> updateInnerHTMLByID: 'updateInnerHTMLByID', <add> updateTextContentByID: 'updateTextContentByID', <add> dangerouslyReplaceNodeWithMarkupByID: 'dangerouslyReplaceNodeWithMarkupByID', <add> dangerouslyProcessChildrenUpdates: 'dangerouslyProcessChildrenUpdates' <add>}); <add> <ide> module.exports = ReactDOMIDOperations; <ide><path>src/browser/ui/ReactMount.js <ide> var ReactMount = { <ide> * @param {boolean} shouldReuseMarkup if we should skip the markup insertion <ide> * @return {ReactComponent} nextComponent <ide> */ <del> _renderNewRootComponent: ReactPerf.measure( <del> 'ReactMount', <del> '_renderNewRootComponent', <del> function( <del> nextComponent, <del> container, <del> shouldReuseMarkup) { <del> // Various parts of our code (such as ReactCompositeComponent's <del> // _renderValidatedComponent) assume that calls to render aren't nested; <del> // verify that that's the case. 
<del> warning( <del> ReactCurrentOwner.current == null, <del> '_renderNewRootComponent(): Render methods should be a pure function ' + <del> 'of props and state; triggering nested component updates from ' + <del> 'render is not allowed. If necessary, trigger nested updates in ' + <del> 'componentDidUpdate.' <del> ); <del> <del> var componentInstance = instantiateReactComponent(nextComponent, null); <del> var reactRootID = ReactMount._registerComponent( <del> componentInstance, <del> container <del> ); <del> <del> var transaction = ReactUpdates.ReactReconcileTransaction.getPooled(); <del> transaction.perform( <del> mountComponentIntoNode, <del> componentInstance, <del> reactRootID, <del> container, <del> transaction, <del> shouldReuseMarkup <del> ); <del> ReactUpdates.ReactReconcileTransaction.release(transaction); <del> <del> if (__DEV__) { <del> // Record the root element in case it later gets transplanted. <del> rootElementsByReactRootID[reactRootID] = <del> getReactRootElementInContainer(container); <del> } <add> _renderNewRootComponent: function( <add> nextComponent, <add> container, <add> shouldReuseMarkup <add> ) { <add> // Various parts of our code (such as ReactCompositeComponent's <add> // _renderValidatedComponent) assume that calls to render aren't nested; <add> // verify that that's the case. <add> warning( <add> ReactCurrentOwner.current == null, <add> '_renderNewRootComponent(): Render methods should be a pure function ' + <add> 'of props and state; triggering nested component updates from ' + <add> 'render is not allowed. If necessary, trigger nested updates in ' + <add> 'componentDidUpdate.' 
<add> ); <add> <add> var componentInstance = instantiateReactComponent(nextComponent, null); <add> var reactRootID = ReactMount._registerComponent( <add> componentInstance, <add> container <add> ); <ide> <del> return componentInstance; <add> var transaction = ReactUpdates.ReactReconcileTransaction.getPooled(); <add> transaction.perform( <add> mountComponentIntoNode, <add> componentInstance, <add> reactRootID, <add> container, <add> transaction, <add> shouldReuseMarkup <add> ); <add> ReactUpdates.ReactReconcileTransaction.release(transaction); <add> <add> if (__DEV__) { <add> // Record the root element in case it later gets transplanted. <add> rootElementsByReactRootID[reactRootID] = <add> getReactRootElementInContainer(container); <ide> } <del> ), <add> <add> return componentInstance; <add> }, <ide> <ide> /** <ide> * Renders a React component into the DOM in the supplied `container`. <ide> var ReactMount = { <ide> ); <ide> }, <ide> <del> _mountImageIntoNode: ReactPerf.measure( <del> 'ReactMount', <del> '_mountImageIntoNode', <del> function(markup, container, shouldReuseMarkup) { <del> invariant( <del> container && ( <del> container.nodeType === ELEMENT_NODE_TYPE || <del> container.nodeType === DOC_NODE_TYPE <del> ), <del> 'mountComponentIntoNode(...): Target container is not valid.' <del> ); <del> <del> if (shouldReuseMarkup) { <del> if (ReactMarkupChecksum.canReuseMarkup( <del> markup, <del> getReactRootElementInContainer(container))) { <del> return; <del> } else { <del> invariant( <del> container.nodeType !== DOC_NODE_TYPE, <del> 'You\'re trying to render a component to the document using ' + <del> 'server rendering but the checksum was invalid. This usually ' + <del> 'means you rendered a different component type or props on ' + <del> 'the client from the one on the server, or your render() ' + <del> 'methods are impure. React cannot handle this case due to ' + <del> 'cross-browser quirks by rendering at the document root. 
You ' + <del> 'should look for environment dependent code in your components ' + <del> 'and ensure the props are the same client and server side.' <del> ); <add> _mountImageIntoNode: function(markup, container, shouldReuseMarkup) { <add> invariant( <add> container && ( <add> container.nodeType === ELEMENT_NODE_TYPE || <add> container.nodeType === DOC_NODE_TYPE <add> ), <add> 'mountComponentIntoNode(...): Target container is not valid.' <add> ); <ide> <del> if (__DEV__) { <del> console.warn( <del> 'React attempted to use reuse markup in a container but the ' + <del> 'checksum was invalid. This generally means that you are ' + <del> 'using server rendering and the markup generated on the ' + <del> 'server was not what the client was expecting. React injected ' + <del> 'new markup to compensate which works but you have lost many ' + <del> 'of the benefits of server rendering. Instead, figure out ' + <del> 'why the markup being generated is different on the client ' + <del> 'or server.' <del> ); <del> } <add> if (shouldReuseMarkup) { <add> if (ReactMarkupChecksum.canReuseMarkup( <add> markup, <add> getReactRootElementInContainer(container))) { <add> return; <add> } else { <add> invariant( <add> container.nodeType !== DOC_NODE_TYPE, <add> 'You\'re trying to render a component to the document using ' + <add> 'server rendering but the checksum was invalid. This usually ' + <add> 'means you rendered a different component type or props on ' + <add> 'the client from the one on the server, or your render() ' + <add> 'methods are impure. React cannot handle this case due to ' + <add> 'cross-browser quirks by rendering at the document root. You ' + <add> 'should look for environment dependent code in your components ' + <add> 'and ensure the props are the same client and server side.' <add> ); <add> <add> if (__DEV__) { <add> console.warn( <add> 'React attempted to use reuse markup in a container but the ' + <add> 'checksum was invalid. 
This generally means that you are ' + <add> 'using server rendering and the markup generated on the ' + <add> 'server was not what the client was expecting. React injected ' + <add> 'new markup to compensate which works but you have lost many ' + <add> 'of the benefits of server rendering. Instead, figure out ' + <add> 'why the markup being generated is different on the client ' + <add> 'or server.' <add> ); <ide> } <ide> } <add> } <ide> <del> invariant( <del> container.nodeType !== DOC_NODE_TYPE, <del> 'You\'re trying to render a component to the document but ' + <del> 'you didn\'t use server rendering. We can\'t do this ' + <del> 'without using server rendering due to cross-browser quirks. ' + <del> 'See renderComponentToString() for server rendering.' <del> ); <add> invariant( <add> container.nodeType !== DOC_NODE_TYPE, <add> 'You\'re trying to render a component to the document but ' + <add> 'you didn\'t use server rendering. We can\'t do this ' + <add> 'without using server rendering due to cross-browser quirks. ' + <add> 'See renderComponentToString() for server rendering.' <add> ); <ide> <del> setInnerHTML(container, markup); <del> } <del> ), <add> setInnerHTML(container, markup); <add> }, <ide> <ide> /** <ide> * React ID utilities. 
<ide> var ReactMount = { <ide> purgeID: purgeID <ide> }; <ide> <add>ReactPerf.measureMethods(ReactMount, 'ReactMount', { <add> _renderNewRootComponent: '_renderNewRootComponent', <add> _mountImageIntoNode: '_mountImageIntoNode' <add>}); <add> <ide> // Deprecations (remove for 0.13) <ide> ReactMount.renderComponent = deprecated( <ide> 'ReactMount', <ide><path>src/core/ReactCompositeComponent.js <ide> var ReactCompositeComponentMixin = assign({}, <ide> * @final <ide> * @internal <ide> */ <del> mountComponent: ReactPerf.measure( <del> 'ReactCompositeComponent', <del> 'mountComponent', <del> function(rootID, transaction, mountDepth, context) { <del> invariant(context !== undefined, "Context is required parameter"); <del> ReactComponent.Mixin.mountComponent.call( <del> this, <del> rootID, <del> transaction, <del> mountDepth, <del> context <del> ); <add> mountComponent: function(rootID, transaction, mountDepth, context) { <add> invariant(context !== undefined, "Context is required parameter"); <add> ReactComponent.Mixin.mountComponent.call( <add> this, <add> rootID, <add> transaction, <add> mountDepth, <add> context <add> ); <ide> <del> this._context = context; <del> this._rootNodeID = rootID; <add> this._context = context; <add> this._rootNodeID = rootID; <ide> <del> var inst = this._instance; <add> var inst = this._instance; <ide> <del> // Store a reference from the instance back to the internal representation <del> ReactInstanceMap.set(inst, this); <add> // Store a reference from the instance back to the internal representation <add> ReactInstanceMap.set(inst, this); <ide> <del> this._compositeLifeCycleState = CompositeLifeCycle.MOUNTING; <add> this._compositeLifeCycleState = CompositeLifeCycle.MOUNTING; <ide> <del> inst.context = this._processContext(this._currentElement._context); <del> if (__DEV__) { <del> this._warnIfContextsDiffer(this._currentElement._context, context); <del> } <del> inst.props = this._processProps(this._currentElement.props); <add> inst.context 
= this._processContext(this._currentElement._context); <add> if (__DEV__) { <add> this._warnIfContextsDiffer(this._currentElement._context, context); <add> } <add> inst.props = this._processProps(this._currentElement.props); <ide> <del> var initialState = inst.getInitialState ? inst.getInitialState() : null; <del> if (__DEV__) { <del> // We allow auto-mocks to proceed as if they're returning null. <del> if (typeof initialState === 'undefined' && <del> inst.getInitialState._isMockFunction) { <del> // This is probably bad practice. Consider warning here and <del> // deprecating this convenience. <del> initialState = null; <del> } <del> } <del> invariant( <del> typeof initialState === 'object' && !Array.isArray(initialState), <del> '%s.getInitialState(): must return an object or null', <del> inst.constructor.displayName || 'ReactCompositeComponent' <del> ); <del> inst.state = initialState; <del> <del> this._pendingState = null; <del> this._pendingForceUpdate = false; <del> <del> if (inst.componentWillMount) { <del> inst.componentWillMount(); <del> // When mounting, calls to `setState` by `componentWillMount` will set <del> // `this._pendingState` without triggering a re-render. <del> if (this._pendingState) { <del> inst.state = this._pendingState; <del> this._pendingState = null; <del> } <add> var initialState = inst.getInitialState ? inst.getInitialState() : null; <add> if (__DEV__) { <add> // We allow auto-mocks to proceed as if they're returning null. <add> if (typeof initialState === 'undefined' && <add> inst.getInitialState._isMockFunction) { <add> // This is probably bad practice. Consider warning here and <add> // deprecating this convenience. 
<add> initialState = null; <ide> } <add> } <add> invariant( <add> typeof initialState === 'object' && !Array.isArray(initialState), <add> '%s.getInitialState(): must return an object or null', <add> inst.constructor.displayName || 'ReactCompositeComponent' <add> ); <add> inst.state = initialState; <ide> <del> var renderedElement = this._renderValidatedComponent(); <del> this._renderedComponent = this._instantiateReactComponent( <del> renderedElement, <del> this._currentElement.type // The wrapping type <del> ); <add> this._pendingState = null; <add> this._pendingForceUpdate = false; <ide> <del> // Done with mounting, `setState` will now trigger UI changes. <del> this._compositeLifeCycleState = null; <del> var markup = this._renderedComponent.mountComponent( <del> rootID, <del> transaction, <del> mountDepth + 1, <del> this._processChildContext(context) <del> ); <del> if (inst.componentDidMount) { <del> transaction.getReactMountReady().enqueue(inst.componentDidMount, inst); <add> if (inst.componentWillMount) { <add> inst.componentWillMount(); <add> // When mounting, calls to `setState` by `componentWillMount` will set <add> // `this._pendingState` without triggering a re-render. <add> if (this._pendingState) { <add> inst.state = this._pendingState; <add> this._pendingState = null; <ide> } <del> return markup; <ide> } <del> ), <add> <add> var renderedElement = this._renderValidatedComponent(); <add> this._renderedComponent = this._instantiateReactComponent( <add> renderedElement, <add> this._currentElement.type // The wrapping type <add> ); <add> <add> // Done with mounting, `setState` will now trigger UI changes. 
<add> this._compositeLifeCycleState = null; <add> var markup = this._renderedComponent.mountComponent( <add> rootID, <add> transaction, <add> mountDepth + 1, <add> this._processChildContext(context) <add> ); <add> if (inst.componentDidMount) { <add> transaction.getReactMountReady().enqueue(inst.componentDidMount, inst); <add> } <add> return markup; <add> }, <ide> <ide> /** <ide> * Releases any resources allocated by `mountComponent`. <ide> var ReactCompositeComponentMixin = assign({}, <ide> * @internal <ide> * @overridable <ide> */ <del> updateComponent: ReactPerf.measure( <del> 'ReactCompositeComponent', <del> 'updateComponent', <del> function( <del> transaction, <del> prevParentElement, <del> nextParentElement, <del> prevUnmaskedContext, <del> nextUnmaskedContext) { <del> // Update refs regardless of what shouldComponentUpdate returns <del> ReactComponent.Mixin.updateComponent.call( <del> this, <del> transaction, <del> prevParentElement, <del> nextParentElement, <del> prevUnmaskedContext, <del> nextUnmaskedContext <del> ); <add> updateComponent: function( <add> transaction, <add> prevParentElement, <add> nextParentElement, <add> prevUnmaskedContext, <add> nextUnmaskedContext <add> ) { <add> // Update refs regardless of what shouldComponentUpdate returns <add> ReactComponent.Mixin.updateComponent.call( <add> this, <add> transaction, <add> prevParentElement, <add> nextParentElement, <add> prevUnmaskedContext, <add> nextUnmaskedContext <add> ); <ide> <del> var inst = this._instance; <add> var inst = this._instance; <ide> <del> var prevContext = inst.context; <del> var prevProps = inst.props; <del> var nextContext = prevContext; <del> var nextProps = prevProps; <del> // Distinguish between a props update versus a simple state update <del> if (prevParentElement !== nextParentElement) { <del> nextContext = this._processContext(nextParentElement._context); <del> nextProps = this._processProps(nextParentElement.props); <del> <del> this._compositeLifeCycleState = 
CompositeLifeCycle.RECEIVING_PROPS; <del> if (inst.componentWillReceiveProps) { <del> inst.componentWillReceiveProps(nextProps, nextContext); <del> } <add> var prevContext = inst.context; <add> var prevProps = inst.props; <add> var nextContext = prevContext; <add> var nextProps = prevProps; <add> // Distinguish between a props update versus a simple state update <add> if (prevParentElement !== nextParentElement) { <add> nextContext = this._processContext(nextParentElement._context); <add> nextProps = this._processProps(nextParentElement.props); <add> <add> this._compositeLifeCycleState = CompositeLifeCycle.RECEIVING_PROPS; <add> if (inst.componentWillReceiveProps) { <add> inst.componentWillReceiveProps(nextProps, nextContext); <ide> } <add> } <ide> <del> this._compositeLifeCycleState = null; <add> this._compositeLifeCycleState = null; <ide> <del> var nextState = this._pendingState || inst.state; <del> this._pendingState = null; <add> var nextState = this._pendingState || inst.state; <add> this._pendingState = null; <ide> <del> var shouldUpdate = <del> this._pendingForceUpdate || <del> !inst.shouldComponentUpdate || <del> inst.shouldComponentUpdate(nextProps, nextState, nextContext); <add> var shouldUpdate = <add> this._pendingForceUpdate || <add> !inst.shouldComponentUpdate || <add> inst.shouldComponentUpdate(nextProps, nextState, nextContext); <ide> <del> if (__DEV__) { <del> if (typeof shouldUpdate === "undefined") { <del> console.warn( <del> (inst.constructor.displayName || 'ReactCompositeComponent') + <del> '.shouldComponentUpdate(): Returned undefined instead of a ' + <del> 'boolean value. Make sure to return true or false.' <del> ); <del> } <add> if (__DEV__) { <add> if (typeof shouldUpdate === "undefined") { <add> console.warn( <add> (inst.constructor.displayName || 'ReactCompositeComponent') + <add> '.shouldComponentUpdate(): Returned undefined instead of a ' + <add> 'boolean value. Make sure to return true or false.' 
<add> ); <ide> } <add> } <ide> <del> if (!shouldUpdate) { <del> // If it's determined that a component should not update, we still want <del> // to set props and state but we shortcut the rest of the update. <del> this._currentElement = nextParentElement; <del> this._context = nextUnmaskedContext; <del> inst.props = nextProps; <del> inst.state = nextState; <del> inst.context = nextContext; <del> return; <del> } <add> if (!shouldUpdate) { <add> // If it's determined that a component should not update, we still want <add> // to set props and state but we shortcut the rest of the update. <add> this._currentElement = nextParentElement; <add> this._context = nextUnmaskedContext; <add> inst.props = nextProps; <add> inst.state = nextState; <add> inst.context = nextContext; <add> return; <add> } <ide> <del> this._pendingForceUpdate = false; <del> // Will set `this.props`, `this.state` and `this.context`. <del> this._performComponentUpdate( <del> nextParentElement, <del> nextProps, <del> nextState, <del> nextContext, <del> transaction, <del> nextUnmaskedContext <del> ); <del> }), <add> this._pendingForceUpdate = false; <add> // Will set `this.props`, `this.state` and `this.context`. 
<add> this._performComponentUpdate( <add> nextParentElement, <add> nextProps, <add> nextState, <add> nextContext, <add> transaction, <add> nextUnmaskedContext <add> ); <add> }, <ide> <ide> /** <ide> * Merges new props and state, notifies delegate methods of update and <ide> var ReactCompositeComponentMixin = assign({}, <ide> /** <ide> * @private <ide> */ <del> _renderValidatedComponent: ReactPerf.measure( <del> 'ReactCompositeComponent', <del> '_renderValidatedComponent', <del> function() { <del> var renderedComponent; <del> var previousContext = ReactContext.current; <del> ReactContext.current = this._processChildContext( <del> this._currentElement._context <del> ); <del> ReactCurrentOwner.current = this; <del> var inst = this._instance; <del> try { <del> renderedComponent = <del> this._renderValidatedComponentWithoutOwnerOrContext(); <del> } finally { <del> ReactContext.current = previousContext; <del> ReactCurrentOwner.current = null; <del> } <del> invariant( <del> // TODO: An `isValidNode` function would probably be more appropriate <del> renderedComponent === null || renderedComponent === false || <del> ReactElement.isValidElement(renderedComponent), <del> '%s.render(): A valid ReactComponent must be returned. 
You may have ' + <del> 'returned undefined, an array or some other invalid object.', <del> inst.constructor.displayName || 'ReactCompositeComponent' <del> ); <del> return renderedComponent; <add> _renderValidatedComponent: function() { <add> var renderedComponent; <add> var previousContext = ReactContext.current; <add> ReactContext.current = this._processChildContext( <add> this._currentElement._context <add> ); <add> ReactCurrentOwner.current = this; <add> var inst = this._instance; <add> try { <add> renderedComponent = <add> this._renderValidatedComponentWithoutOwnerOrContext(); <add> } finally { <add> ReactContext.current = previousContext; <add> ReactCurrentOwner.current = null; <ide> } <del> ), <add> invariant( <add> // TODO: An `isValidNode` function would probably be more appropriate <add> renderedComponent === null || renderedComponent === false || <add> ReactElement.isValidElement(renderedComponent), <add> '%s.render(): A valid ReactComponent must be returned. You may have ' + <add> 'returned undefined, an array or some other invalid object.', <add> inst.constructor.displayName || 'ReactCompositeComponent' <add> ); <add> return renderedComponent; <add> }, <ide> <ide> /** <ide> * Lazily allocates the refs object and stores `component` as `ref`. 
<ide> var ShallowMixin = assign({}, <ide> <ide> }); <ide> <add>ReactPerf.measureMethods( <add> ReactCompositeComponentMixin, <add> 'ReactCompositeComponent', <add> { <add> mountComponent: 'mountComponent', <add> updateComponent: 'updateComponent', <add> _renderValidatedComponent: '_renderValidatedComponent' <add> } <add>); <add> <ide> var ReactCompositeComponent = { <ide> <ide> LifeCycle: CompositeLifeCycle, <ide><path>src/core/ReactUpdates.js <ide> function runBatchedUpdates(transaction) { <ide> } <ide> } <ide> <del>var flushBatchedUpdates = ReactPerf.measure( <del> 'ReactUpdates', <del> 'flushBatchedUpdates', <del> function() { <del> // ReactUpdatesFlushTransaction's wrappers will clear the dirtyComponents <del> // array and perform any updates enqueued by mount-ready handlers (i.e., <del> // componentDidUpdate) but we need to check here too in order to catch <del> // updates enqueued by setState callbacks and asap calls. <del> while (dirtyComponents.length || asapEnqueued) { <del> if (dirtyComponents.length) { <del> var transaction = ReactUpdatesFlushTransaction.getPooled(); <del> transaction.perform(runBatchedUpdates, null, transaction); <del> ReactUpdatesFlushTransaction.release(transaction); <del> } <add>var flushBatchedUpdates = function() { <add> // ReactUpdatesFlushTransaction's wrappers will clear the dirtyComponents <add> // array and perform any updates enqueued by mount-ready handlers (i.e., <add> // componentDidUpdate) but we need to check here too in order to catch <add> // updates enqueued by setState callbacks and asap calls. 
<add> while (dirtyComponents.length || asapEnqueued) { <add> if (dirtyComponents.length) { <add> var transaction = ReactUpdatesFlushTransaction.getPooled(); <add> transaction.perform(runBatchedUpdates, null, transaction); <add> ReactUpdatesFlushTransaction.release(transaction); <add> } <ide> <del> if (asapEnqueued) { <del> asapEnqueued = false; <del> var queue = asapCallbackQueue; <del> asapCallbackQueue = CallbackQueue.getPooled(); <del> queue.notifyAll(); <del> CallbackQueue.release(queue); <del> } <add> if (asapEnqueued) { <add> asapEnqueued = false; <add> var queue = asapCallbackQueue; <add> asapCallbackQueue = CallbackQueue.getPooled(); <add> queue.notifyAll(); <add> CallbackQueue.release(queue); <ide> } <ide> } <add>}; <add>flushBatchedUpdates = ReactPerf.measure( <add> 'ReactUpdates', <add> 'flushBatchedUpdates', <add> flushBatchedUpdates <ide> ); <ide> <ide> /** <ide><path>src/test/ReactPerf.js <ide> var ReactPerf = { <ide> */ <ide> storedMeasure: _noMeasure, <ide> <add> /** <add> * @param {object} object <add> * @param {string} objectName <add> * @param {object<string>} methodNames <add> */ <add> measureMethods: function(object, objectName, methodNames) { <add> if (__DEV__) { <add> for (var key in methodNames) { <add> if (!methodNames.hasOwnProperty(key)) { <add> continue; <add> } <add> object[key] = ReactPerf.measure( <add> objectName, <add> methodNames[key], <add> object[key] <add> ); <add> } <add> } <add> }, <add> <ide> /** <ide> * Use this to wrap methods you want to measure. Zero overhead in production. <ide> *
6
Javascript
Javascript
fix createeventhandle bug with comment containers
147179ae82039f38fa1a9a72402f578af8fb3ea3
<ide><path>packages/react-dom/src/client/ReactDOMEventHandle.js <ide> import { <ide> getEventListenerMap, <ide> getFiberFromScopeInstance, <ide> } from './ReactDOMComponentTree'; <del>import {ELEMENT_NODE} from '../shared/HTMLNodeType'; <add>import {ELEMENT_NODE, COMMENT_NODE} from '../shared/HTMLNodeType'; <ide> import { <ide> listenToNativeEvent, <ide> addEventTypeToDispatchConfig, <ide> function registerEventOnNearestTargetContainer( <ide> ): void { <ide> // If it is, find the nearest root or portal and make it <ide> // our event handle target container. <del> const targetContainer = getNearestRootOrPortalContainer(targetFiber); <add> let targetContainer = getNearestRootOrPortalContainer(targetFiber); <ide> if (targetContainer === null) { <ide> invariant( <ide> false, <ide> 'ReactDOM.createEventHandle: setListener called on an target ' + <ide> 'that did not have a corresponding root. This is likely a bug in React.', <ide> ); <ide> } <add> if (targetContainer.nodeType === COMMENT_NODE) { <add> targetContainer = ((targetContainer.parentNode: any): Element); <add> } <ide> const listenerMap = getEventListenerMap(targetContainer); <ide> listenToNativeEvent( <ide> topLevelType, <ide><path>packages/react-dom/src/events/__tests__/DOMModernPluginEventSystem-test.internal.js <ide> describe('DOMModernPluginEventSystem', () => { <ide> <ide> expect(onClick).toHaveBeenCalledTimes(1); <ide> }); <add> <add> // @gate experimental <add> it('handle propagation of click events between disjointed comment roots', () => { <add> const buttonRef = React.createRef(); <add> const divRef = React.createRef(); <add> const log = []; <add> const setClick = ReactDOM.unstable_createEventHandle('click'); <add> const setClickCapture = ReactDOM.unstable_createEventHandle( <add> 'click', <add> {capture: true}, <add> ); <add> const onClick = jest.fn(e => <add> log.push(['bubble', e.currentTarget]), <add> ); <add> const onClickCapture = jest.fn(e => <add> log.push(['capture', e.currentTarget]), <add> 
); <add> <add> function Child() { <add> React.useEffect(() => { <add> const click1 = setClick(divRef.current, onClick); <add> const click2 = setClickCapture( <add> divRef.current, <add> onClickCapture, <add> ); <add> return () => { <add> click1(); <add> click2(); <add> }; <add> }); <add> <add> return <div ref={divRef}>Click me!</div>; <add> } <add> <add> function Parent() { <add> React.useEffect(() => { <add> const click1 = setClick(buttonRef.current, onClick); <add> const click2 = setClickCapture( <add> buttonRef.current, <add> onClickCapture, <add> ); <add> return () => { <add> click1(); <add> click2(); <add> }; <add> }); <add> <add> return <button ref={buttonRef} />; <add> } <add> <add> // We use a comment node here, then mount to it <add> const disjointedNode = document.createComment( <add> ' react-mount-point-unstable ', <add> ); <add> ReactDOM.render(<Parent />, container); <add> Scheduler.unstable_flushAll(); <add> buttonRef.current.appendChild(disjointedNode); <add> ReactDOM.render(<Child />, disjointedNode); <add> Scheduler.unstable_flushAll(); <add> <add> const buttonElement = buttonRef.current; <add> dispatchClickEvent(buttonElement); <add> expect(onClick).toHaveBeenCalledTimes(1); <add> expect(onClickCapture).toHaveBeenCalledTimes(1); <add> expect(log[0]).toEqual(['capture', buttonElement]); <add> expect(log[1]).toEqual(['bubble', buttonElement]); <add> <add> const divElement = divRef.current; <add> dispatchClickEvent(divElement); <add> expect(onClick).toHaveBeenCalledTimes(3); <add> expect(onClickCapture).toHaveBeenCalledTimes(3); <add> expect(log[2]).toEqual(['capture', buttonElement]); <add> expect(log[3]).toEqual(['capture', divElement]); <add> expect(log[4]).toEqual(['bubble', divElement]); <add> expect(log[5]).toEqual(['bubble', buttonElement]); <add> }); <ide> }); <ide> }); <ide> },
2
Ruby
Ruby
add benchmark inject code
d770a874e466d1e9a8df7672b37102291056cae1
<ide><path>Library/Homebrew/cmd/audit.rb <ide> def audit <ide> ENV.activate_extensions! <ide> ENV.setup_build_environment <ide> <add> if ARGV.switch? "D" <add> FormulaAuditor.module_eval do <add> instance_methods.grep(/audit_/).map do |name| <add> method = instance_method(name) <add> define_method(name) do |*args, &block| <add> begin <add> time = Time.now <add> method.bind(self).call(*args, &block) <add> ensure <add> $times[name] ||= 0 <add> $times[name] += Time.now - time <add> end <add> end <add> end <add> end <add> <add> $times = {} <add> at_exit { puts $times.sort_by{ |k, v| v }.map{ |k, v| "#{k}: #{v}" } } <add> end <add> <ide> ff = if ARGV.named.empty? <ide> Formula <ide> else
1
Text
Text
simplify ccache instructions
31d7d6c994c5b38de1b18a30d31ac580f868eafc
<ide><path>BUILDING.md <ide> installing `ccache` can help to greatly reduce build times. Set up with: <ide> <ide> On GNU/Linux: <ide> <del>```console <del>$ sudo apt install ccache # for Debian/Ubuntu, included in most Linux distros <del>$ ccache -o cache_dir=<tmp_dir> <del>$ ccache -o max_size=5.0G <del>$ export CC="ccache gcc" # add to your .profile <del>$ export CXX="ccache g++" # add to your .profile <add>```bash <add>sudo apt install ccache # for Debian/Ubuntu, included in most Linux distros <add>export CC="ccache gcc" # add to your .profile <add>export CXX="ccache g++" # add to your .profile <ide> ``` <ide> <ide> On macOS: <ide> <del>```console <del>$ brew install ccache # see https://brew.sh <del>$ ccache -o cache_dir=<tmp_dir> <del>$ ccache -o max_size=5.0G <del>$ export CC="ccache cc" # add to ~/.zshrc or other shell config file <del>$ export CXX="ccache c++" # add to ~/.zshrc or other shell config file <add>```bash <add>brew install ccache # see https://brew.sh <add>export CC="ccache cc" # add to ~/.zshrc or other shell config file <add>export CXX="ccache c++" # add to ~/.zshrc or other shell config file <ide> ``` <ide> <ide> This will allow for near-instantaneous rebuilds even when switching branches.
1
Javascript
Javascript
avoid every execution and allocation
56f8c7f39d428c58a7dbeb9683ea051e0b649eb5
<ide><path>src/compose.js <ide> * from right to left. For example, compose(f, g, h) is identical to doing <ide> * (...args) => f(g(h(...args))). <ide> */ <del> <del>export default function compose(...funcs) { <del> return (...args) => { <del> if (funcs.length === 0) { <del> return args[0] <del> } <ide> <add>export default function compose(...funcs) { <add> if (!funcs.length) { <add> return (...args) => args[0] <add> } else { <ide> const last = funcs[funcs.length - 1] <ide> const rest = funcs.slice(0, -1) <del> <del> return rest.reduceRight((composed, f) => f(composed), last(...args)) <add> return (...args) => rest.reduceRight((composed, f) => f(composed), last(...args)) <ide> } <ide> }
1
Text
Text
add ak239 to collaborators
c5ce7f4d9e36fc9f06547de1e41cdf4b2ac02720
<ide><path>README.md <ide> more information about the governance of the Node.js project, see <ide> <ide> ### Collaborators <ide> <add>* [ak239](https://github.com/ak239) - <add>**Aleksei Koziatinskii** &lt;ak239spb@gmail.com&gt; <ide> * [andrasq](https://github.com/andrasq) - <ide> **Andras** &lt;andras@kinvey.com&gt; <ide> * [AndreasMadsen](https://github.com/AndreasMadsen) -
1
Ruby
Ruby
allow http templates again, fix the broken test
76053fe4d12bd2be38e16b3322f9de73e09b43bc
<ide><path>railties/lib/rails/generators/app_base.rb <ide> def apply_rails_template <ide> <ide> def set_default_accessors! <ide> self.rails_template = case options[:template] <del> when /^https:\/\// <add> when /^https?:\/\// <ide> options[:template] <ide> when String <ide> File.expand_path(options[:template], Dir.pwd)
1
Text
Text
add missing import in tests
d9c652813d3ab0ddebbb5503ce8ed57c49e1d0d2
<ide><path>docs/api-guide/testing.md <ide> When testing views directly using a request factory, it's often convenient to be <ide> <ide> To forcibly authenticate a request, use the `force_authenticate()` method. <ide> <add> from rest_framework.tests import force_authenticate <add> <ide> factory = APIRequestFactory() <ide> user = User.objects.get(username='olivia') <ide> view = AccountDetail.as_view()
1
Python
Python
fix tree color and improve display code
41586d60f9b7fedde2c88248bb776f66ccbe8f1c
<ide><path>glances/plugins/glances_processlist.py <ide> def add_tree_decoration(self, child_data, is_last_child, first_level): <ide> # TODO find a way to get rid of hardcoded 12 value <ide> pos.append(i + 12) <ide> <add> # add new curses items for tree decoration <add> new_child_data = [] <add> new_pos = [] <add> for i, m in enumerate(child_data): <add> if i in pos: <add> new_pos.append(len(new_child_data)) <add> new_child_data.append(self.curse_add_line("")) <add> new_child_data.append(m) <add> child_data = new_child_data <add> pos = new_pos <add> <ide> # draw node prefix <ide> if is_last_child: <ide> prefix = "└─" <ide> else: <ide> prefix = "├─" <del> child_data[pos[0]]["msg"] = "%s%s" % (prefix, child_data[pos[0]]["msg"]) <add> child_data[pos[0]]["msg"] = prefix <ide> <ide> # add indentation <ide> for i in pos: <add> spacing = 2 <ide> if first_level: <del> child_data[i]["msg"] = " %s" % (child_data[i]["msg"]) <del> else: <del> child_data[i]["msg"] = " %s" % (child_data[i]["msg"]) <add> spacing = 1 <add> elif is_last_child and (i is not pos[0]): <add> # compensate indentation for missing '│' char <add> spacing = 3 <add> child_data[i]["msg"] = "%s%s" % (" " * spacing, child_data[i]["msg"]) <ide> <ide> if not is_last_child: <ide> # add '│' tree decoration <ide> def add_tree_decoration(self, child_data, is_last_child, first_level): <ide> if first_level: <ide> child_data[i]["msg"] = " │" + old_str[2:] <ide> else: <del> child_data[i]["msg"] = old_str[:3] + "│" + old_str[4:] <add> child_data[i]["msg"] = old_str[:2] + "│" + old_str[3:] <ide> return child_data <ide> <ide> def get_process_curses_data(self, p, first, args):
1
Python
Python
add relations and get tests running
ec096a1caceff6a4f5c75a152dd1c7bea9ed281d
<ide><path>rest_framework/fields.py <ide> class Field(object): <ide> <ide> def __init__(self, read_only=False, write_only=False, <ide> required=None, default=empty, initial=None, source=None, <del> label=None, style=None): <add> label=None, style=None, error_messages=None): <ide> self._creation_counter = Field._creation_counter <ide> Field._creation_counter += 1 <ide> <ide> class CharField(Field): <ide> 'blank': 'This field may not be blank.' <ide> } <ide> <del> def __init__(self, *args, **kwargs): <add> def __init__(self, **kwargs): <ide> self.allow_blank = kwargs.pop('allow_blank', False) <del> super(CharField, self).__init__(*args, **kwargs) <add> self.max_length = kwargs.pop('max_length', None) <add> self.min_length = kwargs.pop('min_length', None) <add> super(CharField, self).__init__(**kwargs) <ide> <ide> def to_native(self, data): <ide> if data == '' and not self.allow_blank: <ide> class ChoiceField(Field): <ide> } <ide> coerce_to_type = str <ide> <del> def __init__(self, *args, **kwargs): <add> def __init__(self, **kwargs): <ide> choices = kwargs.pop('choices') <ide> <ide> assert choices, '`choices` argument is required and may not be empty' <ide> def __init__(self, *args, **kwargs): <ide> str(key): key for key in self.choices.keys() <ide> } <ide> <del> super(ChoiceField, self).__init__(*args, **kwargs) <add> super(ChoiceField, self).__init__(**kwargs) <ide> <ide> def to_native(self, data): <ide> try: <ide> def to_native(self, data): <ide> return data <ide> <ide> <add>class EmailField(CharField): <add> pass # TODO <add> <add> <add>class RegexField(CharField): <add> def __init__(self, **kwargs): <add> self.regex = kwargs.pop('regex') <add> super(CharField, self).__init__(**kwargs) <add> <add> <add>class DateTimeField(CharField): <add> pass # TODO <add> <add> <add>class FileField(Field): <add> pass # TODO <add> <add> <ide> class MethodField(Field): <ide> def __init__(self, **kwargs): <ide> kwargs['source'] = '*' <ide><path>rest_framework/mixins.py <ide> """ 
<ide> from __future__ import unicode_literals <ide> <del>from django.core.exceptions import ValidationError <ide> from django.http import Http404 <ide> from rest_framework import status <ide> from rest_framework.response import Response <ide><path>rest_framework/relations.py <add>from rest_framework.fields import Field <add>from django.core.exceptions import ObjectDoesNotExist <add>from django.core.urlresolvers import resolve, get_script_prefix <add>from rest_framework.compat import urlparse <add> <add> <add>def get_default_queryset(serializer_class, field_name): <add> manager = getattr(serializer_class.opts.model, field_name) <add> if hasattr(manager, 'related'): <add> # Forward relationships <add> return manager.related.model._default_manager.all() <add> # Reverse relationships <add> return manager.field.rel.to._default_manager.all() <add> <add> <add>class RelatedField(Field): <add> def __init__(self, **kwargs): <add> self.queryset = kwargs.pop('queryset', None) <add> self.many = kwargs.pop('many', False) <add> super(RelatedField, self).__init__(**kwargs) <add> <add> def bind(self, field_name, parent, root): <add> super(RelatedField, self).bind(field_name, parent, root) <add> if self.queryset is None and not self.read_only: <add> self.queryset = get_default_queryset(parent, self.source) <add> <add> <add>class PrimaryKeyRelatedField(RelatedField): <add> MESSAGES = { <add> 'required': 'This field is required.', <add> 'does_not_exist': "Invalid pk '{pk_value}' - object does not exist.", <add> 'incorrect_type': 'Incorrect type. 
Expected pk value, received {data_type}.', <add> } <add> <add> def from_native(self, data): <add> try: <add> return self.queryset.get(pk=data) <add> except ObjectDoesNotExist: <add> self.fail('does_not_exist', pk_value=data) <add> except (TypeError, ValueError): <add> self.fail('incorrect_type', data_type=type(data).__name__) <add> <add> <add>class HyperlinkedRelatedField(RelatedField): <add> lookup_field = 'pk' <add> <add> MESSAGES = { <add> 'required': 'This field is required.', <add> 'no_match': 'Invalid hyperlink - No URL match', <add> 'incorrect_match': 'Invalid hyperlink - Incorrect URL match.', <add> 'does_not_exist': "Invalid hyperlink - Object does not exist.", <add> 'incorrect_type': 'Incorrect type. Expected URL string, received {data_type}.', <add> } <add> <add> def __init__(self, **kwargs): <add> self.view_name = kwargs.pop('view_name') <add> self.lookup_field = kwargs.pop('lookup_field', self.lookup_field) <add> self.lookup_url_kwarg = kwargs.pop('lookup_url_kwarg', self.lookup_field) <add> super(HyperlinkedRelatedField, self).__init__(**kwargs) <add> <add> def get_object(self, view_name, view_args, view_kwargs): <add> """ <add> Return the object corresponding to a matched URL. <add> <add> Takes the matched URL conf arguments, and should return an <add> object instance, or raise an `ObjectDoesNotExist` exception. 
<add> """ <add> lookup_value = view_kwargs[self.lookup_url_kwarg] <add> lookup_kwargs = {self.lookup_field: lookup_value} <add> return self.queryset.get(**lookup_kwargs) <add> <add> def from_native(self, value): <add> try: <add> http_prefix = value.startswith(('http:', 'https:')) <add> except AttributeError: <add> self.fail('incorrect_type', type(value).__name__) <add> <add> if http_prefix: <add> # If needed convert absolute URLs to relative path <add> value = urlparse.urlparse(value).path <add> prefix = get_script_prefix() <add> if value.startswith(prefix): <add> value = '/' + value[len(prefix):] <add> <add> try: <add> match = resolve(value) <add> except Exception: <add> self.fail('no_match') <add> <add> if match.view_name != self.view_name: <add> self.fail('incorrect_match') <add> <add> try: <add> return self.get_object(match.view_name, match.args, match.kwargs) <add> except (ObjectDoesNotExist, TypeError, ValueError): <add> self.fail('does_not_exist') <add> <add> <add>class HyperlinkedIdentityField(RelatedField): <add> lookup_field = 'pk' <add> <add> def __init__(self, **kwargs): <add> self.view_name = kwargs.pop('view_name') <add> self.lookup_field = kwargs.pop('lookup_field', self.lookup_field) <add> self.lookup_url_kwarg = kwargs.pop('lookup_url_kwarg', self.lookup_field) <add> super(HyperlinkedIdentityField, self).__init__(**kwargs) <add> <add> <add>class SlugRelatedField(RelatedField): <add> def __init__(self, **kwargs): <add> self.slug_field = kwargs.pop('slug_field', None) <ide><path>rest_framework/renderers.py <ide> def get_rendered_html_form(self, view, method, request): <ide> if request.method == method: <ide> try: <ide> data = request.DATA <del> files = request.FILES <add> # files = request.FILES <ide> except ParseError: <ide> data = None <del> files = None <add> # files = None <ide> else: <ide> data = None <del> files = None <add> # files = None <ide> <ide> with override_method(view, request, method) as request: <ide> obj = getattr(view, 'object', 
None) <ide> def get_context(self, data, accepted_media_type, renderer_context): <ide> 'available_formats': [renderer_cls.format for renderer_cls in view.renderer_classes], <ide> 'response_headers': response_headers, <ide> <del> #'put_form': self.get_rendered_html_form(view, 'PUT', request), <del> #'post_form': self.get_rendered_html_form(view, 'POST', request), <del> #'delete_form': self.get_rendered_html_form(view, 'DELETE', request), <del> #'options_form': self.get_rendered_html_form(view, 'OPTIONS', request), <add> # 'put_form': self.get_rendered_html_form(view, 'PUT', request), <add> # 'post_form': self.get_rendered_html_form(view, 'POST', request), <add> # 'delete_form': self.get_rendered_html_form(view, 'DELETE', request), <add> # 'options_form': self.get_rendered_html_form(view, 'OPTIONS', request), <ide> <ide> 'raw_data_put_form': raw_data_put_form, <ide> 'raw_data_post_form': raw_data_post_form, <ide><path>rest_framework/serializers.py <ide> def get_related_field(self, model_field, related_model, to_many): <ide> <ide> if model_field: <ide> kwargs['required'] = not(model_field.null or model_field.blank) <del> # if model_field.help_text is not None: <del> # kwargs['help_text'] = model_field.help_text <add> # if model_field.help_text is not None: <add> # kwargs['help_text'] = model_field.help_text <ide> if model_field.verbose_name is not None: <ide> kwargs['label'] = model_field.verbose_name <ide> if not model_field.editable: <ide> def __init__(self, meta): <ide> class HyperlinkedModelSerializer(ModelSerializer): <ide> _options_class = HyperlinkedModelSerializerOptions <ide> _default_view_name = '%(model_name)s-detail' <del> #_hyperlink_field_class = HyperlinkedRelatedField <del> #_hyperlink_identify_field_class = HyperlinkedIdentityField <add> # _hyperlink_field_class = HyperlinkedRelatedField <add> # _hyperlink_identify_field_class = HyperlinkedIdentityField <ide> <ide> def get_default_fields(self): <ide> fields = super(HyperlinkedModelSerializer, 
self).get_default_fields() <ide><path>rest_framework/utils/html.py <ide> """ <ide> Helpers for dealing with HTML input. <ide> """ <add>import re <add> <ide> <ide> def is_html_input(dictionary): <ide> # MultiDict type datastructures are used to represent HTML form input, <ide><path>tests/test_serializer.py <ide> def test_url_field(self): <ide> <ide> <ide> class MetadataSerializer(serializers.Serializer): <del> field1 = serializers.CharField(3, required=True) <del> field2 = serializers.CharField(10, required=False) <add> field1 = serializers.CharField(max_length=3, required=True) <add> field2 = serializers.CharField(max_length=10, required=False) <ide> <ide> <ide> class MetadataSerializerTestCase(TestCase):
7
Text
Text
add link to debugger in process.md
8d23afc807303a9afc1851802e1d88f0d3a84c29
<ide><path>doc/api/process.md <ide> terminal programs. <ide> <ide> It is important to take note of the following: <ide> <del>* `SIGUSR1` is reserved by Node.js to start the debugger. It's possible to <add>* `SIGUSR1` is reserved by Node.js to start the [debugger][]. It's possible to <ide> install a listener but doing so will _not_ stop the debugger from starting. <ide> * `SIGTERM` and `SIGINT` have default handlers on non-Windows platforms that <ide> resets the terminal mode before exiting with code `128 + signal number`. If <ide> cases: <ide> [`v8.setFlagsFromString()`]: v8.html#v8_v8_setflagsfromstring_flags <ide> [Child Process]: child_process.html <ide> [Cluster]: cluster.html <add>[debugger]: debugger.html <ide> [Duplex]: stream.html#stream_duplex_and_transform_streams <ide> [LTS]: https://github.com/nodejs/LTS/ <ide> [note on process I/O]: process.html#process_a_note_on_process_i_o
1
Ruby
Ruby
add check for 'head' and 'head do' both present
4c80bf4324956e39073eb7d13c144cdcd0f64fba
<ide><path>Library/Homebrew/cmd/audit.rb <ide> def audit_file <ide> [/^ test do/, "test block"] <ide> ] <ide> <del> component_list.map do |regex, name| <add> present = component_list.map do |regex, name| <ide> lineno = text.line_number regex <ide> next unless lineno <ide> [lineno, name] <del> end.compact.each_cons(2) do |c1, c2| <add> end.compact <add> present.each_cons(2) do |c1, c2| <ide> unless c1[0] < c2[0] <ide> problem "`#{c1[1]}` (line #{c1[0]}) should be put before `#{c2[1]}` (line #{c2[0]})" <ide> end <ide> end <add> present.map!(&:last) <add> if present.include?("head") && present.include?("head block") <add> problem "Should not have both `head` and `head do`" <add> end <ide> end <ide> <ide> def audit_class
1
PHP
PHP
remove duplicate eol
691ae2fafe66f8acb404eb8325efdfba2f353675
<ide><path>src/Illuminate/Auth/AuthServiceProvider.php <ide> public function register() <ide> }); <ide> } <ide> <del> <ide> /** <ide> * Get the services provided by the provider. <ide> * <ide><path>src/Illuminate/Console/Command.php <ide> public function askWithCompletion($question, array $choices, $default = null) <ide> return $helper->ask($this->input, $this->output, $question); <ide> } <ide> <del> <ide> /** <ide> * Prompt the user for input but hide the answer from the console. <ide> * <ide><path>src/Illuminate/Database/Connectors/PostgresConnector.php <ide> class PostgresConnector extends Connector implements ConnectorInterface { <ide> PDO::ATTR_STRINGIFY_FETCHES => false, <ide> ); <ide> <del> <ide> /** <ide> * Establish a database connection. <ide> * <ide><path>src/Illuminate/Database/Schema/Grammars/SqlServerGrammar.php <ide> protected function typeChar(Fluent $column) <ide> return "nchar({$column->length})"; <ide> } <ide> <del> <ide> /** <ide> * Create the column definition for a string type. <ide> * <ide><path>src/Illuminate/Routing/Console/MakeControllerCommand.php <ide> protected function getArguments() <ide> ); <ide> } <ide> <del> <ide> /** <ide> * Get the console command options. <ide> * <ide><path>src/Illuminate/Session/SessionManager.php <ide> protected function createRedisDriver() <ide> return $this->buildSession($handler); <ide> } <ide> <del> <ide> /** <ide> * Create an instance of a cache driven driver. <ide> *
6
Javascript
Javascript
remove slow commands check
3ac89aacd2ad1d5265d9ba82f95fafc6daaac77d
<ide><path>src/display/canvas.js <ide> var CanvasGraphics = (function CanvasGraphicsClosure() { <ide> var EO_CLIP = {}; <ide> <ide> CanvasGraphics.prototype = { <del> slowCommands: { <del> 'stroke': true, <del> 'closeStroke': true, <del> 'fill': true, <del> 'eoFill': true, <del> 'fillStroke': true, <del> 'eoFillStroke': true, <del> 'closeFillStroke': true, <del> 'closeEOFillStroke': true, <del> 'showText': true, <del> 'showSpacedText': true, <del> 'setStrokeColorSpace': true, <del> 'setFillColorSpace': true, <del> 'setStrokeColor': true, <del> 'setStrokeColorN': true, <del> 'setFillColor': true, <del> 'setFillColorN': true, <del> 'setStrokeGray': true, <del> 'setFillGray': true, <del> 'setStrokeRGBColor': true, <del> 'setFillRGBColor': true, <del> 'setStrokeCMYKColor': true, <del> 'setFillCMYKColor': true, <del> 'paintJpegXObject': true, <del> 'paintImageXObject': true, <del> 'paintInlineImageXObject': true, <del> 'paintInlineImageXObjectGroup': true, <del> 'paintImageMaskXObject': true, <del> 'paintImageMaskXObjectGroup': true, <del> 'shadingFill': true <del> }, <ide> <ide> beginDrawing: function CanvasGraphics_beginDrawing(viewport, transparency) { <ide> // For pdfs that use blend modes we have to clear the canvas else certain <ide> var CanvasGraphics = (function CanvasGraphicsClosure() { <ide> var commonObjs = this.commonObjs; <ide> var objs = this.objs; <ide> var fnId; <del> var slowCommands = this.slowCommands; <ide> <ide> while (true) { <ide> if (stepper && i === stepper.nextBreakPoint) { <ide> var CanvasGraphics = (function CanvasGraphicsClosure() { <ide> // If the execution took longer then a certain amount of time, shedule <ide> // to continue exeution after a short delay. <ide> // However, this is only possible if a 'continueCallback' is passed in. <del> if (continueCallback && slowCommands[fnId] && Date.now() > endTime) { <add> if (continueCallback && Date.now() > endTime) { <ide> setTimeout(continueCallback, 0); <ide> return i; <ide> }
1
Ruby
Ruby
use example.com as domain in tests
9a554f5f4b41d49ce0dd7e9f8c09ee519c1cb2e2
<ide><path>Library/Homebrew/test/test_dependency_collector.rb <ide> def test_does_not_mutate_dependency_spec <ide> <ide> def test_resource_dep_git_url <ide> resource = Resource.new <del> resource.url("git://github.com/foo/bar.git") <add> resource.url("git://example.com/foo/bar.git") <ide> assert_instance_of GitDependency, @d.add(resource) <ide> end <ide> <ide> def test_resource_dep_gzip_url <ide> resource = Resource.new <del> resource.url("http://foo.com/bar.tar.gz") <add> resource.url("http://example.com/foo.tar.gz") <ide> assert_nil @d.add(resource) <ide> end <ide> <ide> def test_resource_dep_xz_url <ide> resource = Resource.new <del> resource.url("http://foo.com/bar.tar.xz") <add> resource.url("http://example.com/foo.tar.xz") <ide> assert_equal Dependency.new("xz", [:build]), @d.add(resource) <ide> end <ide> <ide><path>Library/Homebrew/test/test_download_strategies.rb <ide> class ResourceDouble <ide> attr_reader :url, :specs <ide> <del> def initialize(url="http://foo.com/bar.tar.gz", specs={}) <add> def initialize(url="http://example.com/foo.tar.gz", specs={}) <ide> @url = url <ide> @specs = specs <ide> end <ide> def test_expand_safe_system_args_does_not_mutate_argument <ide> <ide> class VCSDownloadStrategyTests < Test::Unit::TestCase <ide> def setup <del> @resource = ResourceDouble.new("http://foo.com/bar") <add> @resource = ResourceDouble.new("http://example.com/bar") <ide> @strategy = VCSDownloadStrategy <ide> end <ide> <ide> def setup <ide> end <ide> <ide> def test_detect_git_download_startegy <del> @d = DownloadStrategyDetector.detect("git://foo.com/bar.git") <add> @d = DownloadStrategyDetector.detect("git://example.com/foo.git") <ide> assert_equal GitDownloadStrategy, @d <ide> end <ide> <ide><path>Library/Homebrew/test/test_formula.rb <ide> def test_class_naming <ide> def test_formula_spec_integration <ide> f = Class.new(Formula) do <ide> homepage 'http://example.com' <del> url 'file:///foo.com/testball-0.1.tbz' <del> mirror 
'file:///foo.org/testball-0.1.tbz' <add> url 'http://example.com/test-0.1.tbz' <add> mirror 'http://example.org/test-0.1.tbz' <ide> sha1 TEST_SHA1 <ide> <del> head 'https://github.com/Homebrew/homebrew.git', :tag => 'foo' <add> head 'http://example.com/test.git', :tag => 'foo' <ide> <ide> devel do <del> url 'file:///foo.com/testball-0.2.tbz' <del> mirror 'file:///foo.org/testball-0.2.tbz' <add> url 'http://example.com/test-0.2.tbz' <add> mirror 'http://example.org/test-0.2.tbz' <ide> sha256 TEST_SHA256 <ide> end <ide> <ide> bottle { sha1 TEST_SHA1 => bottle_tag } <ide> <del> def initialize(name="spec_test_ball", path=nil) <add> def initialize(name="test", path=nil) <ide> super <ide> end <ide> end.new <ide><path>Library/Homebrew/test/test_resource.rb <ide> def test_version <ide> end <ide> <ide> def test_version_from_url <del> @resource.url('http://foo.com/bar-1.0.tar.gz') <add> @resource.url('http://example.com/foo-1.0.tar.gz') <ide> assert_version_equal '1.0', @resource.version <ide> assert @resource.version.detected_from_url? <ide> end <ide> def test_version_with_scheme <ide> end <ide> <ide> def test_version_from_tag <del> @resource.url('http://foo.com/bar-1.0.tar.gz', :tag => 'v1.0.2') <add> @resource.url('http://example.com/foo-1.0.tar.gz', :tag => 'v1.0.2') <ide> assert_version_equal '1.0.2', @resource.version <ide> assert @resource.version.detected_from_url? <ide> end
4
Javascript
Javascript
fix usage of bound outlet name
0b9af9c82181840d89dd42fc228fe2cb626b52a3
<ide><path>packages/ember-glimmer/lib/syntax/outlet.js <ide> import { generateGuid, guidFor } from 'ember-metal/utils'; <ide> import { _instrumentStart } from 'ember-metal/instrumentation'; <ide> import { RootReference } from '../utils/references'; <ide> <add>import { <add> UpdatableTag, <add> ConstReference, <add> combine <add>} from 'glimmer-reference'; <add> <ide> function outletComponentFor(vm) { <ide> let { outletState, isTopLevel } = vm.dynamicScope(); <ide> <ide> if (isTopLevel) { <ide> return new TopLevelOutletComponentReference(outletState); <ide> } else { <ide> let args = vm.getArgs(); <del> let outletName = args.positional.at(0).value() || 'main'; <del> return new OutletComponentReference(outletName, outletState.get(outletName)); <add> let outletNameRef; <add> if (args.positional.length === 0) { <add> outletNameRef = new ConstReference('main'); <add> } else { <add> outletNameRef = args.positional.at(0); <add> } <add> <add> return new OutletComponentReference(outletNameRef, outletState); <ide> } <ide> } <ide> <ide> export class OutletSyntax extends StatementSyntax { <ide> static create(environment, args, templates, symbolTable) { <del> return new this(environment, args, templates, symbolTable); <add> let definitionArgs = ArgsSyntax.fromPositionalArgs(args.positional.slice(0, 1)); <add> <add> return new this(environment, definitionArgs, templates, symbolTable); <ide> } <ide> <ide> constructor(environment, args, templates, symbolTable) { <ide> class TopLevelOutletComponentReference { <ide> } <ide> <ide> class OutletComponentReference { <del> constructor(outletName, reference) { <del> this.outletName = outletName; <del> this.reference = reference; <add> constructor(outletNameRef, parentOutletStateRef) { <add> this.outletNameRef = outletNameRef; <add> this.parentOutletStateRef = parentOutletStateRef; <ide> this.definition = null; <ide> this.lastState = null; <del> this.tag = reference.tag; <add> let outletStateTag = this.outletStateTag = new 
UpdatableTag(parentOutletStateRef.tag); <add> this.tag = combine([outletStateTag.tag, outletNameRef.tag]); <ide> } <ide> <ide> value() { <del> let { outletName, reference, definition, lastState } = this; <del> let newState = this.lastState = reference.value(); <add> let { outletNameRef, parentOutletStateRef, definition, lastState } = this; <add> <add> <add> let outletName = outletNameRef.value(); <add> let outletStateRef = parentOutletStateRef.get(outletName); <add> let newState = this.lastState = outletStateRef.value(); <add> <add> this.outletStateTag.update(outletStateRef.tag); <add> <ide> definition = revalidate(definition, lastState, newState); <ide> <ide> let hasTemplate = newState && newState.render.template; <ide><path>packages/ember-glimmer/tests/integration/outlet-test.js <ide> moduleFor('outlet view', class extends RenderingTest { <ide> this.assertText('HIBYE'); <ide> } <ide> <del> ['@skip should support bound outlet name']() { <add> ['@test does not default outlet name when positional argument is present']() { <add> this.registerTemplate('application', '<h1>HI</h1>{{outlet someUndefinedThing}}'); <add> let outletState = { <add> render: { <add> owner: this.owner, <add> into: undefined, <add> outlet: 'main', <add> name: 'application', <add> controller: {}, <add> ViewClass: undefined, <add> template: this.owner.lookup('template:application') <add> }, <add> outlets: Object.create(null) <add> }; <add> <add> this.runTask(() => this.component.setOutletState(outletState)); <add> <add> runAppend(this.component); <add> <add> this.assertText('HI'); <add> <add> this.assertStableRerender(); <add> <add> this.registerTemplate('special', '<p>BYE</p>'); <add> outletState.outlets.main = { <add> render: { <add> owner: this.owner, <add> into: undefined, <add> outlet: 'main', <add> name: 'special', <add> controller: {}, <add> ViewClass: undefined, <add> template: this.owner.lookup('template:special') <add> }, <add> outlets: Object.create(null) <add> }; <add> <add> 
this.runTask(() => this.component.setOutletState(outletState)); <add> <add> this.assertText('HI'); <add> } <add> <add> ['@test should support bound outlet name']() { <ide> let controller = { outletName: 'foo' }; <ide> this.registerTemplate('application', '<h1>HI</h1>{{outlet outletName}}'); <ide> let outletState = { <ide> moduleFor('outlet view', class extends RenderingTest { <ide> into: undefined, <ide> outlet: 'main', <ide> name: 'application', <del> controller: controller, <add> controller, <ide> ViewClass: undefined, <ide> template: this.owner.lookup('template:application') <ide> },
2
Javascript
Javascript
remove unnecessary stack
b58dec979a19d75d8f62fa5e8570ce33fbe50310
<ide><path>packages/react-reconciler/src/ReactFiberThrow.new.js <ide> import { <ide> ForceUpdate, <ide> enqueueUpdate, <ide> } from './ReactUpdateQueue.new'; <del>import {getStackByFiberInDevAndProd} from './ReactFiberComponentStack'; <ide> import {markFailedErrorBoundaryForHotReloading} from './ReactFiberHotReloading.new'; <ide> import { <ide> suspenseStackCursor, <ide> function throwException( <ide> ' suspended while rendering, but no fallback UI was specified.\n' + <ide> '\n' + <ide> 'Add a <Suspense fallback=...> component higher in the tree to ' + <del> 'provide a loading indicator or placeholder to display.' + <del> getStackByFiberInDevAndProd(sourceFiber), <add> 'provide a loading indicator or placeholder to display.', <ide> ); <ide> } <ide> <ide><path>packages/react-reconciler/src/ReactFiberThrow.old.js <ide> import { <ide> ForceUpdate, <ide> enqueueUpdate, <ide> } from './ReactUpdateQueue.old'; <del>import {getStackByFiberInDevAndProd} from './ReactFiberComponentStack'; <ide> import {markFailedErrorBoundaryForHotReloading} from './ReactFiberHotReloading.old'; <ide> import { <ide> suspenseStackCursor, <ide> function throwException( <ide> ' suspended while rendering, but no fallback UI was specified.\n' + <ide> '\n' + <ide> 'Add a <Suspense fallback=...> component higher in the tree to ' + <del> 'provide a loading indicator or placeholder to display.' + <del> getStackByFiberInDevAndProd(sourceFiber), <add> 'provide a loading indicator or placeholder to display.', <ide> ); <ide> } <ide>
2
Python
Python
add synthetic data option to resnet
74c43aaef62a6f4451e90f95be801882a935177e
<ide><path>official/resnet/cifar10_main.py <ide> def input_fn(is_training, data_dir, batch_size, num_epochs=1, <ide> examples_per_epoch=num_images, multi_gpu=multi_gpu) <ide> <ide> <add>def get_synth_input_fn(): <add> return resnet.get_synth_input_fn(_HEIGHT, _WIDTH, _NUM_CHANNELS, _NUM_CLASSES) <add> <add> <ide> ############################################################################### <ide> # Running the model <ide> ############################################################################### <ide> def loss_filter_fn(name): <ide> <ide> <ide> def main(unused_argv): <del> resnet.resnet_main(FLAGS, cifar10_model_fn, input_fn) <add> input_function = FLAGS.use_synthetic_data and get_synth_input_fn() or input_fn <add> resnet.resnet_main(FLAGS, cifar10_model_fn, input_function) <ide> <ide> <ide> if __name__ == '__main__': <ide><path>official/resnet/cifar10_test.py <ide> def test_dataset_input_fn(self): <ide> for pixel in row: <ide> self.assertAllClose(pixel, np.array([-1.225, 0., 1.225]), rtol=1e-3) <ide> <del> def input_fn(self): <del> features = tf.random_uniform([_BATCH_SIZE, _HEIGHT, _WIDTH, _NUM_CHANNELS]) <del> labels = tf.random_uniform( <del> [_BATCH_SIZE], maxval=9, dtype=tf.int32) <del> return features, tf.one_hot(labels, 10) <del> <ide> def cifar10_model_fn_helper(self, mode, multi_gpu=False): <del> features, labels = self.input_fn() <add> input_fn = cifar10_main.get_synth_input_fn() <add> dataset = input_fn(True, '', _BATCH_SIZE) <add> iterator = dataset.make_one_shot_iterator() <add> features, labels = iterator.get_next() <ide> spec = cifar10_main.cifar10_model_fn( <ide> features, labels, mode, { <ide> 'resnet_size': 32, <ide><path>official/resnet/imagenet_main.py <ide> def input_fn(is_training, data_dir, batch_size, num_epochs=1, <ide> # Convert to individual records <ide> dataset = dataset.flat_map(tf.data.TFRecordDataset) <ide> <del> return resnet.process_record_dataset(dataset, is_training, batch_size, <del> _SHUFFLE_BUFFER, parse_record, 
num_epochs, num_parallel_calls, <del> examples_per_epoch=num_images, multi_gpu=multi_gpu) <add> return resnet.process_record_dataset( <add> dataset, is_training, batch_size, _SHUFFLE_BUFFER, parse_record, <add> num_epochs, num_parallel_calls, examples_per_epoch=num_images, <add> multi_gpu=multi_gpu) <add> <add> <add>def get_synth_input_fn(): <add> return resnet.get_synth_input_fn( <add> _DEFAULT_IMAGE_SIZE, _DEFAULT_IMAGE_SIZE, _NUM_CHANNELS, _NUM_CLASSES) <ide> <ide> <ide> ############################################################################### <ide> def imagenet_model_fn(features, labels, mode, params): <ide> <ide> <ide> def main(unused_argv): <del> resnet.resnet_main(FLAGS, imagenet_model_fn, input_fn) <add> input_function = FLAGS.use_synthetic_data and get_synth_input_fn() or input_fn <add> resnet.resnet_main(FLAGS, imagenet_model_fn, input_function) <ide> <ide> <ide> if __name__ == '__main__': <ide><path>official/resnet/imagenet_test.py <ide> def test_tensor_shapes_resnet_152_with_gpu(self): <ide> def test_tensor_shapes_resnet_200_with_gpu(self): <ide> self.tensor_shapes_helper(200, True) <ide> <del> def input_fn(self): <del> """Provides random features and labels.""" <del> features = tf.random_uniform([_BATCH_SIZE, 224, 224, 3]) <del> labels = tf.one_hot( <del> tf.random_uniform( <del> [_BATCH_SIZE], maxval=_LABEL_CLASSES - 1, <del> dtype=tf.int32), <del> _LABEL_CLASSES) <del> <del> return features, labels <del> <ide> def resnet_model_fn_helper(self, mode, multi_gpu=False): <ide> """Tests that the EstimatorSpec is given the appropriate arguments.""" <ide> tf.train.create_global_step() <ide> <del> features, labels = self.input_fn() <add> input_fn = imagenet_main.get_synth_input_fn() <add> dataset = input_fn(True, '', _BATCH_SIZE) <add> iterator = dataset.make_one_shot_iterator() <add> features, labels = iterator.get_next() <ide> spec = imagenet_main.imagenet_model_fn( <ide> features, labels, mode, { <ide> 'resnet_size': 50, 
<ide><path>official/resnet/resnet.py <ide> def process_record_dataset(dataset, is_training, batch_size, shuffle_buffer, <ide> return dataset <ide> <ide> <add>def get_synth_input_fn(height, width, num_channels, num_classes): <add> """Returns an input function that returns a dataset with zeroes. <add> <add> This is useful in debugging input pipeline performance, as it removes all <add> elements of file reading and image preprocessing. <add> <add> Args: <add> height: Integer height that will be used to create a fake image tensor. <add> width: Integer width that will be used to create a fake image tensor. <add> num_channels: Integer depth that will be used to create a fake image tensor. <add> num_classes: Number of classes that should be represented in the fake labels <add> tensor <add> <add> Returns: <add> An input_fn that can be used in place of a real one to return a dataset <add> that can be used for iteration. <add> """ <add> def input_fn(is_training, data_dir, batch_size, *args): <add> images = tf.zeros((batch_size, height, width, num_channels), tf.float32) <add> labels = tf.zeros((batch_size, num_classes), tf.int32) <add> return tf.data.Dataset.from_tensors((images, labels)).repeat() <add> <add> return input_fn <add> <add> <ide> ################################################################################ <ide> # Functions building the ResNet model. <ide> ################################################################################ <ide> def __init__(self, resnet_size_choices=None): <ide> <ide> self.add_argument( <ide> '--multi_gpu', action='store_true', <del> help='If set, run across all available GPUs. Note that this is ' <del> 'superseded by the --num_gpus flag.') <add> help='If set, run across all available GPUs.') <add> <add> # Advanced args <add> self.add_argument( <add> '--use_synthetic_data', action='store_true', <add> help='If set, use fake data (zeroes) instead of a real dataset. 
' <add> 'This mode is useful for performance debugging, as it removes ' <add> 'input processing steps, but will not learn anything.') <ide><path>official/resnet/vgg_preprocessing.py <ide> def _random_crop_and_flip(image, crop_height, crop_width): <ide> height, width = _get_h_w(image) <ide> <ide> # Create a random bounding box. <del> # <ide> # Use tf.random_uniform and not numpy.random.rand as doing the former would <ide> # generate random numbers at graph eval time, unlike the latter which <ide> # generates random numbers at graph definition time. <ide> def _random_crop_and_flip(image, crop_height, crop_width): <ide> cropped = tf.image.random_flip_left_right(cropped) <ide> return cropped <ide> <add> <ide> def _central_crop(image, crop_height, crop_width): <ide> """Performs central crops of the given image list. <ide>
6
Javascript
Javascript
fix bug in comparison logic of object property
c3b3eb7f73b0fb4035d4b2478bf9827caf746372
<ide><path>Libraries/vendor/core/mergeInto.js <ide> function mergeInto(one, two) { <ide> if (two != null) { <ide> checkMergeObjectArg(two); <ide> for (var key in two) { <del> if (!two.hasOwnProperty(key)) { <add> if (!Object.prototype.hasOwnProperty.call(two, key)) { <ide> continue; <ide> } <ide> one[key] = two[key];
1
Java
Java
move observer factory methods to observers
b49943ae60a13df551ff26a6e712e444a316c54d
<ide><path>rxjava-core/src/main/java/rx/Observer.java <ide> protected Observer(Observer<?> op) { <ide> */ <ide> public abstract void onNext(T args); <ide> <del> /** <del> * Create an empty Observer that ignores all events. <del> */ <del> public static final <T> Observer<T> create() { <del> return new Observer<T>() { <del> <del> @Override <del> public final void onCompleted() { <del> // do nothing <del> } <del> <del> @Override <del> public final void onError(Throwable e) { <del> throw new OnErrorNotImplementedException(e); <del> } <del> <del> @Override <del> public final void onNext(T args) { <del> // do nothing <del> } <del> <del> }; <del> } <del> <del> /** <del> * Create an Observer that receives `onNext` and ignores `onError` and `onCompleted`. <del> */ <del> public static final <T> Observer<T> create(final Action1<? super T> onNext) { <del> if (onNext == null) { <del> throw new IllegalArgumentException("onNext can not be null"); <del> } <del> <del> return new Observer<T>() { <del> <del> @Override <del> public final void onCompleted() { <del> // do nothing <del> } <del> <del> @Override <del> public final void onError(Throwable e) { <del> throw new OnErrorNotImplementedException(e); <del> } <del> <del> @Override <del> public final void onNext(T args) { <del> onNext.call(args); <del> } <del> <del> }; <del> } <del> <del> /** <del> * Create an Observer that receives `onNext` and `onError` and ignores `onCompleted`. <del> * <del> */ <del> public static final <T> Observer<T> create(final Action1<? 
super T> onNext, final Action1<Throwable> onError) { <del> if (onNext == null) { <del> throw new IllegalArgumentException("onNext can not be null"); <del> } <del> if (onError == null) { <del> throw new IllegalArgumentException("onError can not be null"); <del> } <del> <del> return new Observer<T>() { <del> <del> @Override <del> public final void onCompleted() { <del> // do nothing <del> } <del> <del> @Override <del> public final void onError(Throwable e) { <del> onError.call(e); <del> } <del> <del> @Override <del> public final void onNext(T args) { <del> onNext.call(args); <del> } <del> <del> }; <del> } <del> <del> /** <del> * Create an Observer that receives `onNext`, `onError` and `onCompleted`. <del> * <del> */ <del> public static final <T> Observer<T> create(final Action1<? super T> onNext, final Action1<Throwable> onError, final Action0 onComplete) { <del> if (onNext == null) { <del> throw new IllegalArgumentException("onNext can not be null"); <del> } <del> if (onError == null) { <del> throw new IllegalArgumentException("onError can not be null"); <del> } <del> if (onComplete == null) { <del> throw new IllegalArgumentException("onComplete can not be null"); <del> } <del> <del> return new Observer<T>() { <del> <del> @Override <del> public final void onCompleted() { <del> onComplete.call(); <del> } <del> <del> @Override <del> public final void onError(Throwable e) { <del> onError.call(e); <del> } <del> <del> @Override <del> public final void onNext(T args) { <del> onNext.call(args); <del> } <del> <del> }; <del> } <del> <ide> /** <ide> * Used to register an unsubscribe callback. <ide> */ <ide><path>rxjava-core/src/main/java/rx/observers/Observers.java <add>package rx.observers; <add> <add>import rx.Observer; <add>import rx.util.OnErrorNotImplementedException; <add>import rx.util.functions.Action0; <add>import rx.util.functions.Action1; <add> <add>public class Observers { <add> <add> /** <add> * Create an empty Observer that ignores all events. 
<add> */ <add> public static final <T> Observer<T> create() { <add> return new Observer<T>() { <add> <add> @Override <add> public final void onCompleted() { <add> // do nothing <add> } <add> <add> @Override <add> public final void onError(Throwable e) { <add> throw new OnErrorNotImplementedException(e); <add> } <add> <add> @Override <add> public final void onNext(T args) { <add> // do nothing <add> } <add> <add> }; <add> } <add> <add> /** <add> * Create an Observer that receives `onNext` and ignores `onError` and `onCompleted`. <add> */ <add> public static final <T> Observer<T> create(final Action1<? super T> onNext) { <add> if (onNext == null) { <add> throw new IllegalArgumentException("onNext can not be null"); <add> } <add> <add> return new Observer<T>() { <add> <add> @Override <add> public final void onCompleted() { <add> // do nothing <add> } <add> <add> @Override <add> public final void onError(Throwable e) { <add> throw new OnErrorNotImplementedException(e); <add> } <add> <add> @Override <add> public final void onNext(T args) { <add> onNext.call(args); <add> } <add> <add> }; <add> } <add> <add> /** <add> * Create an Observer that receives `onNext` and `onError` and ignores `onCompleted`. <add> * <add> */ <add> public static final <T> Observer<T> create(final Action1<? 
super T> onNext, final Action1<Throwable> onError) { <add> if (onNext == null) { <add> throw new IllegalArgumentException("onNext can not be null"); <add> } <add> if (onError == null) { <add> throw new IllegalArgumentException("onError can not be null"); <add> } <add> <add> return new Observer<T>() { <add> <add> @Override <add> public final void onCompleted() { <add> // do nothing <add> } <add> <add> @Override <add> public final void onError(Throwable e) { <add> onError.call(e); <add> } <add> <add> @Override <add> public final void onNext(T args) { <add> onNext.call(args); <add> } <add> <add> }; <add> } <add> <add> /** <add> * Create an Observer that receives `onNext`, `onError` and `onCompleted`. <add> * <add> */ <add> public static final <T> Observer<T> create(final Action1<? super T> onNext, final Action1<Throwable> onError, final Action0 onComplete) { <add> if (onNext == null) { <add> throw new IllegalArgumentException("onNext can not be null"); <add> } <add> if (onError == null) { <add> throw new IllegalArgumentException("onError can not be null"); <add> } <add> if (onComplete == null) { <add> throw new IllegalArgumentException("onComplete can not be null"); <add> } <add> <add> return new Observer<T>() { <add> <add> @Override <add> public final void onCompleted() { <add> onComplete.call(); <add> } <add> <add> @Override <add> public final void onError(Throwable e) { <add> onError.call(e); <add> } <add> <add> @Override <add> public final void onNext(T args) { <add> onNext.call(args); <add> } <add> <add> }; <add> } <add> <add>}
2
Python
Python
support t5 distillation w/hidden state supervision
d5d2744aa799b94488960a261d1b7376d791a621
<ide><path>examples/seq2seq/distillation.py <ide> class BartSummarizationDistiller(SummarizationModule): <ide> """Supports Bart, Pegasus and other models that inherit from Bart.""" <ide> <del> loss_names = ["loss", "ce_loss", "mlm_loss", "enc_mse_loss", "hid_loss_enc", "hid_loss_dec"] <add> loss_names = ["loss", "ce_loss", "mlm_loss", "hid_loss_enc", "hid_loss_dec"] <ide> <ide> def __init__(self, hparams): <ide> assert Path(hparams.data_dir).exists() <ide> def __init__(self, hparams): <ide> if hparams.length_penalty != -1: <ide> student.config.length_penalty = hparams.length_penalty <ide> super().__init__(hparams, model=student, config=student.config) <add> model_type = student.config.model_type <ide> self.e_layer_ids, self.d_layer_ids = e_layer_ids, d_layer_ids # type: List[int], List[int] <del> self.different_encoder = hparams.student_encoder_layers != teacher.config.encoder_layers <del> self.different_decoder = hparams.student_decoder_layers != teacher.config.decoder_layers <add> <add> if model_type == "t5": <add> teacher_encoder_layers = len(teacher.get_encoder().block) <add> teacher_decoder_layers = len(teacher.get_decoder().block) <add> else: <add> teacher_encoder_layers = teacher.config.encoder_layers <add> teacher_decoder_layers = teacher.config.decoder_layers <add> <add> self.different_encoder = hparams.student_encoder_layers != teacher_encoder_layers <add> self.different_decoder = hparams.student_decoder_layers != teacher_decoder_layers <add> <ide> self.teacher = teacher <ide> freeze_params(self.teacher) <ide> <ide> def __init__(self, hparams): <ide> del self.teacher.encoder <ide> # Intermediate supervision: Decide which layers to supervise <ide> if hparams.supervise_forward: <del> self.d_matches = get_layers_to_supervise( <del> n_student=len(self.d_layer_ids), n_teacher=self.teacher.config.decoder_layers <del> ) <del> else: <add> self.e_matches = get_layers_to_supervise(n_student=len(self.e_layer_ids), n_teacher=teacher_encoder_layers) <add> 
self.d_matches = get_layers_to_supervise(n_student=len(self.d_layer_ids), n_teacher=teacher_decoder_layers) <add> else: # student layer should emulate hidden states of the teacher layer it was copied from <add> self.e_matches = self.e_layer_ids <ide> self.d_matches = self.d_layer_ids <add> <ide> self.ce_loss_fct = nn.KLDivLoss(reduction="batchmean") <ide> self.temperature = 2.0 <ide> self.alpha_mlm = hparams.alpha_mlm <ide> self.alpha_ce = hparams.alpha_ce <ide> self.alpha_hid = hparams.alpha_hid <del> self.alpha_encoder_loss = hparams.alpha_encoder_loss <ide> gc.collect() <ide> torch.cuda.empty_cache() <ide> <ide> def _step(self, batch): <ide> output_hidden_states=True, <ide> output_attentions=False, <ide> use_cache=False, <del> ) # TODO(@sshleifer): return_dict=True cleanup <add> ) <ide> <ide> # Same cross entropy vs. label smoothing logic as finetune.py <ide> assert lm_logits.shape[-1] == self.model.config.vocab_size <ide> def _step(self, batch): <ide> def zero_tensor(): <ide> return torch.tensor(0.0).type_as(student_lm_loss) <ide> <del> loss_encoder, hid_loss_enc, hid_loss_dec = zero_tensor(), zero_tensor(), zero_tensor() <del> if self.different_encoder: <add> hid_loss_enc, hid_loss_dec = zero_tensor(), zero_tensor() <add> if self.different_encoder: # compute encoder hidden state loss <ide> with torch.no_grad(): <del> teacher_enc_outputs, teacher_enc_hid, _ = self.teacher.get_encoder()( <del> input_ids, attention_mask=src_mask, output_hidden_states=True <del> ) <del> # DEPRECATE THIS <del> if self.hparams.alpha_encoder_loss > 0: <del> loss_encoder = self.calc_mse_loss(enc_outputs, teacher_enc_outputs, src_mask) <del> <del> hid_loss_enc = self.calc_hidden_loss(src_mask, enc_hidden_state, teacher_enc_hid, self.e_layer_ids) <del> <del> teacher_enc_outputs = (enc_outputs,) <del> assert isinstance(teacher_enc_outputs, tuple), type(teacher_enc_outputs) <add> teacher_enc_hid = self.teacher.get_encoder()( <add> input_ids, attention_mask=src_mask, 
output_hidden_states=True, return_dict=True <add> ).hidden_states <add> <add> hid_loss_enc = self.calc_hidden_loss( <add> src_mask, <add> enc_hidden_state, <add> teacher_enc_hid, <add> self.e_matches, <add> normalize_hidden=self.hparams.normalize_hidden, <add> ) <ide> <ide> with torch.no_grad(): <del> tloss, tlogits, tdec_hidden, _ = self.teacher( <add> outputs = self.teacher( <ide> input_ids, <ide> attention_mask=src_mask, <del> encoder_outputs=teacher_enc_outputs, <add> encoder_outputs=(enc_outputs,), <ide> decoder_input_ids=decoder_input_ids, <ide> lm_labels=labels, <ide> output_hidden_states=True, <add> return_dict=True, <ide> ) <add> tlogits, tdec_hidden = outputs.logits, outputs.decoder_hidden_states <ide> dec_mask = decoder_input_ids.ne(pad_token_id) <ide> loss_ce = self.calc_ce_loss(dec_mask, lm_logits, tlogits) <ide> if self.alpha_hid > 0: # Intermediate supervision of decoder hidden states <ide> def zero_tensor(): <ide> blended_loss = ( <ide> self.alpha_ce * loss_ce <ide> + self.alpha_mlm * student_lm_loss <del> + self.hparams.alpha_encoder_loss * loss_encoder <ide> + self.hparams.alpha_hid * (hid_loss_enc + hid_loss_dec) <ide> ) <del> return blended_loss, loss_ce, student_lm_loss, loss_encoder, hid_loss_enc, hid_loss_dec <add> return blended_loss, loss_ce, student_lm_loss, hid_loss_enc, hid_loss_dec <ide> <ide> @staticmethod <ide> def calc_hidden_loss(attention_mask, hidden_states, hidden_states_T, matches, normalize_hidden): <ide> def add_distill_args(parser): <ide> parser.add_argument("--teacher", type=str) <ide> parser.add_argument("--alpha_ce", default=0.8, type=float) <ide> parser.add_argument("--alpha_mlm", default=0.2, type=float) <del> parser.add_argument("--alpha_encoder_loss", default=0.0, type=float) <ide> parser.add_argument("--alpha_hid", default=0.0, type=float, required=False) <ide> parser.add_argument("--student_decoder_layers", default=12, type=int, required=False) <ide> parser.add_argument("--student_encoder_layers", default=12, 
type=int, required=False) <ide><path>examples/seq2seq/test_seq2seq_examples.py <ide> "n_val": -1, <ide> "n_test": -1, <ide> "student_encoder_layers": 1, <del> "alpha_encoder_loss": 0.0, <ide> "freeze_encoder": False, <ide> "auto_scale_batch_size": False, <ide> } <ide> def test_distill_mbart(self): <ide> <ide> evaluate_checkpoint(ckpts[0], dest_dir=Path(tempfile.mkdtemp())) <ide> <del> @unittest.skip("T5 distillation is broken at the moment") <ide> def test_distill_t5(self): <ide> updates = dict( <ide> student_encoder_layers=1, <ide> def _test_distiller_cli(self, updates, check_contents=True): <ide> model_name_or_path="sshleifer/tinier_bart", <ide> teacher=CHEAP_ARGS["model_name_or_path"], <ide> val_check_interval=0.5, <del> alpha_encoder_loss=0.4, <ide> ) <ide> default_updates.update(updates) <ide> args_d: dict = CHEAP_ARGS.copy()
2
Text
Text
prepare 14.x changelog for remark update
bea0ee803302b9bb72b6ecf7a799fc2264ee1389
<ide><path>doc/changelogs/CHANGELOG_V14.md <ide> <ide> <!--lint disable prohibited-strings--> <ide> <!--lint disable maximum-line-length--> <add><!--lint disable no-literal-urls--> <ide> <ide> <table> <ide> <tr>
1
Javascript
Javascript
use fixtures.path for cmd string building
b13e660a47a88396c7316cbb6178a18a8f4a52d3
<ide><path>test/parallel/test-error-reporting.js <ide> const common = require('../common'); <ide> const assert = require('assert'); <ide> const exec = require('child_process').exec; <del>const path = require('path'); <add>const fixtures = require('../common/fixtures'); <ide> <ide> function errExec(script, callback) { <del> const cmd = `"${process.argv[0]}" "${path.join(common.fixturesDir, script)}"`; <add> const cmd = `"${process.argv[0]}" "${fixtures.path(script)}"`; <ide> return exec(cmd, function(err, stdout, stderr) { <ide> // There was some error <ide> assert.ok(err);
1
PHP
PHP
apply fixes from styleci
562b869f0fff57fa463ea0ec61b5d0c7de887f35
<ide><path>src/Illuminate/Database/Schema/Grammars/RenameColumn.php <ide> protected static function getRenamedDiff($grammar, Blueprint $blueprint, Fluent <ide> protected static function setRenamedColumns(TableDiff $tableDiff, Fluent $command, Column $column) <ide> { <ide> $tableDiff->renamedColumns = [ <del> $command->from => new Column($command->to, $column->getType(), $column->toArray()) <add> $command->from => new Column($command->to, $column->getType(), $column->toArray()), <ide> ]; <ide> <ide> return $tableDiff;
1
PHP
PHP
extract some methods to break down a mega method
dd944162a7d2a813617cef82ae7b82fc0f4b11e0
<ide><path>src/ORM/Association/BelongsToMany.php <ide> public function targetForeignKey($key = null) <ide> */ <ide> public function junction($table = null) <ide> { <del> $target = $this->target(); <del> $source = $this->source(); <del> $sAlias = $source->alias(); <del> $tAlias = $target->alias(); <ide> $tableLocator = $this->tableLocator(); <ide> <ide> if ($table === null) { <ide> public function junction($table = null) <ide> if (is_string($table)) { <ide> $table = $tableLocator->get($table); <ide> } <del> $junctionAlias = $table->alias(); <add> $target = $this->target(); <add> $source = $this->source(); <ide> <del> if (!$table->association($sAlias)) { <del> $table <del> ->belongsTo($sAlias, ['foreignKey' => $this->foreignKey()]) <del> ->target($source); <del> } <add> $this->_generateSourceAssociations($table, $source); <add> $this->_generateTargetAssociations($table, $source, $target); <add> $this->_generateJunctionAssociations($table, $source, $target); <add> return $this->_junctionTable = $table; <add> } <ide> <del> if (!$table->association($tAlias)) { <del> $table <del> ->belongsTo($tAlias, ['foreignKey' => $this->targetForeignKey()]) <del> ->target($target); <del> } <add> /** <add> * Generate reciprocal associations as necessary. <add> * <add> * Generates the following associations: <add> * <add> * - target hasMany junction e.g. Articles hasMany ArticlesTags <add> * - target belongsToMany source e.g Articles belongsToMany Tags. <add> * <add> * You can override these generated associations by defining associations <add> * with the correct aliases. <add> * <add> * @param \Cake\ORM\Table $junction The junction table. <add> * @param \Cake\ORM\Table $source The source table. <add> * @param \Cake\ORM\Table $target The target table. 
<add> * @return void <add> */ <add> protected function _generateTargetAssociations($junction, $source, $target) <add> { <add> $junctionAlias = $junction->alias(); <add> $sAlias = $source->alias(); <ide> <ide> if (!$target->association($junctionAlias)) { <ide> $target->hasMany($junctionAlias, [ <del> 'targetTable' => $table, <add> 'targetTable' => $junction, <ide> 'foreignKey' => $this->targetForeignKey(), <ide> ]); <ide> } <add> if (!$target->association($sAlias)) { <add> $target->belongsToMany($sAlias, [ <add> 'sourceTable' => $target, <add> 'targetTable' => $source, <add> 'foreignKey' => $this->targetForeignKey(), <add> 'targetForeignKey' => $this->foreignKey(), <add> 'through' => $junction <add> ]); <add> } <add> } <ide> <add> /** <add> * Generate additional source table associations as necessary. <add> * <add> * Generates the following associations: <add> * <add> * - source hasMany junction e.g. Tags hasMany ArticlesTags <add> * <add> * You can override these generated associations by defining associations <add> * with the correct aliases. <add> * <add> * @param \Cake\ORM\Table $junction The junction table. <add> * @param \Cake\ORM\Table $source The source table. <add> * @return void <add> */ <add> protected function _generateSourceAssociations($junction, $source) <add> { <add> $junctionAlias = $junction->alias(); <ide> if (!$source->association($junctionAlias)) { <ide> $source->hasMany($junctionAlias, [ <del> 'targetTable' => $table, <add> 'targetTable' => $junction, <ide> 'foreignKey' => $this->foreignKey(), <ide> ]); <ide> } <add> } <ide> <del> if (!$target->association($sAlias)) { <del> $target->belongsToMany($sAlias, [ <del> 'sourceTable' => $target, <del> 'targetTable' => $source, <add> /** <add> * Generate associations on the junction table as necessary <add> * <add> * Generates the following associations: <add> * <add> * - junction belongsTo source e.g. ArticlesTags belongsTo Tags <add> * - junction belongsTo target e.g. 
ArticlesTags belongsTo Articles <add> * <add> * You can override these generated associations by defining associations <add> * with the correct aliases. <add> * <add> * @param \Cake\ORM\Table $junction The junction table. <add> * @param \Cake\ORM\Table $source The source table. <add> * @param \Cake\ORM\Table $target The target table. <add> * @return void <add> */ <add> protected function _generateJunctionAssociations($junction, $source, $target) <add> { <add> $tAlias = $target->alias(); <add> $sAlias = $source->alias(); <add> <add> if (!$junction->association($tAlias)) { <add> $junction->belongsTo($tAlias, [ <ide> 'foreignKey' => $this->targetForeignKey(), <del> 'targetForeignKey' => $this->foreignKey(), <del> 'through' => $table <add> 'targetTable' => $target <add> ]); <add> } <add> if (!$junction->association($sAlias)) { <add> $junction->belongsTo($sAlias, [ <add> 'foreignKey' => $this->foreignKey(), <add> 'targetTable' => $source <ide> ]); <ide> } <del> <del> return $this->_junctionTable = $table; <ide> } <ide> <ide> /**
1
Ruby
Ruby
remove virtual_path from fallback templates
d0c745b8634be3bd830bc93998f199e18bebef49
<ide><path>actionview/lib/action_view/template/resolver.rb <ide> def query(path, details, formats, outside_app_allowed, locals) <ide> template_paths = reject_files_external_to_app(template_paths) unless outside_app_allowed <ide> <ide> template_paths.map do |template| <del> handler, format, variant = extract_handler_and_format_and_variant(template) <del> <del> FileTemplate.new(File.expand_path(template), handler, <del> virtual_path: path.virtual, <del> format: format, <del> variant: variant, <del> locals: locals <del> ) <add> build_template(template, path.virtual, locals) <ide> end <ide> end <ide> <add> def build_template(template, virtual_path, locals) <add> handler, format, variant = extract_handler_and_format_and_variant(template) <add> <add> FileTemplate.new(File.expand_path(template), handler, <add> virtual_path: virtual_path, <add> format: format, <add> variant: variant, <add> locals: locals <add> ) <add> end <add> <ide> def reject_files_external_to_app(files) <ide> files.reject { |filename| !inside_path?(@path, filename) } <ide> end <ide> class FallbackFileSystemResolver < FileSystemResolver #:nodoc: <ide> def self.instances <ide> [new(""), new("/")] <ide> end <add> <add> def build_template(template, virtual_path, locals) <add> super(template, nil, locals) <add> end <ide> end <ide> end <ide><path>actionview/test/template/fallback_file_system_resolver_test.rb <add># frozen_string_literal: true <add> <add>require "abstract_unit" <add> <add>class FallbackFileSystemResolverTest < ActiveSupport::TestCase <add> def setup <add> @root_resolver = ActionView::FallbackFileSystemResolver.new("/") <add> end <add> <add> def test_should_have_no_virtual_path <add> templates = @root_resolver.find_all("hello_world.erb", "#{FIXTURE_LOAD_PATH}/test", false, locale: [], formats: [:html], variants: [], handlers: [:erb]) <add> assert_equal 1, templates.size <add> assert_equal "Hello world!", templates[0].source <add> assert_nil templates[0].virtual_path <add> end <add>end
2
Javascript
Javascript
fix error from bad merge in
a6a28d24c183d4252bd269d89b3ddc5f4ae88863
<ide><path>src/offset.js <ide> jQuery.fn.extend( { <ide> <ide> // Incorporate borders into its offset, since they are outside its content origin <ide> parentOffset = jQuery( offsetParent ).offset(); <del> parentOffset.top += jQuery.css( offsetParent[ 0 ], "borderTopWidth", true ); <del> parentOffset.left += jQuery.css( offsetParent[ 0 ], "borderLeftWidth", true ); <add> parentOffset.top += jQuery.css( offsetParent, "borderTopWidth", true ); <add> parentOffset.left += jQuery.css( offsetParent, "borderLeftWidth", true ); <ide> } <ide> } <ide>
1
Java
Java
update example with two observables
031af80c334410a4566bf38eab8ce0c384f818e3
<ide><path>rxjava-core/src/main/java/rx/observables/operations/OperationConcat.java <ide> public void testConcat() { <ide> public void testConcatUnsubscribe() { <ide> CountDownLatch callOnce = new CountDownLatch(1); <ide> CountDownLatch okToContinue = new CountDownLatch(1); <del> TestObservable w = new TestObservable(callOnce, okToContinue, "one", "two", "three"); <add> TestObservable w1 = new TestObservable(null, null, "one", "two", "three"); <add> TestObservable w2 = new TestObservable(callOnce, okToContinue, "four", "five", "six"); <ide> <ide> @SuppressWarnings("unchecked") <ide> Observer<String> aObserver = mock(Observer.class); <ide> @SuppressWarnings("unchecked") <del> Observable<String> concat = Observable.create(concat(w)); <add> Observable<String> concat = Observable.create(concat(w1, w2)); <ide> Subscription s1 = concat.subscribe(aObserver); <ide> <ide> try { <del> //Allow the observable to call onNext once. <add> //Block main thread to allow observable "w1" to complete and observable "w2" to call onNext once. <ide> callOnce.await(); <ide> s1.unsubscribe(); <ide> //Unblock the observable to continue. 
<ide> okToContinue.countDown(); <del> w.t.join(); <add> w1.t.join(); <add> w2.t.join(); <ide> } catch (Exception e) { <ide> e.printStackTrace(); <ide> fail(e.getMessage()); <ide> } <ide> <del> System.out.println("TestObservable thread finished"); <ide> verify(aObserver, times(1)).onNext("one"); <del> verify(aObserver, never()).onNext("two"); <del> verify(aObserver, never()).onNext("three"); <add> verify(aObserver, times(1)).onNext("two"); <add> verify(aObserver, times(1)).onNext("three"); <add> verify(aObserver, times(1)).onNext("four"); <add> verify(aObserver, never()).onNext("five"); <add> verify(aObserver, never()).onNext("six"); <ide> } <ide> <ide> private static class TestObservable extends Observable<String> { <ide> public Subscription call(Observer<String> t1) { <ide> <ide> @Override <ide> public Subscription subscribe(final Observer<String> observer) { <del> System.out.println("TestObservable subscribed to ..."); <ide> t = new Thread(new Runnable() { <ide> <ide> @Override <ide> public void run() { <ide> try { <del> System.out.println("running TestObservable thread"); <ide> while(count < values.length && subscribed) { <del> System.out.println("TestObservable onNext: " + s); <ide> observer.onNext(values[count]); <ide> count++; <ide> //Unblock the main thread to call unsubscribe. <del> once.countDown(); <add> if (null != once) <add> once.countDown(); <ide> //Block until the main thread has called unsubscribe. <del> okToContinue.await(); <del> } <del> <add> if (null != once) <add> okToContinue.await(); <add> } <ide> if (subscribed) <del> observer.onCompleted(); <del> } catch (Exception e) { <del> throw new RuntimeException(e); <add> observer.onCompleted(); <add> } catch (InterruptedException e) { <add> e.printStackTrace(); <add> fail(e.getMessage()); <ide> } <ide> } <ide> <ide> }); <del> System.out.println("starting TestObservable thread"); <ide> t.start(); <del> System.out.println("done starting TestObservable thread"); <ide> return s; <ide> } <ide>
1
Javascript
Javascript
guarantee the window opening order
9eb259ce2070f25906c8020d321a48503b3e9057
<ide><path>src/main-process/atom-application.js <ide> class AtomApplication extends EventEmitter { <ide> optionsForWindowsToOpen.push(options) <ide> } <ide> <del> return Promise.all( <del> optionsForWindowsToOpen.map(options => this.openWithOptions(options)) <del> ) <add> // Preserve window opening order <add> const windows = [] <add> for (const options of optionsForWindowsToOpen) { <add> windows.push(await this.openWithOptions(options)) <add> } <add> return windows <ide> } <ide> <ide> openWithOptions (options) {
1
Javascript
Javascript
fix sparse array comparison
4dd56a39f1f1c0e98b223b611c3a869775177c68
<ide><path>lib/internal/util/comparisons.js <ide> function objEquiv(a, b, strict, keys, memos, iterationType) { <ide> } else { <ide> // Array is sparse. <ide> const keysA = objectKeys(a); <del> i++; <ide> for (; i < keysA.length; i++) { <ide> const key = keysA[i]; <ide> if (!hasOwnProperty(b, key) || <del> !innerDeepEqual(a[key], b[i], strict, memos)) { <add> !innerDeepEqual(a[key], b[key], strict, memos)) { <ide> return false; <ide> } <ide> } <ide><path>test/parallel/test-assert-deep.js <ide> assertNotDeepOrStrict( <ide> assertDeepAndStrictEqual(m3, m4); <ide> } <ide> <del>// Handle sparse arrays <del>assertDeepAndStrictEqual([1, , , 3], [1, , , 3]); <del>assertOnlyDeepEqual([1, , , 3], [1, , , 3, , , ]); <add>// Handle sparse arrays. <add>{ <add> assertDeepAndStrictEqual([1, , , 3], [1, , , 3]); <add> assertOnlyDeepEqual([1, , , 3], [1, , , 3, , , ]); <add> const a = new Array(3); <add> const b = new Array(3); <add> a[2] = true; <add> b[1] = true; <add> assertNotDeepOrStrict(a, b); <add> b[2] = true; <add> assertNotDeepOrStrict(a, b); <add> a[0] = true; <add> assertNotDeepOrStrict(a, b); <add>} <ide> <ide> // Handle different error messages <ide> {
2
Javascript
Javascript
correct the coding style according to gjslint
f656e841acbd9697dfeffad74accb69796fba679
<ide><path>fonts.js <ide> var kMaxWaitForFontFace = 1000; <ide> * fonts and their acronyms. <ide> */ <ide> var stdFontMap = { <del> "Arial": "Helvetica", <del> "Arial_Bold": "Helvetica-Bold", <del> "Arial_BoldItalic": "Helvetica-BoldOblique", <del> "Arial_Italic": "Helvetica-Oblique", <del> "Arial_BoldItalicMT": "Helvetica-BoldOblique", <del> "Arial_BoldMT": "Helvetica-Bold", <del> "Arial_ItalicMT": "Helvetica-Oblique", <del> "ArialMT": "Helvetica", <del> "Courier_Bold": "Courier-Bold", <del> "Courier_BoldItalic": "Courier-BoldOblique", <del> "Courier_Italic": "Courier-Oblique", <del> "CourierNew": "Courier", <del> "CourierNew_Bold": "Courier-Bold", <del> "CourierNew_BoldItalic": "Courier-BoldOblique", <del> "CourierNew_Italic": "Courier-Oblique", <del> "CourierNewPS_BoldItalicMT": "Courier-BoldOblique", <del> "CourierNewPS_BoldMT": "Courier-Bold", <del> "CourierNewPS_ItalicMT": "Courier-Oblique", <del> "CourierNewPSMT": "Courier", <del> "Helvetica_Bold": "Helvetica-Bold", <del> "Helvetica_BoldItalic": "Helvetica-BoldOblique", <del> "Helvetica_Italic": "Helvetica-Oblique", <del> "Symbol_Bold": "Symbol", <del> "Symbol_BoldItalic": "Symbol", <del> "Symbol_Italic": "Symbol", <del> "TimesNewRoman": "Times-Roman", <del> "TimesNewRoman_Bold": "Times-Bold", <del> "TimesNewRoman_BoldItalic": "Times-BoldItalic", <del> "TimesNewRoman_Italic": "Times-Italic", <del> "TimesNewRomanPS": "Times-Roman", <del> "TimesNewRomanPS_Bold": "Times-Bold", <del> "TimesNewRomanPS_BoldItalic": "Times-BoldItalic", <del> "TimesNewRomanPS_BoldItalicMT": "Times-BoldItalic", <del> "TimesNewRomanPS_BoldMT": "Times-Bold", <del> "TimesNewRomanPS_Italic": "Times-Italic", <del> "TimesNewRomanPS_ItalicMT": "Times-Italic", <del> "TimesNewRomanPSMT": "Times-Roman", <del> "TimesNewRomanPSMT_Bold": "Times-Bold", <del> "TimesNewRomanPSMT_BoldItalic": "Times-BoldItalic", <del> "TimesNewRomanPSMT_Italic": "Times-Italic" <add> 'Arial': 'Helvetica', <add> 'Arial_Bold': 'Helvetica-Bold', <add> 'Arial_BoldItalic': 
'Helvetica-BoldOblique', <add> 'Arial_Italic': 'Helvetica-Oblique', <add> 'Arial_BoldItalicMT': 'Helvetica-BoldOblique', <add> 'Arial_BoldMT': 'Helvetica-Bold', <add> 'Arial_ItalicMT': 'Helvetica-Oblique', <add> 'ArialMT': 'Helvetica', <add> 'Courier_Bold': 'Courier-Bold', <add> 'Courier_BoldItalic': 'Courier-BoldOblique', <add> 'Courier_Italic': 'Courier-Oblique', <add> 'CourierNew': 'Courier', <add> 'CourierNew_Bold': 'Courier-Bold', <add> 'CourierNew_BoldItalic': 'Courier-BoldOblique', <add> 'CourierNew_Italic': 'Courier-Oblique', <add> 'CourierNewPS_BoldItalicMT': 'Courier-BoldOblique', <add> 'CourierNewPS_BoldMT': 'Courier-Bold', <add> 'CourierNewPS_ItalicMT': 'Courier-Oblique', <add> 'CourierNewPSMT': 'Courier', <add> 'Helvetica_Bold': 'Helvetica-Bold', <add> 'Helvetica_BoldItalic': 'Helvetica-BoldOblique', <add> 'Helvetica_Italic': 'Helvetica-Oblique', <add> 'Symbol_Bold': 'Symbol', <add> 'Symbol_BoldItalic': 'Symbol', <add> 'Symbol_Italic': 'Symbol', <add> 'TimesNewRoman': 'Times-Roman', <add> 'TimesNewRoman_Bold': 'Times-Bold', <add> 'TimesNewRoman_BoldItalic': 'Times-BoldItalic', <add> 'TimesNewRoman_Italic': 'Times-Italic', <add> 'TimesNewRomanPS': 'Times-Roman', <add> 'TimesNewRomanPS_Bold': 'Times-Bold', <add> 'TimesNewRomanPS_BoldItalic': 'Times-BoldItalic', <add> 'TimesNewRomanPS_BoldItalicMT': 'Times-BoldItalic', <add> 'TimesNewRomanPS_BoldMT': 'Times-Bold', <add> 'TimesNewRomanPS_Italic': 'Times-Italic', <add> 'TimesNewRomanPS_ItalicMT': 'Times-Italic', <add> 'TimesNewRomanPSMT': 'Times-Roman', <add> 'TimesNewRomanPSMT_Bold': 'Times-Bold', <add> 'TimesNewRomanPSMT_BoldItalic': 'Times-BoldItalic', <add> 'TimesNewRomanPSMT_Italic': 'Times-Italic' <ide> }; <ide> <ide> var FontMeasure = (function FontMeasure() { <ide> var FontMeasure = (function FontMeasure() { <ide> if (!(measureCache = sizes[size])) <ide> measureCache = sizes[size] = Object.create(null); <ide> } else { <del> measureCache = null <add> measureCache = null; <ide> } <ide> <ide> var name 
= font.loadedName; <del> var bold = font.bold ? "bold" : "normal"; <del> var italic = font.italic ? "italic" : "normal"; <add> var bold = font.bold ? 'bold' : 'normal'; <add> var italic = font.italic ? 'italic' : 'normal'; <ide> size *= kScalePrecision; <del> var rule = bold + " " + italic + " " + size + 'px "' + name + '"'; <add> var rule = bold + ' ' + italic + ' ' + size + 'px "' + name + '"'; <ide> ctx.font = rule; <ide> }, <ide> measureText: function fonts_measureText(text) { <ide> var FontLoader = { <ide> if (!isWorker && rules.length) { <ide> FontLoader.prepareFontLoadEvent(rules, names, objs); <ide> } <del> <add> <ide> if (!checkFontsLoaded()) { <ide> document.documentElement.addEventListener( <ide> 'pdfjsFontLoad', checkFontsLoaded, false); <ide> var Font = (function Font() { <ide> <ide> if (!file) { <ide> var fontName = stdFontMap[name]; <del> this.bold = (fontName.indexOf("Bold") != -1); <del> this.italic = (fontName.indexOf("Oblique") != -1); <del> this.loadedName = fontName.split("-")[0]; <add> this.bold = (fontName.indexOf('Bold') != -1); <add> this.italic = (fontName.indexOf('Oblique') != -1); <add> this.loadedName = fontName.split('-')[0]; <ide> this.loading = false; <ide> return; <ide> } <ide> var Font = (function Font() { <ide> case 'Type1': <ide> case 'CIDFontType0': <ide> this.mimetype = 'font/opentype'; <del> <add> <ide> var subtype = properties.subtype; <ide> if (subtype === 'Type1C') { <ide> var cff = new Type2CFF(file, properties); <ide> var Font = (function Font() { <ide> <ide> // checksum <ide> var checksum = 0, n = data.length; <del> for (var i = 0; i < n; i+=4) <del> checksum = (checksum + int32([data[i], data[i+1], data[i+2], data[i+3]])) | 0; <add> for (var i = 0; i < n; i += 4) <add> checksum = (checksum + int32([data[i], data[i + 1], data[i + 2], data[i + 3]])) | 0; <ide> <ide> var tableEntry = (tag + string32(checksum) + <ide> string32(offset) + string32(length)); <ide> var Font = (function Font() { <ide> } <ide> ranges.push([start, 
end]); <ide> } <del> <add> <ide> return ranges; <ide> }; <ide> <ide> var Font = (function Font() { <ide> var range = ranges[i]; <ide> var start = range[0]; <ide> var end = range[1]; <del> var offset = (segCount - i) * 2 + bias * 2; <add> var offset = (segCount - i) * 2 + bias * 2; <ide> bias += (end - start + 1); <ide> <ide> startCount += string16(start); <ide> var Font = (function Font() { <ide> 'Unknown', // 8.Manufacturer <ide> 'Unknown' // 9.Designer <ide> ]; <del> <add> <ide> // Mac want 1-byte per character strings while Windows want <ide> // 2-bytes per character, so duplicate the names table <ide> var stringsUnicode = []; <ide> for (var i = 0; i < strings.length; i++) { <ide> var str = strings[i]; <del> <add> <ide> var strUnicode = ''; <ide> for (var j = 0; j < str.length; j++) <ide> strUnicode += string16(str.charCodeAt(j)); <ide> stringsUnicode.push(strUnicode); <ide> } <del> <add> <ide> var names = [strings, stringsUnicode]; <ide> var platforms = ['\x00\x01', '\x00\x03']; <ide> var encodings = ['\x00\x00', '\x00\x01']; <ide> var languages = ['\x00\x00', '\x04\x09']; <del> <add> <ide> var namesRecordCount = strings.length * platforms.length; <ide> var nameTable = <ide> '\x00\x00' + // format <ide> string16(namesRecordCount) + // Number of names Record <ide> string16(namesRecordCount * 12 + 6); // Storage <del> <add> <ide> // Build the name records field <ide> var strOffset = 0; <ide> for (var i = 0; i < platforms.length; i++) { <ide> var Font = (function Font() { <ide> strOffset += str.length; <ide> } <ide> } <del> <add> <ide> nameTable += strings.join('') + stringsUnicode.join(''); <ide> return nameTable; <ide> } <ide> var Font = (function Font() { <ide> encodingID: int16(font.getBytes(2)), <ide> offset: int32(font.getBytes(4)) <ide> }); <del> }; <add> } <ide> <ide> var encoding = properties.encoding; <ide> var charset = properties.charset; <ide> for (var i = 0; i < numRecords; i++) { <ide> var table = records[i]; <ide> font.pos = start + table.offset; 
<del> <add> <ide> var format = int16(font.getBytes(2)); <ide> var length = int16(font.getBytes(2)); <ide> var language = int16(font.getBytes(2)); <ide> var Font = (function Font() { <ide> // into the platform so if some characters in the font are assigned <ide> // under this limit they will not be displayed so let's rewrite the <ide> // CMap. <del> var glyphs = []; <add> var glyphs = []; <ide> var deltas = []; <ide> for (var j = 0; j < 256; j++) { <ide> var index = font.getByte(); <ide> if (index) { <ide> deltas.push(index); <del> glyphs.push({ unicode : j }); <add> glyphs.push({ unicode: j }); <ide> } <ide> } <ide> <ide> var rewrite = false; <ide> for (var code in encoding) { <del> if (code < 0x20 && encoding[code]) <add> if (code < 0x20 && encoding[code]) <ide> rewrite = true; <ide> <ide> if (rewrite) <ide> var Font = (function Font() { <ide> ((hmtx.length - numOfHMetrics * 4) >> 1); <ide> if (numMissing > 0) { <ide> font.pos = (font.start ? font.start : 0) + hmtx.offset; <del> var metrics = ""; <add> var metrics = ''; <ide> for (var i = 0; i < hmtx.length; i++) <ide> metrics += String.fromCharCode(font.getByte()); <ide> for (var i = 0; i < numMissing; i++) <del> metrics += "\x00\x00"; <add> metrics += '\x00\x00'; <ide> hmtx.data = stringToArray(metrics); <ide> } <ide> <ide> var Font = (function Font() { <ide> if (properties.type == 'CIDFontType2') { <ide> // Type2 composite fonts map characters directly to glyphs so the cmap <ide> // table must be replaced. 
<del> <add> <ide> var glyphs = []; <ide> var charset = properties.charset; <ide> if (!charset.length) { <ide> var Font = (function Font() { <ide> charsToUnicode: function fonts_chars2Unicode(chars) { <ide> var charsCache = this.charsCache; <ide> var str; <del> <add> <ide> // if we translated this string before, just grab it from the cache <ide> if (charsCache) { <ide> str = charsCache[chars]; <ide> if (str) <ide> return str; <ide> } <del> <add> <ide> // lazily create the translation cache <ide> if (!charsCache) <ide> charsCache = this.charsCache = Object.create(null); <del> <add> <ide> if (this.compositeFont) { <ide> // composite fonts have multi-byte strings <ide> // convert the string from single-byte to multi-byte <ide> // XXX assuming CIDFonts are two-byte - later need to extract the correct byte encoding <ide> // according to the PDF spec <ide> str = ''; <del> var multiByteStr = ""; <add> var multiByteStr = ''; <ide> var length = chars.length; <ide> for (var i = 0; i < length; i++) { <ide> var byte1 = chars.charCodeAt(i++) & 0xFF; <ide> var Font = (function Font() { <ide> var encoding = this.encoding; <ide> if (!encoding) <ide> return chars; <del> <add> <ide> str = ''; <ide> for (var i = 0; i < chars.length; ++i) { <ide> var charcode = chars.charCodeAt(i); <ide> var unicode = encoding[charcode]; <ide> if ('undefined' == typeof(unicode)) { <ide> // FIXME/issue 233: we're hitting this in test/pdf/sizes.pdf <ide> // at the moment, for unknown reasons. 
<del> warn('Unencoded charcode '+ charcode); <add> warn('Unencoded charcode ' + charcode); <ide> unicode = charcode; <ide> } <del> <add> <ide> // Check if the glyph has already been converted <ide> if (!IsNum(unicode)) <ide> unicode = encoding[unicode] = GlyphsUnicode[unicode.name]; <del> <add> <ide> // Handle surrogate pairs <ide> if (unicode > 0xFFFF) { <ide> str += String.fromCharCode(unicode & 0xFFFF); <ide> CFF.prototype = { <ide> BlueFuzz: '\x0c\x0b', <ide> BlueScale: '\x0c\x09', <ide> LanguageGroup: '\x0c\x11', <del> ExpansionFactor: '\x0c\x18' <add> ExpansionFactor: '\x0c\x18' <ide> }; <ide> for (var field in fieldMap) { <ide> if (!properties.private.hasOwnProperty(field)) continue; <ide> var Type2CFF = (function() { <ide> var bytes = file.getBytes(); <ide> this.bytes = bytes; <ide> this.properties = properties; <del> <add> <ide> // Other classes expect this.data to be a Javascript array <del> var data = [] <del> for (var i = 0, ii = bytes.length; i < ii; ++i) <add> var data = []; <add> for (var i = 0, ii = bytes.length; i < ii; ++i) <ide> data.push(bytes[i]); <ide> this.data = data; <ide> <ide> var Type2CFF = (function() { <ide> parse: function cff_parse() { <ide> var header = this.parseHeader(); <ide> var nameIndex = this.parseIndex(header.endPos); <del> <add> <ide> var dictIndex = this.parseIndex(nameIndex.endPos); <ide> if (dictIndex.length != 1) <ide> error('More than 1 font'); <del> <add> <ide> var stringIndex = this.parseIndex(dictIndex.endPos); <ide> var gsubrIndex = this.parseIndex(stringIndex.endPos); <ide> <ide> var Type2CFF = (function() { <ide> var privOffset = privInfo[1], privLength = privInfo[0]; <ide> var privBytes = bytes.subarray(privOffset, privOffset + privLength); <ide> baseDict = this.parseDict(privBytes); <del> var privDict = this.getPrivDict(baseDict, strings); <add> var privDict = this.getPrivDict(baseDict, strings); <ide> <ide> TODO('Parse encoding'); <ide> var charStrings = this.parseIndex(topDict['CharStrings']); <ide> var 
Type2CFF = (function() { <ide> // containing mappings for {unicode, width}) <ide> var charstrings = this.getCharStrings(charset, charStrings, <ide> privDict, this.properties); <del> <add> <ide> // create the mapping between charstring and glyph id <ide> var glyphIds = []; <ide> for (var i = 0, ii = charstrings.length; i < ii; ++i) { <ide> var Type2CFF = (function() { <ide> var pair = baseDict[i]; <ide> var key = pair[0]; <ide> var value = pair[1]; <del> switch(key) { <add> switch (key) { <ide> case 20: <ide> dict['defaultWidthX'] = value[0]; <ide> case 21: <ide> var Type2CFF = (function() { <ide> var pair = baseDict[i]; <ide> var key = pair[0]; <ide> var value = pair[1]; <del> switch(key) { <add> switch (key) { <ide> case 1: <ide> dict['Notice'] = strings[value[0]]; <ide> break; <ide> var Type2CFF = (function() { <ide> }, <ide> getStrings: function cff_getstrings(stringIndex) { <ide> function bytesToString(bytesArr) { <del> var s = ""; <add> var s = ''; <ide> for (var i = 0, ii = bytesArr.length; i < ii; ++i) <ide> s += String.fromCharCode(bytesArr[i]); <ide> return s; <ide> var Type2CFF = (function() { <ide> var bytes = this.bytes; <ide> var offset = 0; <ide> <del> while(bytes[offset] != 1) <add> while (bytes[offset] != 1) <ide> ++offset; <ide> <ide> if (offset != 0) { <del> warning("cff data is shifted"); <add> warning('cff data is shifted'); <ide> bytes = bytes.subarray(offset); <ide> this.bytes = bytes; <ide> } <ide> <ide> return { <ide> endPos: bytes[2], <ide> offsetSize: bytes[3] <del> } <add> }; <ide> }, <ide> parseDict: function cff_parseDict(dict) { <ide> var pos = 0; <ide> var Type2CFF = (function() { <ide> }; <ide> <ide> function parseFloatOperand() { <del> var str = ""; <add> var str = ''; <ide> var eof = 15; <ide> var lookup = ['0', '1', '2', '3', '4', '5', '6', '7', '8', <ide> '9', '.', 'E', 'E-', null, '-']; <ide> var Type2CFF = (function() { <ide> <ide> var operands = []; <ide> var entries = []; <del> <add> <ide> var pos = 0; <ide> var end = 
dict.length; <ide> while (pos < end) { <ide> var Type2CFF = (function() { <ide> }, <ide> length: count, <ide> endPos: end <del> } <del> }, <add> }; <add> } <ide> }; <ide> <ide> return constructor; <ide><path>pdf.js <ide> function backtrace() { <ide> var stackStr; <ide> try { <ide> throw new Error(); <del> } catch(e) { <add> } catch (e) { <ide> stackStr = e.stack; <del> }; <add> } <ide> return stackStr.split('\n').slice(1).join('\n'); <ide> } <ide> <ide> var AsciiHexStream = (function() { <ide> function constructor(str) { <ide> this.str = str; <ide> this.dict = str.dict; <del> <add> <ide> DecodeStream.call(this); <ide> } <del> <add> <ide> var hexvalueMap = { <ide> 9: -1, // \t <ide> 32: -1, // space <ide> var AsciiHexStream = (function() { <ide> }; <ide> <ide> constructor.prototype = Object.create(DecodeStream.prototype); <del> <add> <ide> constructor.prototype.readBlock = function() { <del> var gtCode = '>'.charCodeAt(0), bytes = this.str.getBytes(), c, n, <add> var gtCode = '>'.charCodeAt(0), bytes = this.str.getBytes(), c, n, <ide> decodeLength, buffer, bufferLength, i, length; <del> <add> <ide> decodeLength = (bytes.length + 1) >> 1; <ide> buffer = this.ensureBuffer(this.bufferLength + decodeLength); <ide> bufferLength = this.bufferLength; <del> <del> for(i = 0, length = bytes.length; i < length; i++) { <add> <add> for (i = 0, length = bytes.length; i < length; i++) { <ide> c = hexvalueMap[bytes[i]]; <del> while (c == -1 && (i+1) < length) { <add> while (c == -1 && (i + 1) < length) { <ide> c = hexvalueMap[bytes[++i]]; <ide> } <del> <del> if((i+1) < length && (bytes[i+1] !== gtCode)) { <add> <add> if ((i + 1) < length && (bytes[i + 1] !== gtCode)) { <ide> n = hexvalueMap[bytes[++i]]; <del> buffer[bufferLength++] = c*16+n; <add> buffer[bufferLength++] = c * 16 + n; <ide> } else { <del> if(bytes[i] !== gtCode) { // EOD marker at an odd number, behave as if a 0 followed the last digit. 
<del> buffer[bufferLength++] = c*16; <add> if (bytes[i] !== gtCode) { // EOD marker at an odd number, behave as if a 0 followed the last digit. <add> buffer[bufferLength++] = c * 16; <ide> } <ide> } <ide> } <del> <add> <ide> this.bufferLength = bufferLength; <ide> this.eof = true; <ide> }; <del> <add> <ide> return constructor; <ide> })(); <ide> <ide> var Page = (function() { <ide> create: Date.now(), <ide> compile: 0.0, <ide> fonts: 0.0, <del> render: 0.0, <add> render: 0.0 <ide> }; <ide> this.xref = xref; <ide> } <ide> var Page = (function() { <ide> return shadow(this, 'height', height); <ide> }, <ide> get rotate() { <del> var rotate = this.inheritPageProp("Rotate") || 0; <add> var rotate = this.inheritPageProp('Rotate') || 0; <ide> // Normalize rotation so it's a multiple of 90 and between 0 and 270 <ide> if (rotate % 90 != 0) { <ide> rotate = 0; <ide> var Page = (function() { <ide> stats.compile = stats.fonts = stats.render = 0; <ide> <ide> var gfx = new CanvasGraphics(canvasCtx); <del> var fonts = [ ]; <add> var fonts = []; <ide> <ide> this.compile(gfx, fonts); <ide> stats.compile = Date.now(); <ide> var Page = (function() { <ide> stats.fonts = Date.now(); <ide> // Always defer call to display() to work around bug in <ide> // Firefox error reporting from XHR callbacks. 
<del> setTimeout(function () { <add> setTimeout(function() { <ide> var exc = null; <ide> try { <ide> self.display(gfx); <ide> var EvalState = (function() { <ide> var PartialEvaluator = (function() { <ide> function constructor() { <ide> this.state = new EvalState(); <del> this.stateStack = [ ]; <add> this.stateStack = []; <ide> } <ide> <ide> var OP_MAP = { <ide> var PartialEvaluator = (function() { <ide> eval: function(stream, xref, resources, fonts) { <ide> resources = xref.fetchIfRef(resources) || new Dict(); <ide> var xobjs = xref.fetchIfRef(resources.get('XObject')) || new Dict(); <del> var patterns = xref.fetchIfRef(resources.get("Pattern")) || new Dict(); <add> var patterns = xref.fetchIfRef(resources.get('Pattern')) || new Dict(); <ide> var parser = new Parser(new Lexer(stream), false); <ide> var args = [], argsArray = [], fnArray = [], obj; <del> <add> <ide> while (!IsEOF(obj = parser.getObj())) { <ide> if (IsCmd(obj)) { <ide> var cmd = obj.cmd; <ide> var PartialEvaluator = (function() { <ide> var pattern = xref.fetchIfRef(patterns.get(patternName.name)); <ide> if (pattern) { <ide> var dict = IsStream(pattern) ? 
pattern.dict : pattern; <del> var typeNum = dict.get("PatternType"); <add> var typeNum = dict.get('PatternType'); <ide> if (typeNum == 1) { <ide> patternName.code = this.eval(pattern, xref, <ide> dict.get('Resources'), fonts); <ide> var PartialEvaluator = (function() { <ide> } <ide> <ide> return function(gfx) { <del> for(var i = 0, length = argsArray.length; i < length; i++) <add> for (var i = 0, length = argsArray.length; i < length; i++) <ide> gfx[fnArray[i]].apply(gfx, argsArray[i]); <ide> } <ide> }, <ide> var PartialEvaluator = (function() { <ide> var subType = fontDict.get('Subtype'); <ide> var compositeFont = false; <ide> assertWellFormed(IsName(subType), 'invalid font Subtype'); <del> <del> // If font is a composite <add> <add> // If font is a composite <ide> // - get the descendant font <ide> // - set the type according to the descendant font <ide> // - get the FontDescriptor from the descendant font <ide> var PartialEvaluator = (function() { <ide> } else { <ide> fd = fontDict.get('FontDescriptor'); <ide> } <del> <add> <ide> if (!fd) <ide> return null; <del> <add> <ide> var descriptor = xref.fetch(fd); <ide> <ide> var fontName = xref.fetchIfRef(descriptor.get('FontName')); <ide> var PartialEvaluator = (function() { <ide> var glyphsData = glyphsStream.getBytes(0); <ide> var i = 0; <ide> // Glyph ids are big-endian 2-byte values <del> for (var j=0; j<glyphsData.length; j++) { <add> for (var j = 0; j < glyphsData.length; j++) { <ide> var glyphID = (glyphsData[j++] << 8) | glyphsData[j]; <ide> charset.push(glyphID); <ide> } <ide> var PartialEvaluator = (function() { <ide> if (IsName(encoding)) { <ide> // Encoding is a predefined CMap <ide> if (encoding.name == 'Identity-H') { <del> TODO ('Need to create an identity cmap') <add> TODO('Need to create an identity cmap'); <ide> } else { <del> TODO ('Need to support predefined CMaps see PDF 32000-1:2008 9.7.5.2 Predefined CMaps') <add> TODO('Need to support predefined CMaps see PDF 32000-1:2008 9.7.5.2 Predefined 
CMaps'); <ide> } <ide> } else { <del> TODO ('Need to support encoding streams see PDF 32000-1:2008 9.7.5.3'); <add> TODO('Need to support encoding streams see PDF 32000-1:2008 9.7.5.3'); <ide> } <ide> } <ide> } else if (fontDict.has('Encoding')) { <ide> var PartialEvaluator = (function() { <ide> <ide> return { <ide> name: fontName, <del> fontDict: fontDict, <add> fontDict: fontDict, <ide> file: fontFile, <ide> properties: properties <ide> }; <del> }, <add> } <ide> }; <ide> <ide> return constructor; <ide> var CanvasGraphics = (function() { <ide> stroke: function() { <ide> var ctx = this.ctx; <ide> var strokeColor = this.current.strokeColor; <del> if (strokeColor && strokeColor.type === "Pattern") { <add> if (strokeColor && strokeColor.type === 'Pattern') { <ide> // for patterns, we transform to pattern space, calculate <ide> // the pattern, call stroke, and restore to user space <ide> ctx.save(); <ide> var CanvasGraphics = (function() { <ide> var ctx = this.ctx; <ide> var fillColor = this.current.fillColor; <ide> <del> if (fillColor && fillColor.type === "Pattern") { <add> if (fillColor && fillColor.type === 'Pattern') { <ide> ctx.save(); <ide> ctx.fillStyle = fillColor.getPattern(ctx); <ide> ctx.fill(); <ide> var CanvasGraphics = (function() { <ide> var ctx = this.ctx; <ide> <ide> var fillColor = this.current.fillColor; <del> if (fillColor && fillColor.type === "Pattern") { <add> if (fillColor && fillColor.type === 'Pattern') { <ide> ctx.save(); <ide> ctx.fillStyle = fillColor.getPattern(ctx); <ide> ctx.fill(); <ide> ctx.restore(); <ide> } else { <ide> ctx.fill(); <ide> } <del> <add> <ide> var strokeColor = this.current.strokeColor; <del> if (strokeColor && strokeColor.type === "Pattern") { <add> if (strokeColor && strokeColor.type === 'Pattern') { <ide> ctx.save(); <ide> ctx.strokeStyle = strokeColor.getPattern(ctx); <ide> ctx.stroke(); <ide> ctx.restore(); <ide> } else { <ide> ctx.stroke(); <ide> } <del> <add> <ide> this.consumePath(); <ide> }, <ide> 
eoFillStroke: function() { <ide> var CanvasGraphics = (function() { <ide> <ide> size = (size <= kRasterizerMin) ? size * kScalePrecision : size; <ide> <del> var bold = fontObj.bold ? "bold" : "normal"; <del> var italic = fontObj.italic ? "italic" : "normal"; <del> var rule = bold + " " + italic + " " + size + 'px "' + name + '"'; <add> var bold = fontObj.bold ? 'bold' : 'normal'; <add> var italic = fontObj.italic ? 'italic' : 'normal'; <add> var rule = bold + ' ' + italic + ' ' + size + 'px "' + name + '"'; <ide> this.ctx.font = rule; <ide> } <ide> }, <ide> var CanvasGraphics = (function() { <ide> ctx.save(); <ide> ctx.transform.apply(ctx, current.textMatrix); <ide> ctx.scale(1, -1); <del> <add> <ide> ctx.translate(current.x, -1 * current.y); <ide> <ide> var scaleFactorX = 1, scaleFactorY = 1; <ide> var CanvasGraphics = (function() { <ide> <ide> // Color <ide> setStrokeColorSpace: function(space) { <del> this.current.strokeColorSpace = <add> this.current.strokeColorSpace = <ide> ColorSpace.parse(space, this.xref, this.res); <ide> }, <ide> setFillColorSpace: function(space) { <del> this.current.fillColorSpace = <add> this.current.fillColorSpace = <ide> ColorSpace.parse(space, this.xref, this.res); <ide> }, <ide> setStrokeColor: function(/*...*/) { <ide> var CanvasGraphics = (function() { <ide> var y0 = Math.min(bl[1], br[1], ul[1], ur[1]); <ide> var x1 = Math.max(bl[0], br[0], ul[0], ur[0]); <ide> var y1 = Math.max(bl[1], br[1], ul[1], ur[1]); <del> <add> <ide> this.ctx.fillRect(x0, y0, x1 - x0, y1 - y0); <ide> } else { <ide> // HACK to draw the gradient onto an infinite rectangle. <ide> // PDF gradients are drawn across the entire image while <ide> // Canvas only allows gradients to be drawn in a rectangle <ide> // The following bug should allow us to remove this. 
<ide> // https://bugzilla.mozilla.org/show_bug.cgi?id=664884 <del> <add> <ide> this.ctx.fillRect(-1e10, -1e10, 2e10, 2e10); <ide> } <ide> <ide> var CanvasGraphics = (function() { <ide> }, <ide> restoreFillRule: function(rule) { <ide> this.ctx.mozFillRule = rule; <del> }, <add> } <ide> }; <ide> <ide> return constructor; <ide> var Util = (function() { <ide> var yt = p[0] * m[1] + p[1] * m[3] + m[5]; <ide> return [xt, yt]; <ide> }; <del> <add> <ide> return constructor; <ide> })(); <ide> <ide> var ColorSpace = (function() { <ide> error("unimplemented color space object '" + mode + "'"); <ide> } <ide> } else { <del> error('unrecognized color space object: "'+ cs +"'"); <add> error('unrecognized color space object: "' + cs + "'"); <ide> } <ide> }; <ide> <ide> var ColorSpace = (function() { <ide> <ide> var SeparationCS = (function() { <ide> function constructor(base, tintFn) { <del> this.name = "Separation"; <add> this.name = 'Separation'; <ide> this.numComps = 1; <ide> this.defaultColor = [1]; <ide> <ide> var DeviceCmykCS = (function() { <ide> r += 0.1373 * x; <ide> g += 0.1216 * x; <ide> b += 0.1255 * x; <del> x = c1 * m1 * y * k1; // 0 0 1 0 <add> x = c1 * m1 * y * k1; // 0 0 1 0 <ide> r += x; <ide> g += 0.9490 * x; <del> x = c1 * m1 * y * k; // 0 0 1 1 <add> x = c1 * m1 * y * k; // 0 0 1 1 <ide> r += 0.1098 * x; <ide> g += 0.1020 * x; <del> x = c1 * m * y1 * k1; // 0 1 0 0 <add> x = c1 * m * y1 * k1; // 0 1 0 0 <ide> r += 0.9255 * x; <ide> b += 0.5490 * x; <del> x = c1 * m * y1 * k; // 0 1 0 1 <add> x = c1 * m * y1 * k; // 0 1 0 1 <ide> r += 0.1412 * x; <del> x = c1 * m * y * k1; // 0 1 1 0 <add> x = c1 * m * y * k1; // 0 1 1 0 <ide> r += 0.9294 * x; <ide> g += 0.1098 * x; <ide> b += 0.1412 * x; <del> x = c1 * m * y * k; // 0 1 1 1 <add> x = c1 * m * y * k; // 0 1 1 1 <ide> r += 0.1333 * x; <del> x = c * m1 * y1 * k1; // 1 0 0 0 <add> x = c * m1 * y1 * k1; // 1 0 0 0 <ide> g += 0.6784 * x; <ide> b += 0.9373 * x; <del> x = c * m1 * y1 * k; // 1 0 0 1 <add> x = c * m1 * 
y1 * k; // 1 0 0 1 <ide> g += 0.0588 * x; <ide> b += 0.1412 * x; <del> x = c * m1 * y * k1; // 1 0 1 0 <add> x = c * m1 * y * k1; // 1 0 1 0 <ide> g += 0.6510 * x; <ide> b += 0.3137 * x; <del> x = c * m1 * y * k; // 1 0 1 1 <add> x = c * m1 * y * k; // 1 0 1 1 <ide> g += 0.0745 * x; <del> x = c * m * y1 * k1; // 1 1 0 0 <add> x = c * m * y1 * k1; // 1 1 0 0 <ide> r += 0.1804 * x; <ide> g += 0.1922 * x; <ide> b += 0.5725 * x; <del> x = c * m * y1 * k; // 1 1 0 1 <add> x = c * m * y1 * k; // 1 1 0 1 <ide> b += 0.0078 * x; <del> x = c * m * y * k1; // 1 1 1 0 <add> x = c * m * y * k1; // 1 1 1 0 <ide> r += 0.2118 * x; <ide> g += 0.2119 * x; <ide> b += 0.2235 * x; <ide> var DeviceCmykCS = (function() { <ide> for (var i = 0; i < length; i++) { <ide> var cmyk = []; <ide> for (var j = 0; j < 4; ++j) <del> cmyk.push(colorBuf[colorBufPos++]/255); <add> cmyk.push(colorBuf[colorBufPos++] / 255); <ide> <ide> var rgb = this.getRgb(cmyk); <ide> for (var j = 0; j < 3; ++j) <ide> var Pattern = (function() { <ide> // Output: the appropriate fillStyle or strokeStyle <ide> getPattern: function pattern_getStyle(ctx) { <ide> error('Should not call Pattern.getStyle'); <del> }, <add> } <ide> }; <ide> <ide> constructor.parse = function pattern_parse(args, cs, xref, res, ctx) { <ide> var length = args.length; <ide> <ide> var patternName = args[length - 1]; <ide> if (!IsName(patternName)) <del> error("Bad args to getPattern"); <add> error('Bad args to getPattern'); <ide> <del> var patternRes = xref.fetchIfRef(res.get("Pattern")); <add> var patternRes = xref.fetchIfRef(res.get('Pattern')); <ide> if (!patternRes) <del> error("Unable to find pattern resource"); <add> error('Unable to find pattern resource'); <ide> <ide> var pattern = xref.fetchIfRef(patternRes.get(patternName.name)); <ide> var dict = IsStream(pattern) ? 
pattern.dict : pattern; <del> var typeNum = dict.get("PatternType"); <add> var typeNum = dict.get('PatternType'); <ide> <del> switch(typeNum) { <add> switch (typeNum) { <ide> case 1: <ide> var base = cs.base; <ide> var color; <ide> var RadialAxialShading = (function() { <ide> } else if (type == 3) { <ide> var p0 = [coordsArr[0], coordsArr[1]]; <ide> var p1 = [coordsArr[3], coordsArr[4]]; <del> var r0 = coordsArr[2], r1 = coordsArr[5] <add> var r0 = coordsArr[2], r1 = coordsArr[5]; <ide> } else { <del> error() <add> error(); <ide> } <ide> <ide> var matrix = this.matrix; <ide> var RadialAxialShading = (function() { <ide> <ide> var TilingPattern = (function() { <ide> var PAINT_TYPE_COLORED = 1, PAINT_TYPE_UNCOLORED = 2; <del> <add> <ide> function constructor(pattern, code, dict, color, xref, ctx) { <ide> function multiply(m, tm) { <ide> var a = m[0] * tm[0] + m[1] * tm[2]; <ide> var TilingPattern = (function() { <ide> <ide> TODO('TilingType'); <ide> <del> this.matrix = dict.get("Matrix"); <add> this.matrix = dict.get('Matrix'); <ide> this.curMatrix = ctx.mozCurrentTransform; <ide> this.invMatrix = ctx.mozCurrentTransformInverse; <ide> this.ctx = ctx; <ide> var TilingPattern = (function() { <ide> <ide> var topLeft = [x0, y0]; <ide> // we want the canvas to be as large as the step size <del> var botRight = [x0 + xstep, y0 + ystep] <add> var botRight = [x0 + xstep, y0 + ystep]; <ide> <ide> var width = botRight[0] - topLeft[0]; <ide> var height = botRight[1] - topLeft[1]; <ide> <ide> // TODO: hack to avoid OOM, we would idealy compute the tiling <ide> // pattern to be only as large as the acual size in device space <del> // This could be computed with .mozCurrentTransform, but still <add> // This could be computed with .mozCurrentTransform, but still <ide> // needs to be implemented <ide> while (Math.abs(width) > 512 || Math.abs(height) > 512) { <ide> width = 512; <ide> var PDFFunction = (function() { <ide> var c0 = dict.get('C0') || [0]; <ide> var c1 = dict.get('C1') || 
[1]; <ide> var n = dict.get('N'); <del> <add> <ide> if (!IsArray(c0) || !IsArray(c1)) <ide> error('Illegal dictionary for interpolated function'); <ide> <ide> var PDFFunction = (function() { <ide> for (var i = 0; i < length; ++i) <ide> diff.push(c1[i] - c0[i]); <ide> <del> this.func = function (args) { <add> this.func = function(args) { <ide> var x = args[0]; <ide> <ide> var out = []; <ide> var PDFFunction = (function() { <ide> }, <ide> constructStiched: function() { <ide> TODO('unhandled type of function'); <del> this.func = function () { return [ 255, 105, 180 ]; } <add> this.func = function() { return [255, 105, 180]; } <ide> }, <ide> constructPostScript: function() { <ide> TODO('unhandled type of function'); <del> this.func = function () { return [ 255, 105, 180 ]; } <add> this.func = function() { return [255, 105, 180]; } <ide> } <ide> }; <ide> <ide><path>web/viewer.js <ide> var PDFView = { <ide> <ide> set page(val) { <ide> var pages = this.pages; <del> var input = document.getElementById("pageNumber"); <add> var input = document.getElementById('pageNumber'); <ide> if (val <= 0 || val > pages.length) { <ide> input.value = this.page; <ide> return; <ide> } <del> <add> <ide> document.location.hash = val; <del> document.getElementById("previous").disabled = (val == 1); <del> document.getElementById("next").disabled = (val == pages.length); <add> document.getElementById('previous').disabled = (val == 1); <add> document.getElementById('next').disabled = (val == pages.length); <ide> if (input.value == val) <ide> return; <ide> <ide> var PDFView = { <ide> }, <ide> <ide> open: function(url, scale) { <del> if (url.indexOf("http") == 0) <add> if (url.indexOf('http') == 0) <ide> return; <ide> <ide> document.title = url; <ide> var PDFView = { <ide> var page = pdf.getPage(i); <ide> pages.push(new PageView(container, page, i, page.width, page.height, page.stats)); <ide> thumbnails.push(new ThumbnailView(sidebar, pages[i - 1])); <del> }; <add> } <ide> <ide> this.scale = (scale 
|| kDefaultScale); <ide> this.page = parseInt(document.location.hash.substring(1)) || 1; <ide> var PDFView = { <ide> } <ide> <ide> return visiblePages; <del> }, <add> } <ide> }; <ide> <ide> var PageView = function(container, content, id, width, height, stats) { <ide> var PageView = function(container, content, id, width, height, stats) { <ide> <ide> this.update = function(scale) { <ide> this.scale = scale || this.scale; <del> div.style.width = (this.width * this.scale)+ 'px'; <add> div.style.width = (this.width * this.scale) + 'px'; <ide> div.style.height = (this.height * this.scale) + 'px'; <ide> <ide> while (div.hasChildNodes()) <ide> var PageView = function(container, content, id, width, height, stats) { <ide> <ide> this.updateStats = function() { <ide> var t1 = stats.compile, t2 = stats.fonts, t3 = stats.render; <del> var str = 'Time to compile/fonts/render: ' + <add> var str = 'Time to compile/fonts/render: ' + <ide> (t1 - stats.begin) + '/' + (t2 - t1) + '/' + (t3 - t2) + ' ms'; <ide> document.getElementById('info').innerHTML = str; <ide> }; <ide> window.addEventListener('scroll', function(evt) { <ide> PDFView.page = firstPage.id; <ide> }, true); <ide> <del>window.addEventListener("hashchange", function(evt) { <add>window.addEventListener('hashchange', function(evt) { <ide> PDFView.page = PDFView.page; <ide> }); <ide> <del>window.addEventListener("change", function(evt) { <add>window.addEventListener('change', function(evt) { <ide> var files = evt.target.files; <ide> if (!files || files.length == 0) <ide> return; <ide> window.addEventListener("change", function(evt) { <ide> document.location.hash = 1; <ide> }, true); <ide> <del>window.addEventListener("transitionend", function(evt) { <add>window.addEventListener('transitionend', function(evt) { <ide> var pageIndex = 0; <ide> var pagesCount = PDFView.pages.length; <ide> <ide> var container = document.getElementById('sidebarView'); <ide> container._interval = window.setInterval(function() { <ide> if (pageIndex 
>= pagesCount) <ide> return window.clearInterval(container._interval); <del> <add> <ide> PDFView.thumbnails[pageIndex++].draw(); <ide> }, 500); <ide> }, true);
3
PHP
PHP
add more tests around pluginapplication methods
c6a7aea9585e992b9c8a75daaee27d847e8d3e94
<ide><path>src/Core/PluginApp.php <ide> protected function checkHook($hook) <ide> */ <ide> public function routes($routes) <ide> { <del> $routes = __DIR__ . 'config' . DS . 'routes.php'; <del> if (file_exists($routes)) { <del> require_once $routes; <add> $path = $this->getConfigPath() . DS . 'routes.php'; <add> if (file_exists($path)) { <add> require_once $path; <ide> } <ide> } <ide> <ide> public function routes($routes) <ide> */ <ide> public function bootstrap() <ide> { <del> $bootstrap = __DIR__ . 'config' . DS . 'bootstrap.php'; <add> $bootstrap = $this->getConfigPath() . DS . 'bootstrap.php'; <ide> if (file_exists($bootstrap)) { <ide> require_once $bootstrap; <ide> } <ide><path>tests/TestCase/Http/BaseApplicationTest.php <ide> <?php <ide> namespace Cake\Test\TestCase; <ide> <add>use Cake\Core\Configure; <add>use Cake\Core\Plugin; <ide> use Cake\Http\BaseApplication; <add>use Cake\Http\MiddlewareQueue; <ide> use Cake\Http\Response; <ide> use Cake\Http\ServerRequestFactory; <add>use Cake\Routing\RouteBuilder; <add>use Cake\Routing\RouteCollection; <ide> use Cake\TestSuite\TestCase; <ide> use InvalidArgumentException; <ide> use TestPlugin\Plugin as TestPlugin; <ide> public function setUp() <ide> $this->path = dirname(dirname(__DIR__)); <ide> } <ide> <add> public function tearDown() <add> { <add> parent::tearDown(); <add> Plugin::unload(); <add> } <add> <ide> /** <ide> * Integration test for a simple controller. 
<ide> * <ide> public function testAddPluginValid() <ide> $this->assertCount(1, $app->getPlugins()); <ide> $this->assertTrue($app->getPlugins()->has('TestPlugin')); <ide> } <add> <add> public function testPluginEvents() <add> { <add> $app = $this->getMockForAbstractClass( <add> BaseApplication::class, <add> [$this->path] <add> ); <add> $start = $app->getEventManager(); <add> $this->assertCount(0, $start->listeners('TestPlugin.load')); <add> <add> $app->addPlugin(TestPlugin::class); <add> $this->assertNull($app->pluginEvents()); <add> <add> $after = $app->getEventManager(); <add> $this->assertSame($after, $start); <add> $this->assertCount(1, $after->listeners('TestPlugin.load')); <add> } <add> <add> public function testPluginMiddleware() <add> { <add> $start = new MiddlewareQueue(); <add> $app = $this->getMockForAbstractClass( <add> BaseApplication::class, <add> [$this->path] <add> ); <add> $app->addPlugin(TestPlugin::class); <add> <add> $after = $app->pluginMiddleware($start); <add> $this->assertSame($start, $after); <add> $this->assertCount(1, $after); <add> } <add> <add> public function testPluginRoutes() <add> { <add> $collection = new RouteCollection(); <add> $routes = new RouteBuilder($collection, '/'); <add> $app = $this->getMockForAbstractClass( <add> BaseApplication::class, <add> [$this->path] <add> ); <add> $app->addPlugin(TestPlugin::class); <add> <add> $result = $app->pluginRoutes($routes); <add> $this->assertSame($routes, $result); <add> $url = [ <add> 'plugin' => 'TestPlugin', <add> 'controller' => 'TestPlugin', <add> 'action' => 'index', <add> '_method' => 'GET' <add> ]; <add> $this->assertNotEmpty($collection->match($url, [])); <add> } <add> <add> public function testPluginBootstrap() <add> { <add> $app = $this->getMockForAbstractClass( <add> BaseApplication::class, <add> [$this->path] <add> ); <add> $app->addPlugin(TestPlugin::class); <add> <add> $this->assertFalse(Configure::check('PluginTest.test_plugin.bootstrap')); <add> 
$this->assertNull($app->pluginBootstrap()); <add> $this->assertTrue(Configure::check('PluginTest.test_plugin.bootstrap')); <add> } <ide> } <ide><path>tests/test_app/Plugin/TestPlugin/src/Plugin.php <ide> namespace TestPlugin; <ide> <ide> use Cake\Core\PluginApp; <add>use Cake\Event\EventManagerInterface; <ide> <ide> class Plugin extends PluginApp <ide> { <add> public function events(EventManagerInterface $events) <add> { <add> $events->on('TestPlugin.load', function () { <add> }); <ide> <add> return $events; <add> } <add> <add> public function middleware($middleware) <add> { <add> $middleware->add(function ($req, $res, $next) { <add> return $next($req, $res); <add> }); <add> <add> return $middleware; <add> } <ide> }
3
Go
Go
print consistent help with options and commands
f3ed7b601fa6151868416254e79a18751c5ff024
<ide><path>api/client/commands.go <ide> import ( <ide> "github.com/docker/docker/nat" <ide> "github.com/docker/docker/opts" <ide> "github.com/docker/docker/pkg/log" <add> flag "github.com/docker/docker/pkg/mflag" <ide> "github.com/docker/docker/pkg/parsers" <ide> "github.com/docker/docker/pkg/parsers/filters" <ide> "github.com/docker/docker/pkg/signal" <ide> func (cli *DockerCli) CmdHelp(args ...string) error { <ide> return nil <ide> } <ide> } <del> help := fmt.Sprintf("Usage: docker [OPTIONS] COMMAND [arg...]\n -H=[unix://%s]: tcp://host:port to bind/connect to or unix://path/to/socket to use\n\nA self-sufficient runtime for linux containers.\n\nCommands:\n", api.DEFAULTUNIXSOCKET) <del> for _, command := range [][]string{ <del> {"attach", "Attach to a running container"}, <del> {"build", "Build an image from a Dockerfile"}, <del> {"commit", "Create a new image from a container's changes"}, <del> {"cp", "Copy files/folders from a container's filesystem to the host path"}, <del> {"diff", "Inspect changes on a container's filesystem"}, <del> {"events", "Get real time events from the server"}, <del> {"export", "Stream the contents of a container as a tar archive"}, <del> {"history", "Show the history of an image"}, <del> {"images", "List images"}, <del> {"import", "Create a new filesystem image from the contents of a tarball"}, <del> {"info", "Display system-wide information"}, <del> {"inspect", "Return low-level information on a container"}, <del> {"kill", "Kill a running container"}, <del> {"load", "Load an image from a tar archive"}, <del> {"login", "Register or log in to a Docker registry server"}, <del> {"logout", "Log out from a Docker registry server"}, <del> {"logs", "Fetch the logs of a container"}, <del> {"port", "Lookup the public-facing port that is NAT-ed to PRIVATE_PORT"}, <del> {"pause", "Pause all processes within a container"}, <del> {"ps", "List containers"}, <del> {"pull", "Pull an image or a repository from a Docker registry server"}, <del> 
{"push", "Push an image or a repository to a Docker registry server"}, <del> {"restart", "Restart a running container"}, <del> {"rm", "Remove one or more containers"}, <del> {"rmi", "Remove one or more images"}, <del> {"run", "Run a command in a new container"}, <del> {"save", "Save an image to a tar archive"}, <del> {"search", "Search for an image on the Docker Hub"}, <del> {"start", "Start a stopped container"}, <del> {"stop", "Stop a running container"}, <del> {"tag", "Tag an image into a repository"}, <del> {"top", "Lookup the running processes of a container"}, <del> {"unpause", "Unpause a paused container"}, <del> {"version", "Show the Docker version information"}, <del> {"wait", "Block until a container stops, then print its exit code"}, <del> } { <del> help += fmt.Sprintf(" %-10.10s%s\n", command[0], command[1]) <del> } <del> fmt.Fprintf(cli.err, "%s\n", help) <add> <add> flag.Usage() <add> <ide> return nil <ide> } <ide> <ide><path>docker/flags.go <ide> package main <ide> <ide> import ( <add> "fmt" <ide> "os" <ide> "path/filepath" <ide> <add> "github.com/docker/docker/api" <ide> "github.com/docker/docker/opts" <ide> flag "github.com/docker/docker/pkg/mflag" <ide> ) <ide> func init() { <ide> flCert = flag.String([]string{"-tlscert"}, filepath.Join(dockerCertPath, defaultCertFile), "Path to TLS certificate file") <ide> flKey = flag.String([]string{"-tlskey"}, filepath.Join(dockerCertPath, defaultKeyFile), "Path to TLS key file") <ide> opts.HostListVar(&flHosts, []string{"H", "-host"}, "The socket(s) to bind to in daemon mode\nspecified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.") <add> <add> flag.Usage = func() { <add> fmt.Fprintf(os.Stderr, "Usage: docker [OPTIONS] COMMAND [arg...]\n -H=[unix://%s]: tcp://host:port to bind/connect to or unix://path/to/socket to use\n\nA self-sufficient runtime for linux containers.\n\nOptions:\n", api.DEFAULTUNIXSOCKET) <add> <add> flag.PrintDefaults() <add> <add> help := 
"\nCommands:\n" <add> <add> for _, command := range [][]string{ <add> {"attach", "Attach to a running container"}, <add> {"build", "Build an image from a Dockerfile"}, <add> {"commit", "Create a new image from a container's changes"}, <add> {"cp", "Copy files/folders from a container's filesystem to the host path"}, <add> {"diff", "Inspect changes on a container's filesystem"}, <add> {"events", "Get real time events from the server"}, <add> {"export", "Stream the contents of a container as a tar archive"}, <add> {"history", "Show the history of an image"}, <add> {"images", "List images"}, <add> {"import", "Create a new filesystem image from the contents of a tarball"}, <add> {"info", "Display system-wide information"}, <add> {"inspect", "Return low-level information on a container"}, <add> {"kill", "Kill a running container"}, <add> {"load", "Load an image from a tar archive"}, <add> {"login", "Register or log in to a Docker registry server"}, <add> {"logout", "Log out from a Docker registry server"}, <add> {"logs", "Fetch the logs of a container"}, <add> {"port", "Lookup the public-facing port that is NAT-ed to PRIVATE_PORT"}, <add> {"pause", "Pause all processes within a container"}, <add> {"ps", "List containers"}, <add> {"pull", "Pull an image or a repository from a Docker registry server"}, <add> {"push", "Push an image or a repository to a Docker registry server"}, <add> {"restart", "Restart a running container"}, <add> {"rm", "Remove one or more containers"}, <add> {"rmi", "Remove one or more images"}, <add> {"run", "Run a command in a new container"}, <add> {"save", "Save an image to a tar archive"}, <add> {"search", "Search for an image on the Docker Hub"}, <add> {"start", "Start a stopped container"}, <add> {"stop", "Stop a running container"}, <add> {"tag", "Tag an image into a repository"}, <add> {"top", "Lookup the running processes of a container"}, <add> {"unpause", "Unpause a paused container"}, <add> {"version", "Show the Docker version 
information"}, <add> {"wait", "Block until a container stops, then print its exit code"}, <add> } { <add> help += fmt.Sprintf(" %-10.10s%s\n", command[0], command[1]) <add> } <add> fmt.Fprintf(os.Stderr, "%s\n", help) <add> } <ide> }
2
Text
Text
remove stability warning in v8 module doc
e54c8b788d64d6388157434f10888358b3f7ab12
<ide><path>doc/api/v8.md <ide> built into the Node.js binary. It can be accessed using: <ide> const v8 = require('v8'); <ide> ``` <ide> <del>The APIs and implementation are subject to change at any time. <del> <ide> ## `v8.cachedDataVersionTag()` <ide> <!-- YAML <ide> added: v8.0.0
1
Go
Go
support both endpoint modes in stack
96790657288699146f579382f25d932c87125e86
<ide><path>cli/compose/convert/service.go <ide> import ( <ide> "fmt" <ide> "os" <ide> "sort" <add> "strings" <ide> "time" <ide> <ide> "github.com/docker/docker/api/types" <ide> func convertService( <ide> ) (swarm.ServiceSpec, error) { <ide> name := namespace.Scope(service.Name) <ide> <del> endpoint, err := convertEndpointSpec(service.Ports) <add> endpoint, err := convertEndpointSpec(service.EndpointMode, service.Ports) <ide> if err != nil { <ide> return swarm.ServiceSpec{}, err <ide> } <ide> func (a byPublishedPort) Len() int { return len(a) } <ide> func (a byPublishedPort) Swap(i, j int) { a[i], a[j] = a[j], a[i] } <ide> func (a byPublishedPort) Less(i, j int) bool { return a[i].PublishedPort < a[j].PublishedPort } <ide> <del>func convertEndpointSpec(source []composetypes.ServicePortConfig) (*swarm.EndpointSpec, error) { <add>func convertEndpointSpec(endpointMode string, source []composetypes.ServicePortConfig) (*swarm.EndpointSpec, error) { <ide> portConfigs := []swarm.PortConfig{} <ide> for _, port := range source { <ide> portConfig := swarm.PortConfig{ <ide> func convertEndpointSpec(source []composetypes.ServicePortConfig) (*swarm.Endpoi <ide> } <ide> <ide> sort.Sort(byPublishedPort(portConfigs)) <del> return &swarm.EndpointSpec{Ports: portConfigs}, nil <add> return &swarm.EndpointSpec{ <add> Mode: swarm.ResolutionMode(strings.ToLower(endpointMode)), <add> Ports: portConfigs, <add> }, nil <ide> } <ide> <ide> func convertEnvironment(source map[string]string) []string { <ide><path>cli/compose/convert/service_test.go <ide> func TestConvertEndpointSpec(t *testing.T) { <ide> Published: 80, <ide> }, <ide> } <del> endpoint, err := convertEndpointSpec(source) <add> endpoint, err := convertEndpointSpec("vip", source) <ide> <ide> expected := swarm.EndpointSpec{ <add> Mode: swarm.ResolutionMode(strings.ToLower("vip")), <ide> Ports: []swarm.PortConfig{ <ide> { <ide> TargetPort: 8080, <ide><path>cli/compose/types/types.go <ide> type ServiceConfig struct { <ide> DNS 
StringList <ide> DNSSearch StringList `mapstructure:"dns_search"` <ide> DomainName string `mapstructure:"domainname"` <add> EndpointMode string <ide> Entrypoint ShellCommand <ide> Environment MappingWithEquals <ide> EnvFile StringList `mapstructure:"env_file"`
3
Java
Java
fix checkstyle violation
47d60b34f817d7ab6c95ea0af5871b1fce3ffb92
<ide><path>spring-messaging/src/main/java/org/springframework/messaging/handler/annotation/MessageMapping.java <ide> * a specialization such as <ide> * {@link org.springframework.messaging.simp.SimpMessageHeaderAccessor <ide> * SimpMessageHeaderAccessor}.</li> <del> * <li>{@link Message Message<T>} for access to body and headers with the body <add> * <li>{@link Message Message&lt;T&gt;} for access to body and headers with the body <ide> * de-serialized if necessary to match the declared type.</li> <ide> * <li>{@link java.security.Principal} method arguments are supported in <ide> * some processing scenarios such as STOMP over WebSocket. It reflects the
1
Mixed
Ruby
allow `host` option in javscript and css helpers
6b77df0ade304e3856c8c07c93e2a8cde3c040f0
<ide><path>actionview/CHANGELOG.md <add>* Allow `host` option in `javascript_include_tag` and `stylesheet_tag` helpers <add> <add> *Grzegorz Witek* <add> <ide> * Restrict `url_for :back` to valid, non-JavaScript URLs. GH#14444 <ide> <ide> *Damien Burke* <ide><path>actionview/lib/action_view/helpers/asset_tag_helper.rb <ide> module AssetTagHelper <ide> # # => <script src="http://www.example.com/xmlhr.js"></script> <ide> def javascript_include_tag(*sources) <ide> options = sources.extract_options!.stringify_keys <del> path_options = options.extract!('protocol', 'extname').symbolize_keys <add> path_options = options.extract!('protocol', 'extname', 'host').symbolize_keys <ide> sources.uniq.map { |source| <ide> tag_options = { <ide> "src" => path_to_javascript(source, path_options) <ide> def javascript_include_tag(*sources) <ide> # # <link href="/css/stylish.css" media="screen" rel="stylesheet" /> <ide> def stylesheet_link_tag(*sources) <ide> options = sources.extract_options!.stringify_keys <del> path_options = options.extract!('protocol').symbolize_keys <add> path_options = options.extract!('protocol', 'host').symbolize_keys <ide> <ide> sources.uniq.map { |source| <ide> tag_options = { <ide><path>actionview/test/template/asset_tag_helper_test.rb <ide> def url_for(*args) <ide> %(javascript_include_tag("bank")) => %(<script src="/javascripts/bank.js" ></script>), <ide> %(javascript_include_tag("bank.js")) => %(<script src="/javascripts/bank.js" ></script>), <ide> %(javascript_include_tag("bank", :lang => "vbscript")) => %(<script lang="vbscript" src="/javascripts/bank.js" ></script>), <add> %(javascript_include_tag("bank", :host => "assets.example.com")) => %(<script src="http://assets.example.com/javascripts/bank.js"></script>), <ide> <ide> %(javascript_include_tag("http://example.com/all")) => %(<script src="http://example.com/all"></script>), <ide> %(javascript_include_tag("http://example.com/all.js")) => %(<script src="http://example.com/all.js"></script>), <ide> 
def url_for(*args) <ide> %(stylesheet_link_tag("/elsewhere/file")) => %(<link href="/elsewhere/file.css" media="screen" rel="stylesheet" />), <ide> %(stylesheet_link_tag("subdir/subdir")) => %(<link href="/stylesheets/subdir/subdir.css" media="screen" rel="stylesheet" />), <ide> %(stylesheet_link_tag("bank", :media => "all")) => %(<link href="/stylesheets/bank.css" media="all" rel="stylesheet" />), <add> %(stylesheet_link_tag("bank", :host => "assets.example.com")) => %(<link href="http://assets.example.com/stylesheets/bank.css" media="screen" rel="stylesheet" />), <ide> <ide> %(stylesheet_link_tag("http://www.example.com/styles/style")) => %(<link href="http://www.example.com/styles/style" media="screen" rel="stylesheet" />), <ide> %(stylesheet_link_tag("http://www.example.com/styles/style.css")) => %(<link href="http://www.example.com/styles/style.css" media="screen" rel="stylesheet" />),
3
Javascript
Javascript
add information to assertion
b303d8bb21a6636555df9c5d41824071fd6c109b
<ide><path>test/parallel/test-fs-stat-bigint.js <ide> const promiseFs = require('fs').promises; <ide> const path = require('path'); <ide> const tmpdir = require('../common/tmpdir'); <ide> const { isDate } = require('util').types; <add>const { inspect } = require('util'); <ide> <ide> tmpdir.refresh(); <ide> <ide> function verifyStats(bigintStats, numStats) { <ide> assert.strictEqual(bigintStats[key], undefined); <ide> assert.strictEqual(numStats[key], undefined); <ide> } else if (Number.isSafeInteger(val)) { <del> assert.strictEqual(bigintStats[key], BigInt(val)); <add> assert.strictEqual( <add> bigintStats[key], BigInt(val), <add> `${inspect(bigintStats[key])} !== ${inspect(BigInt(val))}\n` + <add> `key=${key}, val=${val}` <add> ); <ide> } else { <ide> assert( <ide> Math.abs(Number(bigintStats[key]) - val) < 1,
1
Javascript
Javascript
fix lint errors including use of `global`
94d2bbb2214f3d88ac6cc1b1c48a59b377222727
<ide><path>src/core/ReactDOMSelection.js <ide> function getIESelection(node) { <ide> * @param {DOMElement} node <ide> */ <ide> function getModernSelection(node) { <del> var selection = global.getSelection(); <add> var selection = window.getSelection(); <ide> var anchorNode = selection.anchorNode; <ide> var anchorOffset = selection.anchorOffset; <ide> var focusNode = selection.focusNode; <ide> function setIESelection(node, offsets) { <ide> * @param {object} offsets <ide> */ <ide> function setModernSelection(node, offsets) { <del> var selection = global.getSelection(); <add> var selection = window.getSelection(); <ide> <ide> var length = node[getTextContentAccessor()].length; <ide> var start = Math.min(offsets.start, length); <ide><path>src/dom/components/ReactDOMInput.js <ide> var ReactDOMInput = ReactCompositeComponent.createClass({ <ide> this.props.checked != null ? this.props.checked : this.state.checked; <ide> <ide> var value = this.getValue(); <del> props.value = value != null && value !== false <del> ? '' + value <del> : this.state.value; <add> props.value = value != null && value !== false ? <add> '' + value : <add> this.state.value; <ide> <ide> props.onChange = this._handleChange; <ide>
2
Javascript
Javascript
add storedat date for serialized environment state
85b32b861ed2f6463ef640a4c4bd1ffb9e55783c
<ide><path>src/state-store.js <ide> class StateStore { <ide> <ide> save (key, value) { <ide> return this.dbPromise.then(db => { <add> value.storedAt = new Date().toString() <ide> return new Promise((resolve, reject) => { <ide> var request = db.transaction(['states'], 'readwrite') <ide> .objectStore('states')
1
PHP
PHP
use correct method for replacement of url property
eb7b2dc197d62a866085b13ee01c2026c09e9601
<ide><path>src/Http/ServerRequest.php <ide> class ServerRequest implements ArrayAccess, ServerRequestInterface <ide> * The URL string used for the request. <ide> * <ide> * @var string <del> * @deprecated 3.6.0 This public property will be removed in 4.0.0. Use getRequestTarget() instead. <add> * @deprecated 3.6.0 This public property will be removed in 4.0.0. Use getPath() instead. <ide> */ <ide> protected $url; <ide>
1
Javascript
Javascript
remove need for state.id
c5024cdace6d9f11371ebab6462cc7020d4d9938
<ide><path>src/renderers/WebGLRenderer.js <ide> function WebGLRenderer( parameters ) { <ide> var lights = currentRenderState.state.lights; <ide> var shadowsArray = currentRenderState.state.shadowsArray; <ide> <del> var lightsHash = materialProperties.lightsHash; <del> var lightsStateHash = lights.state.hash; <add> var lightsStateVersion = lights.state.version; <ide> <ide> var parameters = programCache.getParameters( <ide> material, lights.state, shadowsArray, fog, _clipping.numPlanes, _clipping.numIntersection, object ); <ide> function WebGLRenderer( parameters ) { <ide> // changed glsl or parameters <ide> releaseMaterialProgramReference( material ); <ide> <del> } else if ( lightsHash.stateID !== lightsStateHash.stateID || <del> lightsHash.version !== lightsStateHash.version ) { <add> } else if ( materialProperties.lightsStateVersion !== lightsStateVersion ) { <ide> <del> lightsHash.stateID = lightsStateHash.stateID; <del> lightsHash.version = lightsStateHash.version; <add> materialProperties.lightsStateVersion = lightsStateVersion; <ide> <ide> programChange = false; <ide> <ide> function WebGLRenderer( parameters ) { <ide> materialProperties.fog = fog; <ide> <ide> // store the light setup it was created for <del> if ( lightsHash === undefined ) { <ide> <del> materialProperties.lightsHash = lightsHash = {}; <del> <del> } <del> <del> lightsHash.stateID = lightsStateHash.stateID; <del> lightsHash.version = lightsStateHash.version; <add> materialProperties.lightsStateVersion = lightsStateVersion; <ide> <ide> if ( material.lights ) { <ide> <ide> function WebGLRenderer( parameters ) { <ide> var materialProperties = properties.get( material ); <ide> var lights = currentRenderState.state.lights; <ide> <del> var lightsHash = materialProperties.lightsHash; <del> var lightsStateHash = lights.state.hash; <del> <ide> if ( _clippingEnabled ) { <ide> <ide> if ( _localClippingEnabled || camera !== _currentCamera ) { <ide> function WebGLRenderer( parameters ) { <ide> <ide> 
material.needsUpdate = true; <ide> <del> } else if ( material.lights && ( lightsHash.stateID !== lightsStateHash.stateID || <del> lightsHash.version !== lightsStateHash.version ) ) { <add> } else if ( material.lights && materialProperties.lightsStateVersion !== lights.state.version ) { <ide> <ide> material.needsUpdate = true; <ide> <ide><path>src/renderers/webgl/WebGLLights.js <ide> function UniformsCache() { <ide> <ide> } <ide> <del>var count = 0; <add>var nextVersion = 0; <ide> <ide> function WebGLLights() { <ide> <ide> var cache = new UniformsCache(); <ide> <ide> var state = { <ide> <del> id: count ++, <add> version: 0, <ide> <ide> hash: { <del> stateID: - 1, <ide> directionalLength: - 1, <ide> pointLength: - 1, <ide> spotLength: - 1, <ide> rectAreaLength: - 1, <ide> hemiLength: - 1, <ide> shadowsLength: - 1, <del> version: 0 <ide> }, <ide> <ide> ambient: [ 0, 0, 0 ], <ide> function WebGLLights() { <ide> state.point.length = pointLength; <ide> state.hemi.length = hemiLength; <ide> <del> hash.stateID = state.id; <ide> hash.directionalLength = directionalLength; <ide> hash.pointLength = pointLength; <ide> hash.spotLength = spotLength; <ide> hash.rectAreaLength = rectAreaLength; <ide> hash.hemiLength = hemiLength; <ide> hash.shadowsLength = shadows.length; <ide> <del> hash.version ++; <add> state.version = nextVersion++; <ide> <ide> } <ide>
2