text stringlengths 2 1.04M | meta dict |
|---|---|
// Disable Windows complaining about max template size.
#pragma warning (disable : 4503)
#endif // _MSC_VER
#if defined (_WIN32)
#ifdef _MSC_VER
// Disable MSVC warning about classes needing a DLL interface for exported members.
#pragma warning (disable : 4251)
#endif // _MSC_VER
#ifdef USE_IMPORT_EXPORT
#ifdef AWSDOC_S3ENCRYPTION_EXPORTS
// Building the library itself: export the annotated symbols.
#define AWSDOC_S3ENCRYPTION_API __declspec(dllexport)
#else
// Consuming the library: import the annotated symbols.
#define AWSDOC_S3ENCRYPTION_API __declspec(dllimport)
#endif // AWSDOC_S3ENCRYPTION_EXPORTS
#else
#define AWSDOC_S3ENCRYPTION_API
#endif // USE_IMPORT_EXPORT
#else // defined (_WIN32)
// Non-Windows builds need no import/export decoration.
#define AWSDOC_S3ENCRYPTION_API
#endif // defined (_WIN32)
| {
"content_hash": "e35642758c3acf9c4de6dc277fb96ded",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 56,
"avg_line_length": 27.333333333333332,
"alnum_prop": 0.7195121951219512,
"repo_name": "awsdocs/aws-doc-sdk-examples",
"id": "39b46f5265879f2c527a69e31a7625159d6d3187",
"size": "714",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "cpp/example_code/s3encryption/include/awsdoc/s3-encryption/s3Encryption_EXPORTS.h",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "476653"
},
{
"name": "Batchfile",
"bytes": "900"
},
{
"name": "C",
"bytes": "3852"
},
{
"name": "C#",
"bytes": "2051923"
},
{
"name": "C++",
"bytes": "943634"
},
{
"name": "CMake",
"bytes": "82068"
},
{
"name": "CSS",
"bytes": "33378"
},
{
"name": "Dockerfile",
"bytes": "2243"
},
{
"name": "Go",
"bytes": "1764292"
},
{
"name": "HTML",
"bytes": "319090"
},
{
"name": "Java",
"bytes": "4966853"
},
{
"name": "JavaScript",
"bytes": "1655476"
},
{
"name": "Jupyter Notebook",
"bytes": "9749"
},
{
"name": "Kotlin",
"bytes": "1099902"
},
{
"name": "Makefile",
"bytes": "4922"
},
{
"name": "PHP",
"bytes": "1220594"
},
{
"name": "Python",
"bytes": "2507509"
},
{
"name": "Ruby",
"bytes": "500331"
},
{
"name": "Rust",
"bytes": "558811"
},
{
"name": "Shell",
"bytes": "63776"
},
{
"name": "Swift",
"bytes": "267325"
},
{
"name": "TypeScript",
"bytes": "119632"
}
],
"symlink_target": ""
} |
import { BaseListener } from '../../types'

/** Recorder payload for the onFrameDeleted event; the event carries no arguments. */
export type OnFrameDeletedRecorder = []

/**
 * Shape of the onFrameDeleted listener entry.
 * Called when a frame is deleted.
 */
export type Shape = BaseListener<OnFrameDeletedRecorder>
| {
"content_hash": "31b07fd6e0aab43b88844939f741c9ff",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 56,
"avg_line_length": 23,
"alnum_prop": 0.7228260869565217,
"repo_name": "SeleniumHQ/selenium-ide",
"id": "9c7499b98dc98d476afe8565e8c0f046dc55485e",
"size": "184",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "packages/side-api/src/commands/recorder/onFrameDeleted.ts",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5026"
},
{
"name": "HTML",
"bytes": "49034"
},
{
"name": "JavaScript",
"bytes": "667308"
},
{
"name": "Starlark",
"bytes": "6300"
},
{
"name": "TypeScript",
"bytes": "693721"
}
],
"symlink_target": ""
} |
package org.orekit.files.ccsds.ndm.tdm;
import org.hamcrest.CoreMatchers;
import org.hamcrest.MatcherAssert;
import org.hipparchus.util.FastMath;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.orekit.Utils;
import org.orekit.data.DataSource;
import org.orekit.errors.OrekitException;
import org.orekit.errors.OrekitMessages;
import org.orekit.files.ccsds.definitions.CelestialBodyFrame;
import org.orekit.files.ccsds.definitions.TimeSystem;
import org.orekit.files.ccsds.ndm.ParserBuilder;
import org.orekit.files.ccsds.ndm.WriterBuilder;
import org.orekit.files.ccsds.utils.generation.Generator;
import org.orekit.files.ccsds.utils.generation.KvnGenerator;
import org.orekit.frames.FramesFactory;
import org.orekit.time.AbsoluteDate;
import org.orekit.time.TimeScale;
import org.orekit.time.TimeScalesFactory;
import org.orekit.utils.Constants;
import java.io.ByteArrayInputStream;
import java.io.CharArrayWriter;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
/**
* Test class for CCSDS Tracking Data Message parsing.<p>
 * Examples are taken from Annex D of
* <a href="https://public.ccsds.org/Pubs/503x0b1c1.pdf">CCSDS 503.0-B-1 recommended standard [1]</a> ("Tracking Data Message", Blue Book, Version 1.0, November 2007).<p>
* Both KeyValue and XML formats are tested here on equivalent files.
* @author mjournot
*
*/
public class TdmParserTest {
/** Points the Orekit data context at the "regular-data" set before each test. */
@BeforeEach
public void setUp() {
Utils.setDataRoot("regular-data");
}
/** Checks that external DOCTYPE resources are not resolved when parsing an XML TDM (issue 368). */
@Test
public void testParseTdmExternalResourceIssue368() {
// setup
final String name = "/ccsds/tdm/xml/TDM-external-doctype.xml";
final DataSource source = new DataSource(name, () -> TdmParserTest.class.getResourceAsStream(name));
try {
// action
new ParserBuilder().withRangeUnitsConverter(null).buildTdmParser().parseMessage(source);
// verify
Assertions.fail("Expected Exception");
} catch (OrekitException e) {
// Malformed URL exception indicates external resource was disabled
// file not found exception indicates parser tried to load the resource
MatcherAssert.assertThat(e.getCause(),
CoreMatchers.instanceOf(MalformedURLException.class));
}
}
/** Parses KVN Example 2 (one-way data with frequency offset) and validates the result. */
@Test
public void testParseTdmKeyValueExample2() {
// Example 2 of [1]
// See Figure D-2: TDM Example: One-Way Data w/Frequency Offset
// Data lines number was cut down to 7
final String name = "/ccsds/tdm/kvn/TDMExample2.txt";
final DataSource source = new DataSource(name, () -> TdmParserTest.class.getResourceAsStream(name));
final Tdm file = new ParserBuilder().withRangeUnitsConverter(null).buildTdmParser().parseMessage(source);
validateTDMExample2(file);
}
/** Parses KVN Example 4 (two-way ranging data only) and validates the result. */
@Test
public void testParseTdmKeyValueExample4() {
// Example 4 of [1]
// See Figure D-4: TDM Example: Two-Way Ranging Data Only
// Data lines number was cut down to 20
final String name = "/ccsds/tdm/kvn/TDMExample4.txt";
final DataSource source = new DataSource(name, () -> TdmParserTest.class.getResourceAsStream(name));
final Tdm file = new ParserBuilder().buildTdmParser().parseMessage(source);
validateTDMExample4(file);
}
/** Parses KVN Example 6 (four-way data) and validates the result. */
@Test
public void testParseTdmKeyValueExample6() {
// Example 6 of [1]
// See Figure D-6: TDM Example: Four-Way Data
// Data lines number was cut down to 16
final String name = "/ccsds/tdm/kvn/TDMExample6.txt";
final DataSource source = new DataSource(name, () -> TdmParserTest.class.getResourceAsStream(name));
final Tdm file = new ParserBuilder().withRangeUnitsConverter(null).buildTdmParser().parseMessage(source);
validateTDMExample6(file);
}
/** Parses KVN Example 8 (angles, range and Doppler combined) and validates the result. */
@Test
public void testParseTdmKeyValueExample8() {
// Example 8 of [1]
// See Figure D-8: TDM Example: Angles, Range, Doppler Combined in Single TDM
// Data lines number was cut down to 18
final String name = "/ccsds/tdm/kvn/TDMExample8.txt";
final DataSource source = new DataSource(name, () -> TdmParserTest.class.getResourceAsStream(name));
final Tdm file = new ParserBuilder().buildTdmParser().parseMessage(source);
validateTDMExample8(file);
}
/** Parses KVN Example 15 (clock bias/drift only) and validates the result. */
@Test
public void testParseTdmKeyValueExample15() {
// Example 15 of [1]
// See Figure D-15: TDM Example: Clock Bias/Drift Only
final String name = "/ccsds/tdm/kvn/TDMExample15.txt";
final DataSource source = new DataSource(name, () -> TdmParserTest.class.getResourceAsStream(name));
final Tdm file = new ParserBuilder().withRangeUnitsConverter(null).buildTdmParser().parseMessage(source);
validateTDMExample15(file);
}
/** Parses a KVN file exercising every TDM keyword in SEQUENTIAL tracking mode. */
@Test
public void testParseTdmKeyValueExampleAllKeywordsSequential() {
// Testing all TDM keywords
final String name = "/ccsds/tdm/kvn/TDMExampleAllKeywordsSequential.txt";
final DataSource source = new DataSource(name, () -> TdmParserTest.class.getResourceAsStream(name));
final Tdm file = new ParserBuilder().buildTdmParser().parseMessage(source);
validateTDMExampleAllKeywordsSequential(file);
}
/** Parses a KVN file exercising every TDM keyword in SINGLE_DIFF tracking mode. */
@Test
public void testParseTdmKeyValueExampleAllKeywordsSingleDiff() {
// Testing all TDM keywords
final String name = "/ccsds/tdm/kvn/TDMExampleAllKeywordsSingleDiff.txt";
final DataSource source = new DataSource(name, () -> TdmParserTest.class.getResourceAsStream(name));
final Tdm file = new ParserBuilder().buildTdmParser().parseMessage(source);
validateTDMExampleAllKeywordsSingleDiff(file);
}
/** Parses XML Example 2 (one-way data with frequency offset) and validates the result. */
@Test
public void testParseTdmXmlExample2() {
// Example 2 of [1]
// See Figure D-2: TDM Example: One-Way Data w/Frequency Offset
// Data lines number was cut down to 7
final String name = "/ccsds/tdm/xml/TDMExample2.xml";
final DataSource source = new DataSource(name, () -> TdmParserTest.class.getResourceAsStream(name));
final Tdm file = new ParserBuilder().withRangeUnitsConverter(null).buildTdmParser().parseMessage(source);
validateTDMExample2(file);
}
/**
 * Round-trip test: parses XML Example 2, writes it back in KVN format,
 * reparses the written text and validates the rebuilt message.
 */
@Test
public void testWriteTdmXmlExample2() throws IOException {
// Example 2 of [1]
// See Figure D-2: TDM Example: One-Way Data w/Frequency Offset
// Data lines number was cut down to 7
final String name = "/ccsds/tdm/xml/TDMExample2.xml";
final DataSource source = new DataSource(name, () -> TdmParserTest.class.getResourceAsStream(name));
final Tdm original = new ParserBuilder().withRangeUnitsConverter(null).buildTdmParser().parseMessage(source);
// write the parsed file back to a characters array (KVN format)
final CharArrayWriter caw = new CharArrayWriter();
final Generator generator = new KvnGenerator(caw, TdmWriter.KVN_PADDING_WIDTH, "dummy", 60);
new WriterBuilder().withRangeUnitsConverter(null).buildTdmWriter().writeMessage(generator, original);
// reparse the written file
final byte[] bytes = caw.toString().getBytes(StandardCharsets.UTF_8);
final DataSource source2 = new DataSource(name, () -> new ByteArrayInputStream(bytes));
final Tdm rebuilt = new ParserBuilder().withRangeUnitsConverter(null).buildTdmParser().parseMessage(source2);
validateTDMExample2(rebuilt);
}
/** Parses XML Example 4 (two-way ranging data only) and validates the result. */
@Test
public void testParseTdmXmlExample4() {
// Example 4 of [1]
// See Figure D-4: TDM Example: Two-Way Ranging Data Only
// Data lines number was cut down to 20
final String name = "/ccsds/tdm/xml/TDMExample4.xml";
final DataSource source = new DataSource(name, () -> TdmParserTest.class.getResourceAsStream(name));
final Tdm file = new ParserBuilder().buildTdmParser().parseMessage(source);
validateTDMExample4(file);
}
/** Parses XML Example 6 (four-way data) and validates the result. */
@Test
public void testParseTdmXmlExample6() {
// Example 6 of [1]
// See Figure D-6: TDM Example: Four-Way Data
// Data lines number was cut down to 16
final String name = "/ccsds/tdm/xml/TDMExample6.xml";
final DataSource source = new DataSource(name, () -> TdmParserTest.class.getResourceAsStream(name));
final Tdm file = new ParserBuilder().withRangeUnitsConverter(null).buildTdmParser().parseMessage(source);
validateTDMExample6(file);
}
/** Parses XML Example 8 (angles, range and Doppler combined) and validates the result. */
@Test
public void testParseTdmXmlExample8() {
// Example 8 of [1]
// See Figure D-8: TDM Example: Angles, Range, Doppler Combined in Single TDM
// Data lines number was cut down to 18
final String name = "/ccsds/tdm/xml/TDMExample8.xml";
final DataSource source = new DataSource(name, () -> TdmParserTest.class.getResourceAsStream(name));
final Tdm file = new ParserBuilder().buildTdmParser().parseMessage(source);
validateTDMExample8(file);
}
/** Parses XML Example 15 (clock bias/drift only) and validates the result. */
@Test
public void testParseTdmXmlExample15() {
// Example 15 of [1]
// See Figure D-15: TDM Example: Clock Bias/Drift Only
final String name = "/ccsds/tdm/xml/TDMExample15.xml";
final DataSource source = new DataSource(name, () -> TdmParserTest.class.getResourceAsStream(name));
final Tdm file = new ParserBuilder().withRangeUnitsConverter(null).buildTdmParser().parseMessage(source);
validateTDMExample15(file);
}
/** Checks that a TDM whose PATH metadata contains spaces between participants is rejected (issue 963). */
@Test
public void testIssue963() {
// Check that a TDM with spaces in between participants in PATH is rejected
final String name = "/ccsds/tdm/kvn/TDM-issue963.txt";
final DataSource source = new DataSource(name, () -> TdmParserTest.class.getResourceAsStream(name));
try {
// Number format exception in metadata part
new ParserBuilder().buildTdmParser().parseMessage(source);
Assertions.fail("An exception should have been thrown");
} catch (OrekitException oe) {
Assertions.assertEquals(OrekitMessages.UNABLE_TO_PARSE_ELEMENT_IN_FILE, oe.getSpecifier());
}
}
/** Parses an XML file exercising every TDM keyword in SEQUENTIAL tracking mode. */
@Test
public void testParseTdmXmlExampleAllKeywordsSequential() {
// Testing all TDM keywords
final String name = "/ccsds/tdm/xml/TDMExampleAllKeywordsSequential.xml";
final DataSource source = new DataSource(name, () -> TdmParserTest.class.getResourceAsStream(name));
final Tdm file = new ParserBuilder().buildTdmParser().parseMessage(source);
validateTDMExampleAllKeywordsSequential(file);
}
/** Parses an XML file exercising every TDM keyword in SINGLE_DIFF tracking mode. */
@Test
public void testParseTdmXmlExampleAllKeywordsSingleDiff() {
// Testing all TDM keywords
final String name = "/ccsds/tdm/xml/TDMExampleAllKeywordsSingleDiff.xml";
final DataSource source = new DataSource(name, () -> TdmParserTest.class.getResourceAsStream(name));
final Tdm file = new ParserBuilder().buildTdmParser().parseMessage(source);
validateTDMExampleAllKeywordsSingleDiff(file);
}
/** Verifies that a malformed numeric value in the data section of a KVN TDM raises a parse error with location details. */
@Test
public void testDataNumberFormatErrorTypeKeyValue() {
final String name = "/ccsds/tdm/kvn/TDM-data-number-format-error.txt";
final DataSource source = new DataSource(name, () -> TdmParserTest.class.getResourceAsStream(name));
try {
// Number format exception in data part
new ParserBuilder().buildTdmParser().parseMessage(source);
Assertions.fail("An exception should have been thrown");
} catch (OrekitException oe) {
// exception parts: keyword, line number, file name
Assertions.assertEquals(OrekitMessages.UNABLE_TO_PARSE_ELEMENT_IN_FILE, oe.getSpecifier());
Assertions.assertEquals("RECEIVE_FREQ_1", oe.getParts()[0]);
Assertions.assertEquals(26, oe.getParts()[1]);
Assertions.assertEquals(name, oe.getParts()[2]);
}
}
/** Verifies that a malformed numeric value in the data section of an XML TDM raises a parse error with location details. */
@Test
public void testDataNumberFormatErrorTypeXml() {
try {
// Number format exception in data part
final String name = "/ccsds/tdm/xml/TDM-data-number-format-error.xml";
final DataSource source = new DataSource(name, () -> TdmParserTest.class.getResourceAsStream(name));
new ParserBuilder().buildTdmParser().parseMessage(source);
Assertions.fail("An exception should have been thrown");
} catch (OrekitException oe) {
// exception parts: keyword, line number, file name
Assertions.assertEquals(OrekitMessages.UNABLE_TO_PARSE_ELEMENT_IN_FILE, oe.getSpecifier());
Assertions.assertEquals("RECEIVE_FREQ_1", oe.getParts()[0]);
Assertions.assertEquals(47, oe.getParts()[1]);
Assertions.assertEquals("/ccsds/tdm/xml/TDM-data-number-format-error.xml", oe.getParts()[2]);
}
}
/** Verifies that a malformed numeric value in the metadata section of a KVN TDM raises a parse error with location details. */
@Test
public void testMetaDataNumberFormatErrorTypeKeyValue() {
try {
// Number format exception in metadata part
final String name = "/ccsds/tdm/kvn/TDM-metadata-number-format-error.txt";
final DataSource source = new DataSource(name, () -> TdmParserTest.class.getResourceAsStream(name));
new ParserBuilder().withRangeUnitsConverter(null).buildTdmParser().parseMessage(source);
Assertions.fail("An Orekit Exception \"UNABLE_TO_PARSE_LINE_IN_FILE\" should have been thrown");
} catch (OrekitException oe) {
// exception parts: keyword, line number, file name
Assertions.assertEquals(OrekitMessages.UNABLE_TO_PARSE_ELEMENT_IN_FILE, oe.getSpecifier());
Assertions.assertEquals("TRANSMIT_DELAY_1", oe.getParts()[0]);
Assertions.assertEquals(17, oe.getParts()[1]);
Assertions.assertEquals("/ccsds/tdm/kvn/TDM-metadata-number-format-error.txt", oe.getParts()[2]);
}
}
/** Verifies that a malformed numeric value in the metadata section of an XML TDM raises a parse error with location details. */
@Test
public void testMetaDataNumberFormatErrorTypeXml() {
try {
// Number format exception in metadata part
final String name = "/ccsds/tdm/xml/TDM-metadata-number-format-error.xml";
final DataSource source = new DataSource(name, () -> TdmParserTest.class.getResourceAsStream(name));
new ParserBuilder().withRangeUnitsConverter(null).buildTdmParser().parseMessage(source);
Assertions.fail("An exception should have been thrown");
} catch (OrekitException oe) {
// exception parts: keyword, line number, file name
Assertions.assertEquals(OrekitMessages.UNABLE_TO_PARSE_ELEMENT_IN_FILE, oe.getSpecifier());
Assertions.assertEquals("TRANSMIT_DELAY_1", oe.getParts()[0]);
Assertions.assertEquals(24, oe.getParts()[1]);
Assertions.assertEquals("/ccsds/tdm/xml/TDM-metadata-number-format-error.xml", oe.getParts()[2]);
}
}
/** Verifies that parsing a non-existent resource fails with UNABLE_TO_FIND_FILE carrying the bad name. */
@Test
public void testNonExistentFile() throws URISyntaxException {
// Try parsing a file that does not exist
final String realName = "/ccsds/odm/oem/OEMExample2.txt";
final String wrongName = realName + "xxxxx";
final DataSource source = new DataSource(wrongName, () -> TdmParserTest.class.getResourceAsStream(wrongName));
try {
new ParserBuilder().withRangeUnitsConverter(null).buildTdmParser().parseMessage(source);
Assertions.fail("An exception should have been thrown");
} catch (OrekitException oe) {
Assertions.assertEquals(OrekitMessages.UNABLE_TO_FIND_FILE, oe.getSpecifier());
Assertions.assertEquals(wrongName, oe.getParts()[0]);
}
}
/** Checks that a KVN TDM with different time systems per segment parses, keeping each segment's own system. */
@Test
public void testInconsistentTimeSystemsKeyValue() {
// Inconsistent time systems between two sets of data
final String name = "/ccsds/tdm/kvn/TDM-inconsistent-time-systems.txt";
final DataSource source = new DataSource(name, () -> TdmParserTest.class.getResourceAsStream(name));
Tdm file = new ParserBuilder().withRangeUnitsConverter(null).buildTdmParser().parseMessage(source);
Assertions.assertEquals(3, file.getSegments().size());
Assertions.assertEquals(TimeSystem.UTC, file.getSegments().get(0).getMetadata().getTimeSystem());
Assertions.assertEquals(TimeSystem.TCG, file.getSegments().get(1).getMetadata().getTimeSystem());
Assertions.assertEquals(TimeSystem.UTC, file.getSegments().get(2).getMetadata().getTimeSystem());
}
/** Checks that an XML TDM with different time systems per segment parses, keeping each segment's own system. */
@Test
public void testInconsistentTimeSystemsXml() {
// Inconsistent time systems between two sets of data
final String name = "/ccsds/tdm/xml/TDM-inconsistent-time-systems.xml";
final DataSource source = new DataSource(name, () -> TdmParserTest.class.getResourceAsStream(name));
Tdm file = new ParserBuilder().withRangeUnitsConverter(null).buildTdmParser().parseMessage(source);
Assertions.assertEquals(3, file.getSegments().size());
Assertions.assertEquals(TimeSystem.UTC, file.getSegments().get(0).getMetadata().getTimeSystem());
Assertions.assertEquals(TimeSystem.TCG, file.getSegments().get(1).getMetadata().getTimeSystem());
Assertions.assertEquals(TimeSystem.UTC, file.getSegments().get(2).getMetadata().getTimeSystem());
}
/** Verifies that an unknown CCSDS keyword in the data section of a KVN TDM is rejected with location details. */
@Test
public void testWrongDataKeywordKeyValue() throws URISyntaxException {
// Unknown CCSDS keyword was read in data part
final String name = "/ccsds/tdm/kvn/TDM-data-wrong-keyword.txt";
final DataSource source = new DataSource(name, () -> TdmParserTest.class.getResourceAsStream(name));
try {
new ParserBuilder().withRangeUnitsConverter(null).buildTdmParser().parseMessage(source);
Assertions.fail("An exception should have been thrown");
} catch (OrekitException oe) {
// exception parts: line number, file name, offending keyword
Assertions.assertEquals(OrekitMessages.CCSDS_UNEXPECTED_KEYWORD, oe.getSpecifier());
Assertions.assertEquals(26, oe.getParts()[0]);
// reuse the resource name rather than a duplicated literal, and drop the
// meaningless "%s" assertion message left over from an earlier refactoring
Assertions.assertEquals(name, oe.getParts()[1]);
Assertions.assertEquals("WRONG_KEYWORD", oe.getParts()[2]);
}
}
/** Verifies that an unknown CCSDS keyword in the data section of an XML TDM is rejected with location details. */
@Test
public void testWrongDataKeywordXml() throws URISyntaxException {
// Unknown CCSDS keyword was read in data part
final String name = "/ccsds/tdm/xml/TDM-data-wrong-keyword.xml";
final DataSource source = new DataSource(name, () -> TdmParserTest.class.getResourceAsStream(name));
try {
new ParserBuilder().withRangeUnitsConverter(null).buildTdmParser().parseMessage(source);
Assertions.fail("An exception should have been thrown");
} catch (OrekitException oe) {
// exception parts: line number, file name, offending keyword
Assertions.assertEquals(OrekitMessages.CCSDS_UNEXPECTED_KEYWORD, oe.getSpecifier());
Assertions.assertEquals(47, oe.getParts()[0]);
Assertions.assertEquals(name, oe.getParts()[1]);
Assertions.assertEquals("WRONG_KEYWORD", oe.getParts()[2]);
}
}
/** Verifies that an unknown CCSDS keyword in the metadata section of a KVN TDM is rejected with location details. */
@Test
public void testWrongMetaDataKeywordKeyValue() throws URISyntaxException {
// Unknown CCSDS keyword was read in metadata part
final String name = "/ccsds/tdm/kvn/TDM-metadata-wrong-keyword.txt";
final DataSource source = new DataSource(name, () -> TdmParserTest.class.getResourceAsStream(name));
try {
new ParserBuilder().withRangeUnitsConverter(null).buildTdmParser().parseMessage(source);
Assertions.fail("An exception should have been thrown");
} catch (OrekitException oe) {
// exception parts: line number, file name, offending keyword
Assertions.assertEquals(OrekitMessages.CCSDS_UNEXPECTED_KEYWORD, oe.getSpecifier());
Assertions.assertEquals(16, oe.getParts()[0]);
Assertions.assertEquals("/ccsds/tdm/kvn/TDM-metadata-wrong-keyword.txt", oe.getParts()[1]);
Assertions.assertEquals("WRONG_KEYWORD", oe.getParts()[2]);
}
}
/** Verifies that an unknown CCSDS keyword in the metadata section of an XML TDM is rejected with location details. */
@Test
public void testWrongMetaDataKeywordXml() throws URISyntaxException {
// Unknown CCSDS keyword was read in metadata part
final String name = "/ccsds/tdm/xml/TDM-metadata-wrong-keyword.xml";
final DataSource source = new DataSource(name, () -> TdmParserTest.class.getResourceAsStream(name));
try {
new ParserBuilder().withRangeUnitsConverter(null).buildTdmParser().parseMessage(source);
Assertions.fail("An exception should have been thrown");
} catch (OrekitException oe) {
// exception parts: line number, file name, offending keyword
Assertions.assertEquals(OrekitMessages.CCSDS_UNEXPECTED_KEYWORD, oe.getSpecifier());
Assertions.assertEquals(23, oe.getParts()[0]);
Assertions.assertEquals("/ccsds/tdm/xml/TDM-metadata-wrong-keyword.xml", oe.getParts()[1]);
Assertions.assertEquals("WRONG_KEYWORD", oe.getParts()[2]);
}
}
/** Verifies that an unsupported TIME_SYSTEM value in a KVN TDM is rejected. */
@Test
public void testWrongTimeSystemKeyValue() {
// Time system not implemented CCSDS keyword was read in metadata part
final String name = "/ccsds/tdm/kvn/TDM-metadata-timesystem-not-implemented.txt";
final DataSource source = new DataSource(name, () -> TdmParserTest.class.getResourceAsStream(name));
try {
new ParserBuilder().withRangeUnitsConverter(null).buildTdmParser().parseMessage(source);
Assertions.fail("An exception should have been thrown");
} catch (OrekitException oe) {
Assertions.assertEquals(OrekitMessages.CCSDS_TIME_SYSTEM_NOT_IMPLEMENTED, oe.getSpecifier());
Assertions.assertEquals("WRONG-TIME-SYSTEM", oe.getParts()[0]);
}
}
/** Verifies that an unsupported TIME_SYSTEM value in an XML TDM is rejected. */
@Test
public void testWrongTimeSystemXml() {
// Time system not implemented CCSDS keyword was read in metadata part
final String name = "/ccsds/tdm/xml/TDM-metadata-timesystem-not-implemented.xml";
final DataSource source = new DataSource(name, () -> TdmParserTest.class.getResourceAsStream(name));
try {
new ParserBuilder().withRangeUnitsConverter(null).buildTdmParser().parseMessage(source);
Assertions.fail("An exception should have been thrown");
} catch (OrekitException oe) {
Assertions.assertEquals(OrekitMessages.CCSDS_TIME_SYSTEM_NOT_IMPLEMENTED, oe.getSpecifier());
Assertions.assertEquals("WRONG-TIME-SYSTEM", oe.getParts()[0]);
}
}
/** Verifies that a date encountered before TIME_SYSTEM is declared triggers CCSDS_TIME_SYSTEM_NOT_READ_YET. */
@Test
public void testMissingTimeSystemXml() {
// Time system keyword is missing before dated entries
final String name = "/ccsds/tdm/xml/TDM-missing-timesystem.xml";
final DataSource source = new DataSource(name, () -> TdmParserTest.class.getResourceAsStream(name));
try {
new ParserBuilder().withRangeUnitsConverter(null).buildTdmParser().parseMessage(source);
Assertions.fail("An exception should have been thrown");
} catch (OrekitException oe) {
Assertions.assertEquals(OrekitMessages.CCSDS_TIME_SYSTEM_NOT_READ_YET, oe.getSpecifier());
Assertions.assertEquals(18, oe.getParts()[0]);
}
}
/** Verifies that a TDM without any PARTICIPANT_n metadata entry is rejected. */
@Test
public void testMissingParticipants() {
// method renamed from testMissingPArticipants: typo in the original name;
// JUnit discovers tests by annotation, so no caller is affected
final String name = "/ccsds/tdm/xml/TDM-missing-participants.xml";
final DataSource source = new DataSource(name, () -> TdmParserTest.class.getResourceAsStream(name));
try {
new ParserBuilder().withRangeUnitsConverter(null).buildTdmParser().parseMessage(source);
Assertions.fail("An exception should have been thrown");
} catch (OrekitException oe) {
Assertions.assertEquals(OrekitMessages.UNINITIALIZED_VALUE_FOR_KEY, oe.getSpecifier());
Assertions.assertEquals(TdmMetadataKey.PARTICIPANT_1, oe.getParts()[0]);
}
}
/** Verifies that a KVN data line with too many fields (3 after the keyword instead of 2) is rejected. */
@Test
public void testInconsistentDataLineKeyValue() {
// Inconsistent data line in KeyValue file (3 fields after keyword instead of 2)
final String name = "/ccsds/tdm/kvn/TDM-data-inconsistent-line.txt";
final DataSource source = new DataSource(name, () -> TdmParserTest.class.getResourceAsStream(name));
try {
new ParserBuilder().withRangeUnitsConverter(null).buildTdmParser().parseMessage(source);
Assertions.fail("An exception should have been thrown");
} catch (OrekitException oe) {
// exception parts: keyword, line number, file name
Assertions.assertEquals(OrekitMessages.UNABLE_TO_PARSE_ELEMENT_IN_FILE, oe.getSpecifier());
Assertions.assertEquals("RECEIVE_FREQ_1", oe.getParts()[0]);
Assertions.assertEquals(25, oe.getParts()[1]);
Assertions.assertEquals("/ccsds/tdm/kvn/TDM-data-inconsistent-line.txt", oe.getParts()[2]);
}
}
/** Verifies that an inconsistent observation block in an XML TDM is rejected with location details. */
@Test
public void testInconsistentDataBlockXml() {
// Inconsistent data block in XML file
final String name = "/ccsds/tdm/xml/TDM-data-inconsistent-block.xml";
final DataSource source = new DataSource(name, () -> TdmParserTest.class.getResourceAsStream(name));
try {
new ParserBuilder().withRangeUnitsConverter(null).buildTdmParser().parseMessage(source);
Assertions.fail("An exception should have been thrown");
} catch (OrekitException oe) {
// exception parts: keyword, line number, file name
Assertions.assertEquals(OrekitMessages.UNABLE_TO_PARSE_ELEMENT_IN_FILE, oe.getSpecifier());
Assertions.assertEquals("TRANSMIT_FREQ_2", oe.getParts()[0]);
Assertions.assertEquals(32, oe.getParts()[1]);
Assertions.assertEquals("/ccsds/tdm/xml/TDM-data-inconsistent-block.xml", oe.getParts()[2]);
}
}
/**
 * Validation function for example 2 (one-way data with frequency offset).
 * Checks header, metadata, observations and data comments, then exercises
 * the global metadata setters that the parser itself does not use.
 * @param file Parsed TDM to validate
 */
private void validateTDMExample2(Tdm file) {
final TimeScale utc = TimeScalesFactory.getUTC();
// Header
Assertions.assertEquals(1.0, file.getHeader().getFormatVersion(), 0.0);
Assertions.assertEquals(new AbsoluteDate("2005-160T20:15:00", utc).durationFrom(file.getHeader().getCreationDate()), 0.0, 0.0);
Assertions.assertEquals("NASA/JPL",file.getHeader().getOriginator());
final List<String> headerComment = new ArrayList<String>();
headerComment.add("TDM example created by yyyyy-nnnA Nav Team (NASA/JPL)");
headerComment.add("StarTrek 1-way data, Ka band down");
Assertions.assertEquals(headerComment, file.getHeader().getComments());
// Meta-Data
final TdmMetadata metadata = file.getSegments().get(0).getMetadata();
Assertions.assertEquals("UTC", metadata.getTimeSystem().name());
Assertions.assertEquals(0.0, new AbsoluteDate("2005-159T17:41:00", utc).durationFrom(metadata.getStartTime()), 0.0);
Assertions.assertEquals(0.0, new AbsoluteDate("2005-159T17:41:40", utc).durationFrom(metadata.getStopTime()), 0.0);
Assertions.assertEquals("DSS-25", metadata.getParticipants().get(1));
Assertions.assertEquals("yyyy-nnnA", metadata.getParticipants().get(2));
Assertions.assertEquals(TrackingMode.SEQUENTIAL, metadata.getMode());
Assertions.assertArrayEquals(new int[] { 2, 1 }, metadata.getPath());
Assertions.assertEquals(1.0, metadata.getIntegrationInterval(), 0.0);
Assertions.assertEquals(IntegrationReference.MIDDLE, metadata.getIntegrationRef());
Assertions.assertEquals(32021035200.0, metadata.getFreqOffset(), 0.0);
Assertions.assertEquals(0.000077, metadata.getTransmitDelays().get(1), 0.0);
Assertions.assertEquals(0.000077, metadata.getReceiveDelays().get(1), 0.0);
Assertions.assertEquals(DataQuality.RAW, metadata.getDataQuality());
final List<String> metaDataComment = new ArrayList<String>();
metaDataComment.add("This is a meta-data comment");
Assertions.assertEquals(metaDataComment, metadata.getComments());
// Data
final List<Observation> observations = file.getSegments().get(0).getData().getObservations();
// Reference data (7 observations kept from the full example)
final String[] keywords = {"TRANSMIT_FREQ_2", "RECEIVE_FREQ_1", "RECEIVE_FREQ_1", "RECEIVE_FREQ_1",
"RECEIVE_FREQ_1", "RECEIVE_FREQ_1", "RECEIVE_FREQ_1"};
final String[] epochs = {"2005-159T17:41:00", "2005-159T17:41:00", "2005-159T17:41:01", "2005-159T17:41:02",
"2005-159T17:41:03", "2005-159T17:41:04", "2005-159T17:41:05"};
final double[] values = {32023442781.733, -409.2735, -371.1568, -333.0551,
-294.9673, -256.9054, -218.7951};
// Check consistency
for (int i = 0; i < keywords.length; i++) {
Assertions.assertEquals(keywords[i], observations.get(i).getType().name());
Assertions.assertEquals(new AbsoluteDate(epochs[i], utc).durationFrom(observations.get(i).getEpoch()), 0.0, 0.0);
Assertions.assertEquals(values[i], observations.get(i).getMeasurement(), 0.0);
}
// Comment
final List<String> dataComment = new ArrayList<String>();
dataComment.add("This is a data comment");
Assertions.assertEquals(dataComment, file.getSegments().get(0).getData().getComments());
// check some global setters that are not used by parser (it uses successive add instead)
metadata.setParticipants(Collections.singletonMap(12, "p12"));
Assertions.assertNull(metadata.getParticipants().get(1));
Assertions.assertEquals("p12", metadata.getParticipants().get(12));
metadata.setTransmitDelays(Collections.singletonMap(12, 1.25));
Assertions.assertNull(metadata.getTransmitDelays().get(1));
Assertions.assertEquals(1.25, metadata.getTransmitDelays().get(12).doubleValue(), 1.0e-15);
metadata.setReceiveDelays(Collections.singletonMap(12, 2.5));
Assertions.assertNull(metadata.getReceiveDelays().get(1));
Assertions.assertEquals(2.5, metadata.getReceiveDelays().get(12).doubleValue(), 1.0e-15);
}
/**
 * Validation function for example 4 (two-way ranging data only).
 * Checks header, metadata, all observations and the data comments.
 * @param file Parsed TDM to validate
 */
private void validateTDMExample4(Tdm file) {
final TimeScale utc = TimeScalesFactory.getUTC();
// Header
Assertions.assertEquals(1.0, file.getHeader().getFormatVersion(), 0.0);
Assertions.assertEquals(new AbsoluteDate("2005-191T23:00:00", utc).durationFrom(file.getHeader().getCreationDate()), 0.0, 0.0);
Assertions.assertEquals("NASA/JPL",file.getHeader().getOriginator());
final List<String> headerComment = new ArrayList<String>();
headerComment.add("TDM example created by yyyyy-nnnA Nav Team (NASA/JPL)");
Assertions.assertEquals(headerComment, file.getHeader().getComments());
// Meta-Data
final TdmMetadata metadata = file.getSegments().get(0).getMetadata();
Assertions.assertEquals("UTC", metadata.getTimeSystem().name());
Assertions.assertEquals("DSS-24", metadata.getParticipants().get(1));
Assertions.assertEquals("yyyy-nnnA", metadata.getParticipants().get(2));
Assertions.assertEquals(TrackingMode.SEQUENTIAL, metadata.getMode());
Assertions.assertArrayEquals(new int[] { 1, 2, 1 }, metadata.getPath());
Assertions.assertEquals(IntegrationReference.START, metadata.getIntegrationRef());
Assertions.assertEquals(RangeMode.COHERENT, metadata.getRangeMode());
Assertions.assertEquals(2.0e+26, metadata.getRawRangeModulus(), 0.0);
Assertions.assertEquals(2.0e+26, metadata.getRangeModulus(new IdentityConverter()), 0.0);
Assertions.assertEquals(RangeUnits.RU, metadata.getRangeUnits());
Assertions.assertEquals(7.7e-5, metadata.getTransmitDelays().get(1), 0.0);
Assertions.assertEquals(0.0, metadata.getTransmitDelays().get(2), 0.0);
Assertions.assertEquals(7.7e-5, metadata.getReceiveDelays().get(1), 0.0);
Assertions.assertEquals(0.0, metadata.getReceiveDelays().get(2), 0.0);
Assertions.assertEquals(46.7741, metadata.getCorrectionRange(new IdentityConverter()), 0.0);
Assertions.assertEquals(46.7741, metadata.getRawCorrectionRange(), 0.0);
Assertions.assertEquals(CorrectionApplied.YES, metadata.getCorrectionsApplied());
final List<String> metaDataComment = new ArrayList<String>();
metaDataComment.add("Range correction applied is range calibration to DSS-24.");
metaDataComment.add("Estimated RTLT at begin of pass = 950 seconds");
metaDataComment.add("Antenna Z-height correction 0.0545 km applied to uplink signal");
metaDataComment.add("Antenna Z-height correction 0.0189 km applied to downlink signal");
Assertions.assertEquals(metaDataComment, metadata.getComments());
// Data
final List<Observation> observations = file.getSegments().get(0).getData().getObservations();
// Reference data: 20 observations (5 epochs x 4 keywords). The keyword array
// previously contained only 16 entries, so the loop below — bounded by
// keywords.length — silently skipped the last epoch block; the fifth group
// is now included so every observation is checked.
final String[] keywords = {"TRANSMIT_FREQ_1", "TRANSMIT_FREQ_RATE_1", "RANGE", "PR_N0",
"TRANSMIT_FREQ_1", "TRANSMIT_FREQ_RATE_1", "RANGE", "PR_N0",
"TRANSMIT_FREQ_1", "TRANSMIT_FREQ_RATE_1", "RANGE", "PR_N0",
"TRANSMIT_FREQ_1", "TRANSMIT_FREQ_RATE_1", "RANGE", "PR_N0",
"TRANSMIT_FREQ_1", "TRANSMIT_FREQ_RATE_1", "RANGE", "PR_N0"};
final String[] epochs = {"2005-191T00:31:51", "2005-191T00:31:51", "2005-191T00:31:51", "2005-191T00:31:51",
"2005-191T00:34:48", "2005-191T00:34:48", "2005-191T00:34:48", "2005-191T00:34:48",
"2005-191T00:37:45", "2005-191T00:37:45", "2005-191T00:37:45", "2005-191T00:37:45",
"2005-191T00:40:42", "2005-191T00:40:42", "2005-191T00:40:42", "2005-191T00:40:42",
"2005-191T00:58:24", "2005-191T00:58:24", "2005-191T00:58:24", "2005-191T00:58:24"};
final double[] values = {7180064367.3536 , 0.59299, 39242998.5151986, 28.52538,
7180064472.3146 , 0.59305, 61172265.3115234, 28.39347,
7180064577.2756 , 0.59299, 15998108.8168328, 28.16193,
7180064682.2366 , 0.59299, 37938284.4138008, 29.44597,
7180065327.56141, 0.62085, 35478729.4012973, 30.48199};
// Check consistency: every parsed observation must be covered by the reference data
Assertions.assertEquals(keywords.length, observations.size());
for (int i = 0; i < keywords.length; i++) {
Assertions.assertEquals(keywords[i], observations.get(i).getType().name());
Assertions.assertEquals(new AbsoluteDate(epochs[i], utc).durationFrom(observations.get(i).getEpoch()), 0.0, 0.0);
Assertions.assertEquals(values[i], observations.get(i).getMeasurement(), 0.0);
}
// Comment
final List<String> dataComment = new ArrayList<String>();
dataComment.add("This is a data comment");
Assertions.assertEquals(dataComment, file.getSegments().get(0).getData().getComments());
}
    /**
     * Validation function for example 6.
     * <p>
     * Checks the header, the single metadata block (JAXA sequential tracking,
     * participants NORTH / F07R07 / E7) and the twenty observations of the
     * parsed TDM against the reference values of the example 6 file.
     * @param file Parsed TDM to validate
     */
    private void validateTDMExample6(Tdm file) {
        final TimeScale utc = TimeScalesFactory.getUTC();
        // Header
        Assertions.assertEquals(1.0, file.getHeader().getFormatVersion(), 0.0);
        Assertions.assertEquals(new AbsoluteDate("1998-06-10T01:00:00", utc).durationFrom(file.getHeader().getCreationDate()), 0.0, 0.0);
        Assertions.assertEquals("JAXA",file.getHeader().getOriginator());
        final List<String> headerComment = new ArrayList<String>();
        headerComment.add("TDM example created by yyyyy-nnnA Nav Team (JAXA)");
        Assertions.assertEquals(headerComment, file.getHeader().getComments());
        // Meta-Data
        final TdmMetadata metadata = file.getSegments().get(0).getMetadata();
        Assertions.assertEquals("UTC", metadata.getTimeSystem().name());
        Assertions.assertEquals(new AbsoluteDate("1998-06-10T00:57:37", utc).durationFrom(metadata.getStartTime()), 0.0, 0.0);
        Assertions.assertEquals(new AbsoluteDate("1998-06-10T00:57:44", utc).durationFrom(metadata.getStopTime()), 0.0, 0.0);
        Assertions.assertEquals("NORTH", metadata.getParticipants().get(1));
        Assertions.assertEquals("F07R07", metadata.getParticipants().get(2));
        Assertions.assertEquals("E7", metadata.getParticipants().get(3));
        Assertions.assertEquals(TrackingMode.SEQUENTIAL, metadata.getMode());
        Assertions.assertArrayEquals(new int[] { 1, 2, 3, 2, 1 }, metadata.getPath());
        Assertions.assertEquals(1.0, metadata.getIntegrationInterval(), 0.0);
        Assertions.assertEquals(IntegrationReference.MIDDLE, metadata.getIntegrationRef());
        Assertions.assertEquals(RangeMode.CONSTANT, metadata.getRangeMode());
        // RANGE_UNITS is km, so the raw modulus 1.0 and raw range correction 2.0
        // become 1000.0 and 2000.0 once converted to SI meters
        Assertions.assertEquals(1.0, metadata.getRawRangeModulus(), 0.0);
        Assertions.assertEquals(1000.0, metadata.getRangeModulus(new IdentityConverter()), 0.0);
        Assertions.assertEquals(RangeUnits.km, metadata.getRangeUnits());
        Assertions.assertEquals(AngleType.AZEL, metadata.getAngleType());
        Assertions.assertEquals(2.0, metadata.getRawCorrectionRange(), 0.0);
        Assertions.assertEquals(2000.0, metadata.getCorrectionRange(new IdentityConverter()), 0.0);
        Assertions.assertEquals(CorrectionApplied.YES, metadata.getCorrectionsApplied());
        // Data
        final List<Observation> observations = file.getSegments().get(0).getData().getObservations();
        // Reference data: four epochs, each with RANGE, two angles and two frequencies
        final String[] keywords = {"RANGE", "ANGLE_1", "ANGLE_2", "TRANSMIT_FREQ_1", "RECEIVE_FREQ",
            "RANGE", "ANGLE_1", "ANGLE_2", "TRANSMIT_FREQ_1", "RECEIVE_FREQ",
            "RANGE", "ANGLE_1", "ANGLE_2", "TRANSMIT_FREQ_1", "RECEIVE_FREQ",
            "RANGE", "ANGLE_1", "ANGLE_2", "TRANSMIT_FREQ_1", "RECEIVE_FREQ",};
        final String[] epochs = {"1998-06-10T00:57:37", "1998-06-10T00:57:37", "1998-06-10T00:57:37", "1998-06-10T00:57:37", "1998-06-10T00:57:37",
            "1998-06-10T00:57:38", "1998-06-10T00:57:38", "1998-06-10T00:57:38", "1998-06-10T00:57:38", "1998-06-10T00:57:38",
            "1998-06-10T00:57:39", "1998-06-10T00:57:39", "1998-06-10T00:57:39", "1998-06-10T00:57:39", "1998-06-10T00:57:39",
            "1998-06-10T00:57:44", "1998-06-10T00:57:44", "1998-06-10T00:57:44", "1998-06-10T00:57:44", "1998-06-10T00:57:44",};
        final double[] values = { 80452754.2, FastMath.toRadians(256.64002393), FastMath.toRadians(13.38100016), 2106395199.07917, 2287487999.0,
            80452736.8, FastMath.toRadians(256.64002393), FastMath.toRadians(13.38100016), 2106395199.07917, 2287487999.0,
            80452719.7, FastMath.toRadians(256.64002393), FastMath.toRadians(13.38100016), 2106395199.07917, 2287487999.0,
            80452633.1, FastMath.toRadians(256.64002393), FastMath.toRadians(13.38100016), 2106395199.07917, 2287487999.0};
        // Check consistency (measurements compared with a 1.0e-12 relative tolerance)
        for (int i = 0; i < keywords.length; i++) {
            Assertions.assertEquals(keywords[i], observations.get(i).getType().name());
            Assertions.assertEquals(new AbsoluteDate(epochs[i], utc).durationFrom(observations.get(i).getEpoch()), 0.0, 0.0);
            Assertions.assertEquals(values[i], observations.get(i).getMeasurement(), 1.0e-12 * FastMath.abs(values[i]));
        }
        // Comment
        final List<String> dataComment = new ArrayList<String>();
        dataComment.add("This is a data comment");
        Assertions.assertEquals(dataComment, file.getSegments().get(0).getData().getComments());
    }
/**
* Validation function for example 8.
* @param file Parsed TDM to validate
*/
private void validateTDMExample8(Tdm file) {
final TimeScale utc = TimeScalesFactory.getUTC();
// Header
Assertions.assertEquals(1.0, file.getHeader().getFormatVersion(), 0.0);
Assertions.assertEquals(new AbsoluteDate("2007-08-30T12:01:44.749", utc).durationFrom(file.getHeader().getCreationDate()), 0.0, 0.0);
Assertions.assertEquals("GSOC",file.getHeader().getOriginator());
final List<String> headerComment = new ArrayList<String>();
headerComment.add("GEOSCX INP");
Assertions.assertEquals(headerComment, file.getHeader().getComments());
// Meta-Data 1
final TdmMetadata metadata = file.getSegments().get(0).getMetadata();
Assertions.assertEquals("UTC", metadata.getTimeSystem().name());
Assertions.assertEquals(new AbsoluteDate("2007-08-29T07:00:02.000", utc).durationFrom(metadata.getStartTime()), 0.0, 0.0);
Assertions.assertEquals(new AbsoluteDate("2007-08-29T14:00:02.000", utc).durationFrom(metadata.getStopTime()), 0.0, 0.0);
Assertions.assertEquals("HBSTK", metadata.getParticipants().get(1));
Assertions.assertEquals("SAT", metadata.getParticipants().get(2));
Assertions.assertEquals(TrackingMode.SEQUENTIAL, metadata.getMode());
Assertions.assertArrayEquals(new int[] { 1, 2, 1 }, metadata.getPath());
Assertions.assertEquals(1.0, metadata.getIntegrationInterval(), 0.0);
Assertions.assertEquals(IntegrationReference.END, metadata.getIntegrationRef());
Assertions.assertEquals(AngleType.XSYE, metadata.getAngleType());
Assertions.assertEquals(DataQuality.RAW, metadata.getDataQuality());
final List<String> metaDataComment = new ArrayList<String>();
metaDataComment.add("This is a meta-data comment");
Assertions.assertEquals(metaDataComment, metadata.getComments());
// Data 1
final List<Observation> observations = file.getSegments().get(0).getData().getObservations();
// Reference data 1
final String[] keywords = {"DOPPLER_INTEGRATED", "ANGLE_1", "ANGLE_2",
"DOPPLER_INTEGRATED", "ANGLE_1", "ANGLE_2",
"DOPPLER_INTEGRATED", "ANGLE_1", "ANGLE_2"};
final String[] epochs = {"2007-08-29T07:00:02.000", "2007-08-29T07:00:02.000", "2007-08-29T07:00:02.000",
"2007-08-29T08:00:02.000", "2007-08-29T08:00:02.000", "2007-08-29T08:00:02.000",
"2007-08-29T14:00:02.000", "2007-08-29T14:00:02.000", "2007-08-29T14:00:02.000"};
final double[] values = {-1498.776048, FastMath.toRadians(67.01312389), FastMath.toRadians(18.28395556),
-2201.305217, FastMath.toRadians(67.01982278), FastMath.toRadians(21.19609167),
929.545817, FastMath.toRadians(-89.35626083), FastMath.toRadians(2.78791667)};
// Check consistency
for (int i = 0; i < keywords.length; i++) {
Assertions.assertEquals(keywords[i], observations.get(i).getType().name());
Assertions.assertEquals(new AbsoluteDate(epochs[i], utc).durationFrom(observations.get(i).getEpoch()), 0.0, 0.0);
Assertions.assertEquals(values[i], observations.get(i).getMeasurement(), 1.0e-12 * FastMath.abs(values[i]));
}
// Comment
final List<String> dataComment = new ArrayList<String>();
dataComment.add("This is a data comment");
Assertions.assertEquals(dataComment, file.getSegments().get(0).getData().getComments());
// Meta-Data 2
final TdmMetadata metadata2 = file.getSegments().get(1).getMetadata();
Assertions.assertEquals("UTC", metadata.getTimeSystem().name());
Assertions.assertEquals(new AbsoluteDate("2007-08-29T06:00:02.000", utc).durationFrom(metadata2.getStartTime()), 0.0, 0.0);
Assertions.assertEquals(new AbsoluteDate("2007-08-29T13:00:02.000", utc).durationFrom(metadata2.getStopTime()), 0.0, 0.0);
Assertions.assertEquals("WHM1", metadata2.getParticipants().get(1));
Assertions.assertEquals("SAT", metadata2.getParticipants().get(2));
Assertions.assertEquals(TrackingMode.SEQUENTIAL, metadata2.getMode());
Assertions.assertArrayEquals(new int[] { 1, 2, 1 }, metadata2.getPath());
Assertions.assertEquals(1.0, metadata2.getIntegrationInterval(), 0.0);
Assertions.assertEquals(IntegrationReference.END, metadata2.getIntegrationRef());
Assertions.assertEquals(1.0e7, metadata2.getRawRangeModulus(), 0.0);
Assertions.assertEquals(1.0e7 * Constants.SPEED_OF_LIGHT, metadata2.getRangeModulus(new IdentityConverter()), 0.0);
Assertions.assertEquals(RangeUnits.s, metadata2.getRangeUnits());
Assertions.assertEquals(AngleType.AZEL, metadata2.getAngleType());
Assertions.assertEquals(DataQuality.RAW, metadata2.getDataQuality());
Assertions.assertEquals(2.0, metadata2.getRawCorrectionRange(), 0.0);
Assertions.assertEquals(2.0 * Constants.SPEED_OF_LIGHT, metadata2.getCorrectionRange(new IdentityConverter()), 0.0);
Assertions.assertEquals(CorrectionApplied.YES, metadata2.getCorrectionsApplied());
final List<String> metaDataComment2 = new ArrayList<String>();
metaDataComment2.add("This is a meta-data comment");
Assertions.assertEquals(metaDataComment2, metadata2.getComments());
// Data 2
final List<Observation> observations2 = file.getSegments().get(1).getData().getObservations();
// Reference data 2
final String[] keywords2 = {"RANGE", "DOPPLER_INTEGRATED", "ANGLE_1", "ANGLE_2",
"RANGE", "DOPPLER_INTEGRATED", "ANGLE_1", "ANGLE_2",
"RANGE", "DOPPLER_INTEGRATED", "ANGLE_1", "ANGLE_2"};
final String[] epochs2 = {"2007-08-29T06:00:02.000", "2007-08-29T06:00:02.000", "2007-08-29T06:00:02.000", "2007-08-29T06:00:02.000",
"2007-08-29T07:00:02.000", "2007-08-29T07:00:02.000", "2007-08-29T07:00:02.000", "2007-08-29T07:00:02.000",
"2007-08-29T13:00:02.000", "2007-08-29T13:00:02.000", "2007-08-29T13:00:02.000", "2007-08-29T13:00:02.000"};
final double[] values2 = {4.00165248953670E+04 * Constants.SPEED_OF_LIGHT, -885.640091, FastMath.toRadians(99.53204250), FastMath.toRadians(1.26724167),
3.57238793591890E+04 * Constants.SPEED_OF_LIGHT, -1510.223139, FastMath.toRadians(103.33061750), FastMath.toRadians(4.77875278),
3.48156855860090E+04 * Constants.SPEED_OF_LIGHT, 1504.082291, FastMath.toRadians(243.73365222), FastMath.toRadians(8.78254167)};
// Check consistency
for (int i = 0; i < keywords2.length; i++) {
Assertions.assertEquals(keywords2[i], observations2.get(i).getType().name());
Assertions.assertEquals(new AbsoluteDate(epochs2[i], utc).durationFrom(observations2.get(i).getEpoch()), 0.0, 0.0);
Assertions.assertEquals(values2[i], observations2.get(i).getMeasurement(), 1.0e-12 * FastMath.abs(values2[i]));
}
// Comment
final List<String> dataComment2 = new ArrayList<String>();
dataComment2.add("This is a data comment");
Assertions.assertEquals(dataComment2, file.getSegments().get(1).getData().getComments());
}
    /**
     * Validation function for example 15.
     * <p>
     * The file contains three segments of clock bias / clock drift data,
     * one per DSN station (DSS-10, DSS-40, DSS-60), each measured against
     * UTC-NIST. All three segments share the same keywords and epochs.
     * @param file Parsed TDM to validate
     */
    private void validateTDMExample15(Tdm file) {
        final TimeScale utc = TimeScalesFactory.getUTC();
        // Header
        Assertions.assertEquals(1.0, file.getHeader().getFormatVersion(), 0.0);
        Assertions.assertEquals(new AbsoluteDate("2005-161T15:45:00", utc).durationFrom(file.getHeader().getCreationDate()), 0.0, 0.0);
        Assertions.assertEquals("NASA/JPL",file.getHeader().getOriginator());
        final List<String> headerComment = new ArrayList<String>();
        headerComment.add("TDM example created by yyyyy-nnnA Nav Team (NASA/JPL)");
        headerComment.add("The following are clock offsets, in seconds between the");
        headerComment.add("clocks at each DSN complex relative to UTC(NIST). The offset");
        headerComment.add("is a mean of readings using several GPS space vehicles in");
        headerComment.add("common view. Value is \"station clock minus UTC”.");
        Assertions.assertEquals(headerComment, file.getHeader().getComments());
        // Meta-Data 1
        final TdmMetadata metadata = file.getSegments().get(0).getMetadata();
        Assertions.assertEquals("UTC", metadata.getTimeSystem().name());
        Assertions.assertEquals(new AbsoluteDate("2005-142T12:00:00", utc).durationFrom(metadata.getStartTime()), 0.0, 0.0);
        Assertions.assertEquals(new AbsoluteDate("2005-145T12:00:00", utc).durationFrom(metadata.getStopTime()), 0.0, 0.0);
        Assertions.assertEquals("DSS-10", metadata.getParticipants().get(1));
        Assertions.assertEquals("UTC-NIST", metadata.getParticipants().get(2));
        final List<String> metaDataComment = new ArrayList<String>();
        metaDataComment.add("Note: SPC10 switched back to Maser1 from Maser2 on 2005-142");
        Assertions.assertEquals(metaDataComment, metadata.getComments());
        // Data 1
        final List<Observation> observations = file.getSegments().get(0).getData().getObservations();
        // Reference data 1: daily bias/drift pairs, last epoch has a bias only
        final String[] keywords = {"CLOCK_BIAS", "CLOCK_DRIFT",
            "CLOCK_BIAS", "CLOCK_DRIFT",
            "CLOCK_BIAS", "CLOCK_DRIFT",
            "CLOCK_BIAS"};
        final String[] epochs = {"2005-142T12:00:00", "2005-142T12:00:00",
            "2005-143T12:00:00", "2005-143T12:00:00",
            "2005-144T12:00:00", "2005-144T12:00:00",
            "2005-145T12:00:00"};
        final double[] values = {9.56e-7, 6.944e-14,
            9.62e-7, -2.083e-13,
            9.44e-7, -2.778e-13,
            9.20e-7};
        // Check consistency
        for (int i = 0; i < keywords.length; i++) {
            Assertions.assertEquals(keywords[i], observations.get(i).getType().name());
            Assertions.assertEquals(new AbsoluteDate(epochs[i], utc).durationFrom(observations.get(i).getEpoch()), 0.0, 0.0);
            Assertions.assertEquals(values[i], observations.get(i).getMeasurement(), 0.0);
        }
        // Comment
        final List<String> dataComment = new ArrayList<String>();
        dataComment.add("This is a data comment");
        Assertions.assertEquals(dataComment, file.getSegments().get(0).getData().getComments());
        // Meta-Data 2
        final TdmMetadata metadata2 = file.getSegments().get(1).getMetadata();
        Assertions.assertEquals("UTC", metadata2.getTimeSystem().name());
        Assertions.assertEquals(new AbsoluteDate("2005-142T12:00:00", utc).durationFrom(metadata2.getStartTime()), 0.0, 0.0);
        Assertions.assertEquals(new AbsoluteDate("2005-145T12:00:00", utc).durationFrom(metadata2.getStopTime()), 0.0, 0.0);
        Assertions.assertEquals("DSS-40", metadata2.getParticipants().get(1));
        Assertions.assertEquals("UTC-NIST", metadata2.getParticipants().get(2));
        final List<String> metaDataComment2 = new ArrayList<String>();
        metaDataComment2.add("This is a meta-data comment");
        Assertions.assertEquals(metaDataComment2, metadata2.getComments());
        // Data 2
        final List<Observation> observations2 = file.getSegments().get(1).getData().getObservations();
        // Reference data 2
        // Same keywords and epochs as for segment 1
        final double[] values2 = {-7.40e-7, -3.125e-13,
            -7.67e-7, -1.620e-13,
            -7.81e-7, -4.745e-13,
            -8.22e-7};
        // Check consistency
        for (int i = 0; i < keywords.length; i++) {
            Assertions.assertEquals(keywords[i], observations2.get(i).getType().name());
            Assertions.assertEquals(new AbsoluteDate(epochs[i], utc).durationFrom(observations2.get(i).getEpoch()), 0.0, 0.0);
            Assertions.assertEquals(values2[i], observations2.get(i).getMeasurement(), 0.0);
        }
        // Comment
        final List<String> dataComment2 = new ArrayList<String>();
        dataComment2.add("This is a data comment");
        Assertions.assertEquals(dataComment2, file.getSegments().get(1).getData().getComments());
        // Meta-Data 3
        final TdmMetadata metadata3 = file.getSegments().get(2).getMetadata();
        Assertions.assertEquals("UTC", metadata3.getTimeSystem().name());
        Assertions.assertEquals(new AbsoluteDate("2005-142T12:00:00", utc).durationFrom(metadata3.getStartTime()), 0.0, 0.0);
        Assertions.assertEquals(new AbsoluteDate("2005-145T12:00:00", utc).durationFrom(metadata3.getStopTime()), 0.0, 0.0);
        Assertions.assertEquals("DSS-60", metadata3.getParticipants().get(1));
        Assertions.assertEquals("UTC-NIST", metadata3.getParticipants().get(2));
        final List<String> metaDataComment3 = new ArrayList<String>();
        metaDataComment3.add("This is a meta-data comment");
        Assertions.assertEquals(metaDataComment3, metadata3.getComments());
        // Data 3
        final List<Observation> observations3 = file.getSegments().get(2).getData().getObservations();
        // Reference data 3
        // Same keywords and epochs as for segment 1
        final double[] values3 = {-1.782e-6, 1.736e-13,
            -1.767e-6, 1.157e-14,
            -1.766e-6, 8.102e-14,
            -1.759e-6};
        // Check consistency
        for (int i = 0; i < keywords.length; i++) {
            Assertions.assertEquals(keywords[i], observations3.get(i).getType().name());
            Assertions.assertEquals(new AbsoluteDate(epochs[i], utc).durationFrom(observations3.get(i).getEpoch()), 0.0, 0.0);
            Assertions.assertEquals(values3[i], observations3.get(i).getMeasurement(), 0.0);
        }
        // Comment
        final List<String> dataComment3 = new ArrayList<String>();
        dataComment3.add("This is a data comment");
        Assertions.assertEquals(dataComment3, file.getSegments().get(2).getData().getComments());
    }
/**
* Validation function for example displaying all keywords.
* @param file Parsed TDM to validate
*/
private void validateTDMExampleAllKeywordsSequential(Tdm file) {
validateTDMExampleAllKeywordsCommon(file);
final TdmMetadata metadata = file.getSegments().get(0).getMetadata();
Assertions.assertEquals(TrackingMode.SEQUENTIAL, metadata.getMode());
Assertions.assertArrayEquals(new int[] { 2, 1 }, metadata.getPath());
}
/**
* Validation function for example displaying all keywords.
* @param file Parsed TDM to validate
*/
private void validateTDMExampleAllKeywordsSingleDiff(Tdm file) {
validateTDMExampleAllKeywordsCommon(file);
final TdmMetadata metadata = file.getSegments().get(0).getMetadata();
Assertions.assertEquals(TrackingMode.SINGLE_DIFF, metadata.getMode());
Assertions.assertArrayEquals(new int[] { 4, 5 }, metadata.getPath1());
Assertions.assertArrayEquals(new int[] { 3, 2 }, metadata.getPath2());
}
    /**
     * Validation function for example displaying all keywords.
     * <p>
     * Checks everything shared between the "all keywords" variants: the
     * version 2.0 header, the 47 declared data types, every known metadata
     * keyword, and the synthetic observations (observation i has type
     * DATA_TYPES[i], epoch START + (i+1) seconds and measurement i+1).
     * @param file Parsed TDM to validate
     */
    private void validateTDMExampleAllKeywordsCommon(Tdm file) {
        final TimeScale utc = TimeScalesFactory.getUTC();
        // Header
        Assertions.assertEquals(2.0, file.getHeader().getFormatVersion(), 0.0);
        Assertions.assertEquals(new AbsoluteDate("2017-06-14T10:53:00.000", utc).durationFrom(file.getHeader().getCreationDate()), 0.0, 0.0);
        Assertions.assertEquals("CS GROUP",file.getHeader().getOriginator());
        Assertions.assertEquals("04655f62-1ba0-4ca6-92e9-eb3411db3d44", file.getHeader().getMessageId().toLowerCase());
        final List<String> headerComment = new ArrayList<String>();
        headerComment.add("TDM example created by CS GROUP");
        headerComment.add("Testing all TDM known meta-data and data keywords");
        Assertions.assertEquals(headerComment, file.getHeader().getComments());
        // Meta-Data
        final TdmMetadata metadata = file.getSegments().get(0).getMetadata();
        Assertions.assertEquals(1, metadata.getComments().size());
        Assertions.assertEquals("All known meta-data keywords displayed", metadata.getComments().get(0));
        // The 47 DATA_TYPES entries, in file order
        Assertions.assertEquals(47, metadata.getDataTypes().size());
        Assertions.assertEquals(ObservationType.CARRIER_POWER        , metadata.getDataTypes().get( 0));
        Assertions.assertEquals(ObservationType.DOPPLER_COUNT        , metadata.getDataTypes().get( 1));
        Assertions.assertEquals(ObservationType.DOPPLER_INSTANTANEOUS, metadata.getDataTypes().get( 2));
        Assertions.assertEquals(ObservationType.DOPPLER_INTEGRATED   , metadata.getDataTypes().get( 3));
        Assertions.assertEquals(ObservationType.PC_N0                , metadata.getDataTypes().get( 4));
        Assertions.assertEquals(ObservationType.RECEIVE_PHASE_CT_1   , metadata.getDataTypes().get( 5));
        Assertions.assertEquals(ObservationType.RECEIVE_PHASE_CT_2   , metadata.getDataTypes().get( 6));
        Assertions.assertEquals(ObservationType.RECEIVE_PHASE_CT_3   , metadata.getDataTypes().get( 7));
        Assertions.assertEquals(ObservationType.RECEIVE_PHASE_CT_4   , metadata.getDataTypes().get( 8));
        Assertions.assertEquals(ObservationType.RECEIVE_PHASE_CT_5   , metadata.getDataTypes().get( 9));
        Assertions.assertEquals(ObservationType.TRANSMIT_PHASE_CT_1  , metadata.getDataTypes().get(10));
        Assertions.assertEquals(ObservationType.TRANSMIT_PHASE_CT_2  , metadata.getDataTypes().get(11));
        Assertions.assertEquals(ObservationType.TRANSMIT_PHASE_CT_3  , metadata.getDataTypes().get(12));
        Assertions.assertEquals(ObservationType.TRANSMIT_PHASE_CT_4  , metadata.getDataTypes().get(13));
        Assertions.assertEquals(ObservationType.TRANSMIT_PHASE_CT_5  , metadata.getDataTypes().get(14));
        Assertions.assertEquals(ObservationType.PR_N0                , metadata.getDataTypes().get(15));
        Assertions.assertEquals(ObservationType.RANGE                , metadata.getDataTypes().get(16));
        Assertions.assertEquals(ObservationType.RECEIVE_FREQ_1       , metadata.getDataTypes().get(17));
        Assertions.assertEquals(ObservationType.RECEIVE_FREQ_2       , metadata.getDataTypes().get(18));
        Assertions.assertEquals(ObservationType.RECEIVE_FREQ_3       , metadata.getDataTypes().get(19));
        Assertions.assertEquals(ObservationType.RECEIVE_FREQ_4       , metadata.getDataTypes().get(20));
        Assertions.assertEquals(ObservationType.RECEIVE_FREQ_5       , metadata.getDataTypes().get(21));
        Assertions.assertEquals(ObservationType.RECEIVE_FREQ         , metadata.getDataTypes().get(22));
        Assertions.assertEquals(ObservationType.TRANSMIT_FREQ_1      , metadata.getDataTypes().get(23));
        Assertions.assertEquals(ObservationType.TRANSMIT_FREQ_2      , metadata.getDataTypes().get(24));
        Assertions.assertEquals(ObservationType.TRANSMIT_FREQ_3      , metadata.getDataTypes().get(25));
        Assertions.assertEquals(ObservationType.TRANSMIT_FREQ_4      , metadata.getDataTypes().get(26));
        Assertions.assertEquals(ObservationType.TRANSMIT_FREQ_5      , metadata.getDataTypes().get(27));
        Assertions.assertEquals(ObservationType.TRANSMIT_FREQ_RATE_1 , metadata.getDataTypes().get(28));
        Assertions.assertEquals(ObservationType.TRANSMIT_FREQ_RATE_2 , metadata.getDataTypes().get(29));
        Assertions.assertEquals(ObservationType.TRANSMIT_FREQ_RATE_3 , metadata.getDataTypes().get(30));
        Assertions.assertEquals(ObservationType.TRANSMIT_FREQ_RATE_4 , metadata.getDataTypes().get(31));
        Assertions.assertEquals(ObservationType.TRANSMIT_FREQ_RATE_5 , metadata.getDataTypes().get(32));
        Assertions.assertEquals(ObservationType.DOR                  , metadata.getDataTypes().get(33));
        Assertions.assertEquals(ObservationType.VLBI_DELAY           , metadata.getDataTypes().get(34));
        Assertions.assertEquals(ObservationType.ANGLE_1              , metadata.getDataTypes().get(35));
        Assertions.assertEquals(ObservationType.ANGLE_2              , metadata.getDataTypes().get(36));
        Assertions.assertEquals(ObservationType.MAG                  , metadata.getDataTypes().get(37));
        Assertions.assertEquals(ObservationType.RCS                  , metadata.getDataTypes().get(38));
        Assertions.assertEquals(ObservationType.CLOCK_BIAS           , metadata.getDataTypes().get(39));
        Assertions.assertEquals(ObservationType.CLOCK_DRIFT          , metadata.getDataTypes().get(40));
        Assertions.assertEquals(ObservationType.STEC                 , metadata.getDataTypes().get(41));
        Assertions.assertEquals(ObservationType.TROPO_DRY            , metadata.getDataTypes().get(42));
        Assertions.assertEquals(ObservationType.TROPO_WET            , metadata.getDataTypes().get(43));
        Assertions.assertEquals(ObservationType.PRESSURE             , metadata.getDataTypes().get(44));
        Assertions.assertEquals(ObservationType.RHUMIDITY            , metadata.getDataTypes().get(45));
        Assertions.assertEquals(ObservationType.TEMPERATURE          , metadata.getDataTypes().get(46));
        Assertions.assertEquals("UTC", metadata.getTimeSystem().name());
        Assertions.assertEquals(new AbsoluteDate("2017-06-14T10:53:00.000", utc).durationFrom(metadata.getStartTime()), 0.0, 0.0);
        Assertions.assertEquals(new AbsoluteDate("2017-06-15T10:53:00.000", utc).durationFrom(metadata.getStopTime()), 0.0, 0.0);
        // Five participants are declared
        Assertions.assertEquals("DSS-25", metadata.getParticipants().get(1));
        Assertions.assertEquals("yyyy-nnnA", metadata.getParticipants().get(2));
        Assertions.assertEquals("P3", metadata.getParticipants().get(3));
        Assertions.assertEquals("P4", metadata.getParticipants().get(4));
        Assertions.assertEquals("P5", metadata.getParticipants().get(5));
        Assertions.assertEquals("S", metadata.getTransmitBand());
        Assertions.assertEquals("L", metadata.getReceiveBand());
        Assertions.assertEquals(240, metadata.getTurnaroundNumerator(), 0);
        Assertions.assertEquals(221, metadata.getTurnaroundDenominator(), 0);
        Assertions.assertEquals(TimetagReference.TRANSMIT, metadata.getTimetagRef());
        Assertions.assertEquals(1.0, metadata.getIntegrationInterval(), 0.0);
        Assertions.assertEquals(IntegrationReference.MIDDLE, metadata.getIntegrationRef());
        Assertions.assertEquals(32021035200.0, metadata.getFreqOffset(), 0.0);
        Assertions.assertEquals(RangeMode.COHERENT, metadata.getRangeMode());
        Assertions.assertEquals(32768.0, metadata.getRawRangeModulus(), 0.0);
        Assertions.assertEquals(RangeUnits.RU, metadata.getRangeUnits());
        Assertions.assertEquals(AngleType.RADEC, metadata.getAngleType());
        // Reference frame is accessible in all three representations
        Assertions.assertEquals("EME2000", metadata.getReferenceFrame().getName());
        Assertions.assertEquals(CelestialBodyFrame.EME2000, metadata.getReferenceFrame().asCelestialBodyFrame());
        Assertions.assertEquals(FramesFactory.getEME2000(), metadata.getReferenceFrame().asFrame());
        Assertions.assertEquals("HERMITE", metadata.getInterpolationMethod());
        Assertions.assertEquals(5, metadata.getInterpolationDegree());
        Assertions.assertEquals(120000.0, metadata.getDopplerCountBias(), 1.0e-5);
        Assertions.assertEquals(1000.0, metadata.getDopplerCountScale(), 1.0e-10);
        Assertions.assertFalse(metadata.hasDopplerCountRollover());
        // One delay entry (77 microseconds) per participant, transmit and receive
        Assertions.assertEquals(0.000077, metadata.getTransmitDelays().get(1), 0.0);
        Assertions.assertEquals(0.000077, metadata.getTransmitDelays().get(2), 0.0);
        Assertions.assertEquals(0.000077, metadata.getTransmitDelays().get(3), 0.0);
        Assertions.assertEquals(0.000077, metadata.getTransmitDelays().get(4), 0.0);
        Assertions.assertEquals(0.000077, metadata.getTransmitDelays().get(5), 0.0);
        Assertions.assertEquals(0.000077, metadata.getReceiveDelays().get(1), 0.0);
        Assertions.assertEquals(0.000077, metadata.getReceiveDelays().get(2), 0.0);
        Assertions.assertEquals(0.000077, metadata.getReceiveDelays().get(3), 0.0);
        Assertions.assertEquals(0.000077, metadata.getReceiveDelays().get(4), 0.0);
        Assertions.assertEquals(0.000077, metadata.getReceiveDelays().get(5), 0.0);
        Assertions.assertEquals(DataQuality.RAW, metadata.getDataQuality());
        // Correction keywords: angles are stored in radians, others in SI units
        Assertions.assertEquals(FastMath.toRadians(1.0), metadata.getCorrectionAngle1(), 0.0);
        Assertions.assertEquals(FastMath.toRadians(2.0), metadata.getCorrectionAngle2(), 0.0);
        Assertions.assertEquals(3000.0, metadata.getCorrectionDoppler(), 0.0);
        Assertions.assertEquals(4.0, metadata.getCorrectionMagnitude(), 0.0);
        Assertions.assertEquals(5.0, metadata.getRawCorrectionRange(), 0.0);
        Assertions.assertEquals(6.0, metadata.getCorrectionRcs(), 0.0);
        Assertions.assertEquals(7.0, metadata.getCorrectionReceive(), 0.0);
        Assertions.assertEquals(8.0, metadata.getCorrectionTransmit(), 0.0);
        Assertions.assertEquals(FastMath.toRadians(9.0), metadata.getCorrectionAberrationYearly(), 0.0);
        Assertions.assertEquals(FastMath.toRadians(10.0), metadata.getCorrectionAberrationDiurnal(), 0.0);
        Assertions.assertEquals(CorrectionApplied.YES, metadata.getCorrectionsApplied());
        final List<String> metaDataComment = new ArrayList<String>();
        metaDataComment.add("All known meta-data keywords displayed");
        Assertions.assertEquals(metaDataComment, metadata.getComments());
        // Data
        final List<Observation> observations = file.getSegments().get(0).getData().getObservations();
        // Reference data
        final AbsoluteDate epoch = new AbsoluteDate("2017-06-14T10:53:00.000", utc);
        // Check consistency: observation i has type DATA_TYPES[i],
        // epoch = start + (i+1) seconds and measurement value i+1
        for (int i = 0; i < metadata.getDataTypes().size(); i++) {
            Assertions.assertEquals(metadata.getDataTypes().get(i), observations.get(i).getType());
            Assertions.assertEquals(epoch.shiftedBy((double) (i+1)).durationFrom(observations.get(i).getEpoch()), 0.0, 0.0);
            Assertions.assertEquals((double) (i+1), observations.get(i).getMeasurement(), 1.0e-12);
        }
        // Comment
        final List<String> dataComment = new ArrayList<String>();
        dataComment.add("Data Related Keywords");
        Assertions.assertEquals(dataComment, file.getSegments().get(0).getData().getComments());
    }
}
| {
"content_hash": "4ce864369fe122a50f8364ed7cbef410",
"timestamp": "",
"source": "github",
"line_count": 1142,
"max_line_length": 170,
"avg_line_length": 57.646234676007005,
"alnum_prop": 0.6768288978004617,
"repo_name": "CS-SI/Orekit",
"id": "2d82290a8fc204d8442f8457285673c8ba973e1e",
"size": "66634",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/test/java/org/orekit/files/ccsds/ndm/tdm/TdmParserTest.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Fortran",
"bytes": "7408"
},
{
"name": "HTML",
"bytes": "19673"
},
{
"name": "Java",
"bytes": "25367949"
},
{
"name": "Roff",
"bytes": "31072"
},
{
"name": "XSLT",
"bytes": "734"
}
],
"symlink_target": ""
} |
#include <aws/codedeploy/model/BatchGetDeploymentGroupsRequest.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <utility>
using namespace Aws::CodeDeploy::Model;
using namespace Aws::Utils::Json;
using namespace Aws::Utils;
// Both "has been set" flags start false: a field is only serialized into the
// request payload after its setter marks it as set (see SerializePayload).
BatchGetDeploymentGroupsRequest::BatchGetDeploymentGroupsRequest() : 
    m_applicationNameHasBeenSet(false),
    m_deploymentGroupNamesHasBeenSet(false)
{
}
// Builds the JSON body of the BatchGetDeploymentGroups request.
// Only fields whose setters were called are emitted.
Aws::String BatchGetDeploymentGroupsRequest::SerializePayload() const
{
  JsonValue payload;

  if(m_applicationNameHasBeenSet)
  {
    payload.WithString("applicationName", m_applicationName);
  }

  if(m_deploymentGroupNamesHasBeenSet)
  {
    // Copy the deployment group names into a JSON array, element by element.
    Aws::Utils::Array<JsonValue> groupNamesArray(m_deploymentGroupNames.size());
    const unsigned count = groupNamesArray.GetLength();
    for(unsigned idx = 0; idx < count; ++idx)
    {
      groupNamesArray[idx].AsString(m_deploymentGroupNames[idx]);
    }
    payload.WithArray("deploymentGroupNames", std::move(groupNamesArray));
  }

  return payload.View().WriteReadable();
}
// Adds the JSON-RPC operation target header identifying this CodeDeploy call.
Aws::Http::HeaderValueCollection BatchGetDeploymentGroupsRequest::GetRequestSpecificHeaders() const
{
  const Aws::Http::HeaderValuePair targetHeader("X-Amz-Target", "CodeDeploy_20141006.BatchGetDeploymentGroups");
  Aws::Http::HeaderValueCollection headers;
  headers.insert(targetHeader);
  return headers;
}
| {
"content_hash": "70a43bd4e8af6b1885f10c00894b9a4c",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 145,
"avg_line_length": 27.51923076923077,
"alnum_prop": 0.793151642208246,
"repo_name": "aws/aws-sdk-cpp",
"id": "27c0e246821cfd47bf04019539bfe686535f9d15",
"size": "1550",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "aws-cpp-sdk-codedeploy/source/model/BatchGetDeploymentGroupsRequest.cpp",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "309797"
},
{
"name": "C++",
"bytes": "476866144"
},
{
"name": "CMake",
"bytes": "1245180"
},
{
"name": "Dockerfile",
"bytes": "11688"
},
{
"name": "HTML",
"bytes": "8056"
},
{
"name": "Java",
"bytes": "413602"
},
{
"name": "Python",
"bytes": "79245"
},
{
"name": "Shell",
"bytes": "9246"
}
],
"symlink_target": ""
} |
USER=yourjenkinsusername
PWD=somejenkinspwdtoken
URL=https://jenkins.yourdomain.com/view/product/eFeedback/cc.xml
# Directory setup
SCRIPTDIR=$(dirname $(readlink -f $0))
SOUNDDIR="$SCRIPTDIR/../sounddir"
FILEOLD="/tmp/efeedback_old.xml"
FILENEW="/tmp/efeedback_new.xml"
# Debug data
#FILEOLD="../testdata/cc-test_allOk.xml"
#FILENEW="../testdata/cc-test_oneFailed.xml"
###########################################################
# Get Jenkins XML status
wget -O- -q --no-check-certificate --auth-no-challenge --http-user=$USER --http-password=$PWD $URL > $FILENEW
RESULT=`ruby $SCRIPTDIR/JenkinsMonitor.rb $FILEOLD $FILENEW`
cp $FILENEW $FILEOLD
# playSound FILE
# Logs and plays the given sound file with SoX's `play`.
# BUG FIX: "$1" is now quoted so sound paths containing spaces work.
function playSound {
    echo "playing $1"
    play -q "$1"
}
# Log the monitor verdict with a timestamp.
# BUG FIX: the old format "%m%d_%H%m%S" repeated %m (month) where the
# minutes field was intended; %M is minutes.
NOW=$(date +"%m%d_%H%M%S")
echo "$NOW $RESULT"

# Map the monitor's verdict to the matching sound effect.
case $RESULT in
    FirstFailure)
        playSound "$SOUNDDIR/firstfail_redalert.mp3"
        ;;
    LastFixed)
        playSound "$SOUNDDIR/allfixed_diagnosticcomplete_ep.mp3"
        ;;
    StillFailing)
        playSound "$SOUNDDIR/stillfail_warningwarpcorecollapse_ep.mp3"
        ;;
    NewFailure)
        playSound "$SOUNDDIR/onefail_tos_hullhit_3.mp3"
        ;;
    OneFixed)
        playSound "$SOUNDDIR/onefixed_voy-doc-onedown.wav"
        ;;
    OK)
        # Everything green and unchanged: stay silent.
        ;;
    *)
        echo "Unknown"
        ;;
esac
| {
"content_hash": "91efc295e99e4f1f435179c41338d323",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 110,
"avg_line_length": 21.379310344827587,
"alnum_prop": 0.6758064516129032,
"repo_name": "LechnerMartin/KISSJenkinsAudioMonitor",
"id": "a351643e14ecc3c3f64fddff26ddab915c395fed",
"size": "1382",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/JenkinsMonitor.sh",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Ruby",
"bytes": "10840"
},
{
"name": "Shell",
"bytes": "1382"
}
],
"symlink_target": ""
} |
package be.kdg.healthtips.task;
import android.annotation.TargetApi;
import android.content.Context;
import android.content.Intent;
import android.os.AsyncTask;
import android.os.Build;
import com.temboo.Library.Fitbit.Activities.GetActivityDailyGoals;
import com.temboo.core.TembooException;
import com.temboo.core.TembooSession;
import org.json.JSONException;
import org.json.JSONObject;
import be.kdg.healthtips.activity.LoginActivity;
import be.kdg.healthtips.auth.FitbitTokenManager;
import be.kdg.healthtips.session.TembooSessionManager;
/**
 * Background task that fetches the user's daily activity goals from the
 * Fitbit API via the Temboo "GetActivityDailyGoals" choreo.
 *
 * <p>Returns the parsed JSON response, or {@code null} if the Temboo call or
 * the JSON parsing fails. On an HTTP 401 (expired/revoked token) the user is
 * redirected to the login screen.
 */
public class GetDailyGoalATask extends AsyncTask<Void, Void, JSONObject> {

    private final FitbitTokenManager tokenManager;
    private final Context context;

    @TargetApi(Build.VERSION_CODES.CUPCAKE)
    public GetDailyGoalATask(Context context) {
        super();
        this.tokenManager = FitbitTokenManager.getInstance(context);
        this.context = context;
    }

    /**
     * Executes the Temboo choreo off the UI thread.
     *
     * @param params unused
     * @return the Fitbit daily-goals response as a {@link JSONObject}, or
     *         {@code null} on any failure
     */
    @Override
    protected JSONObject doInBackground(Void... params) {
        try {
            TembooSession session = TembooSessionManager.getSession();
            GetActivityDailyGoals getActivityDailyGoalsChoreo = new GetActivityDailyGoals(session);
            GetActivityDailyGoals.GetActivityDailyGoalsInputSet input = getActivityDailyGoalsChoreo.newInputSet();

            // OAuth credentials for the Fitbit API, stored by FitbitTokenManager.
            input.set_AccessToken(tokenManager.getFitBitAccesToken());
            input.set_AccessTokenSecret(tokenManager.getFitBitAccesTokenSecret());
            input.set_ConsumerSecret(FitbitTokenManager.getConsumerSecret());
            input.set_ConsumerKey(FitbitTokenManager.getConsumerKey());

            GetActivityDailyGoals.GetActivityDailyGoalsResultSet result = getActivityDailyGoalsChoreo.execute(input);
            return new JSONObject(result.get_Response());
        } catch (TembooException e) {
            // BUG FIX: getMessage() may return null, which previously threw a
            // NullPointerException here and masked the original failure.
            String message = e.getMessage();
            if (message != null && message.contains("status code of 401")) {
                // Token rejected by Fitbit: send the user back to login.
                // NOTE(review): if 'context' is not an Activity, this may need
                // Intent.FLAG_ACTIVITY_NEW_TASK -- confirm against the callers.
                Intent intent = new Intent(context, LoginActivity.class);
                context.startActivity(intent);
            }
            System.err.println("Temboo threw an exception, can't get daily goal from Temboo API.");
        } catch (JSONException e) {
            e.printStackTrace();
        }
        return null;
    }
}
| {
"content_hash": "252793138ece9b6b0e0a6093cbef3898",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 117,
"avg_line_length": 39.607142857142854,
"alnum_prop": 0.7204688908926962,
"repo_name": "GoGoris/TipFit",
"id": "74e6f4cb7db54f849b36fa69f312ddd53922ff5b",
"size": "2218",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/src/main/java/be/kdg/healthtips/task/GetDailyGoalATask.java",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "82830"
}
],
"symlink_target": ""
} |
ACCEPTED
#### According to
International Plant Names Index
#### Published in
null
#### Original name
null
### Remarks
null | {
"content_hash": "a38f3d715f389e66ed2592e19f2941f4",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 31,
"avg_line_length": 9.692307692307692,
"alnum_prop": 0.7063492063492064,
"repo_name": "mdoering/backbone",
"id": "eee414b9de8774c0af0f04909a01c69bdcae2916",
"size": "195",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "life/Plantae/Magnoliophyta/Magnoliopsida/Boraginales/Boraginaceae/Echinospermum/Echinospermum deflexum/Echinospermum deflexum americanum/README.md",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
using System;
using System.CodeDom;
using System.Collections;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows;
using System.Windows.Controls;
using System.Windows.Media;
namespace EmptyKeys.UserInterface.Generator.Types.Controls
{
/// <summary>
/// Implements Canvas control generator
/// </summary>
/// <summary>
/// Implements the Canvas control generator. Canvas has no control-specific
/// code to emit beyond what the Panel base generator produces.
/// </summary>
public class CanvasGeneratorType : PanelGeneratorType
{
    /// <summary>
    /// Gets the XAML type this generator handles.
    /// </summary>
    /// <value>
    /// The type of the xaml.
    /// </value>
    public override Type XamlType
    {
        get { return typeof(Canvas); }
    }

    /// <summary>
    /// Generates initialization code for a Canvas element.
    /// </summary>
    /// <param name="source">The dependency object</param>
    /// <param name="classType">Type of the class.</param>
    /// <param name="initMethod">The initialize method.</param>
    /// <param name="generateField">if set to <c>true</c> [generate field].</param>
    /// <returns>A reference expression to the generated field.</returns>
    public override CodeExpression Generate(DependencyObject source, CodeTypeDeclaration classType, CodeMemberMethod initMethod, bool generateField)
    {
        // All Canvas generation is handled by the Panel base class; the
        // previous unused local cast of 'source' to Canvas was removed.
        return base.Generate(source, classType, initMethod, generateField);
    }
}
}
| {
"content_hash": "d5b1092fcdde739113be70f71d809d75",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 152,
"avg_line_length": 34.4,
"alnum_prop": 0.6111111111111112,
"repo_name": "EmptyKeys/UI_Generator",
"id": "8e777cf18c7cbfe8cbe4624bc25bd5eec84a330f",
"size": "1550",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "UIGenerator/Types/Controls/CanvasGeneratorType.cs",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "112"
},
{
"name": "C#",
"bytes": "510179"
},
{
"name": "FLUX",
"bytes": "1087"
}
],
"symlink_target": ""
} |
Acumen Solutions' Web App, called FDA Food Recalls, leverages the openFDA API for the Food data sets. Acumen's team decided to use the Food data sets of the API, instead of the Drug data sets, in order to present a robust solution while demonstrating its agile capacity in the short period available for prototyping.
### Approach
Since Mobile devices continue to grow in popularity year after year, and current analysis predicts that global mobile data traffic will increase nearly 11-fold between 2013 and 2018 ([http://goo.gl/xlVQfz](http://goo.gl/xlVQfz)). The need to have a responsive web app is becoming increasingly important. The Acumen team looked at foundations and frameworks that support modern Web 3.0 websites that are flexible enough for customization and growth, especially in an Agile environment. Among all the available CSS frameworks out there, Bootstrap has been chosen because of the following reasons:
Reason #1. Platform agnostic: Easy to get started, faster coding
CSS Pre-processing is great and every front end developer should learn it. However not everyone is using it. There are still many designers creating and managing CSS files the same old way. Bootstrap offers LESS files for those who know how to use it, but also provides the plain old CSS file for those who don’t want to use CSS pre-processing.
To take advantage of what Bootstrap has to offer, a developer just has to download the files from Bootstrap on Github and after unzipping, include the files in the head of any HTML document.
Reason #2. Cross browser Compatibility: Great grid system for any device size
Bootstrap is built on responsive 12-column grids, layouts and components. Whether the design calls for a fixed grid or a responsive one, it's only a matter of a few changes. Offsetting & Nesting of columns is also possible in both fixed and fluid width layouts.
Another useful set of features are the responsive utility classes using which a developer can make a certain block of content appear or hide only on devices based on the size of their screen. This customization possibility is very handy when a developer wants to hide some content based on screen size. Adding a class such as .visible-desktop to an element, will make it visible only for desktop users. There are similar classes for tablets and phones.
Reason #3. Base styling for most HTML elements
A website has many different elements such as headings, lists, tables, buttons, forms, etc. All these fundamental HTML elements have been styled and enhanced with extensible classes. The HTML elements for which styles are provided are:
Typography Code Tables Forms Buttons Images Icons
Reason #4. Extensive list of components
Styling of every single element follows a consistent theme and takes just few minutes. Some of the components pre-styled are:
Dropdowns Button Groups Navigation Bar Breadcrumbs Labels & Badges Alerts Progress Bar And many others.
Reason #5. Bundled Javascript plugins
The components such as drop down menu are made interactive with the numerous JavaScript plugins bundled in the bootstrap package. If a project requires sliders, tabs, accordions, then a developer no longer has to try and test numerous different plugins across the web. Adding these functionalities is just a matter of adding few lines of code.
#### Using Angularjs
Angular is written from the perspective of putting more horsepower under the hood of the HTML code of a web application or site, an approach known in the Angular world as "Directives".
With Angular HTML attributes are automatically connected to functions that perform routine and straightforward actions behind the scenes, thereby eliminating the need for some (or potentially a big chunk) of laborious JS code.
#### Project Team
Project Leadership for this project was defined to be Adam Horvath, Deepak Gupta and Saurabh Verma. These three Account Level Executives were responsible for producing the response to the Request for Proposal and the integrity of the overall solution.
Jamil Masarweh was identified by Project Leadership as the Product Owner since he holds a technical and business background with over 8 years of experience.
#### Labor Category
- Technical Architect - Sahil Grover
- Usability Design - Austin Fadely
- Writer Content - Jamil Masarweh
- Frontend Developer - Matt Heim
- Backend Developer - Claude Sutterlin
- Delivery Manager - Girish Ranade
- Agile Coach - A dedicated resource was not needed for this role, the selected team members are well versed in the tenets of Agile development and daily scrum standup meetings were managed by Girish Ranade, a certified Scrum Master
- Business Analysis - Austin Fadely
Note: In the case of Austin Fadely, he played two separate roles, as neither was full time and both roles were important to the success of the project.
### Understanding what people need: Human Centric Design
Surveys: Interviews were based on a few short questions with scope for the user to expand on their responses. These surveys were sent to the Project Stakeholders and a few members of the general public. The Survey results helped in identifying and prioritizing requirements. Survey Questions can be found in the Github link:
[https://github.com/AcumenSolutions/acumen-gsa-agile-prototype/raw/master/Agile%20Project%20Artifacts/GSA%20Agile%20Prototype%20Survey.docx](https://github.com/AcumenSolutions/acumen-gsa-agile-prototype/raw/master/Agile%20Project%20Artifacts/GSA%20Agile%20Prototype%20Survey.docx)
Focus Groups: Group participants represented a cross section of the Project Team and Stakeholders - providing a realistic representation of the consumer. With the time limits on the project, these sessions were restricted to short sessions and used brainstorming where all team members were encouraged to add new ideas or add onto a previous one. Examples of ideas from these discussions include:
a) Mobile Application
b) Easy to navigate with minimal keystrokes.
c) Must be intuitive, i.e should not have lots of unnecessary verbiage on the screen
Scenarios of Use: Allowed team members to provide detailed realistic examples of how users would carry out their tasks with the application. The objective was to provide examples of the use as an aid to understanding and clarifying user requirements and to provide a basis for later usability testing.
### Design style guide and pattern library
A guide for the web application style and pattern library has been included, and can be found in
### Unit Test and Testing with people
Unit testing has been part of the development effort. Using a test driven development strategy the team wrote test cases to allow testing of each component before releasing the application to the QA team (using $scope).
AngularJS provides dependency injection for the application XHR requests, which can be mocked, and allowed the developer team to provide abstractions which allows testing the model without having to resort to manipulating the DOM. The test can then assert that the data passed was processed successfully without having to create or look at the state of the DOM or wait for any XHR requests to return data. Functions were tested in isolation using $scope.
The goal was to cover 100% of the developed code with automated tests (using embedded AngularJS scope feature, modular injection and Heroku deployment test. Automated Test here means, testing without 3rd party tools or 3rd party licenses). As code was completed, unit tests were written to validate the functionality of each individual component of the application.
After unit testing was completed the application was prepared and deployed via Heroku to allow system level or end to end testing by the QA team
Austin Fadely, the lead Usability Designer and Business Analyst, was assigned to develop the testing methodology and approach and then coordinate all testing and application validation activities.
A test plan, which documents the overall testing strategy was completed and can be found at
[https://github.com/AcumenSolutions/acumen-gsa-agile-prototype/raw/master/Agile%20Project%20Artifacts/%20Test%20Plan.docx](https://github.com/AcumenSolutions/acumen-gsa-agile-prototype/raw/master/Agile%20Project%20Artifacts/%20Test%20Plan.docx)
Using the acceptance criteria from the user stories, test cases were developed.
From the test cases, detailed step by step test scripts, for the tester team, were created and can be found at :
[https://github.com/AcumenSolutions/acumen-gsa-agile-prototype/raw/master/Agile%20Project%20Artifacts/%20Test%20Cases%20.xlsx](https://github.com/AcumenSolutions/acumen-gsa-agile-prototype/raw/master/Agile%20Project%20Artifacts/%20Test%20Cases%20.xlsx)
To ensure traceability, each test case was cross-referenced to the User Story ID, which had been assigned in Pivotal Tracker. The testing team was instructed to note any discrepancy, whether in the script itself or with the application. A triage process was instituted to report items as follows:
a) Is this a duplicate, if so combine
b) Is this preventing the application from working i.e is this a showstopper
c) Is this a new request
d) Is this a "must have" item or a "nice to have" item
A Link to the GitHub repository listing all the created issues can be found at
[https://github.com/AcumenSolutions/acumen-gsa-agile-prototype/raw/master/Agile%20Project%20Artifacts/Pivotal_Tracker_Export_of%20all_Issues%20.xlsx](https://github.com/AcumenSolutions/acumen-gsa-agile-prototype/raw/master/Agile%20Project%20Artifacts/Pivotal_Tracker_Export_of%20all_Issues%20.xlsx)
The strategy and agile approach used was to incorporate team feedback and provide a daily deployment - at a minimum - allowing testing and the opportunity for feedback. Depending on the volume of issues or bugs addressed, deployments were sometimes more frequent.
#### Continuous Integration and Monitoring
The build process uses gulp (which is detailed in the open source section above), which is configured to run all tests. Continuous integration was achieved through Heroku, which can automatically deploy the latest build version following a push from the developer's local machine to the remote repository.
Continuous monitoring, which monitors the health of the application and checks for system level and run time errors is a built in feature of Heroku: https://devcenter.heroku.com/articles/production-check#visibility-and-monitoring and also https://devcenter.heroku.com/articles/metrics
### Prerequisites
[Node.js](https://nodejs.org/download/)
[Heroku Toolbelt](https://toolbelt.heroku.com)
[Bower](http://bower.io/)
[Gulp](http://gulpjs.com/)
#### Installation
1. Sign-up for an API key from the [Open FDA API](https://open.fda.gov)
2. Clone the repository `git clone https://github.com/AcumenSolutions/acumen-gsa-agile-prototype.git'
3. Install node dependencies `sudo npm install`
4. Install bower dependencies `bower install`
5. Build all dependencies and static files `gulp build`
#### Local configuration
1. Store your API key locally `export OpenFDAAPIKey=<your api key>`
2. Launch the application `npm start`
#### Heroku configuration
[](https://heroku.com/deploy)
#### Or deploy manually
1. Create a new app on Heroku `heroku create <project name>`
2. Set your API in the Heroku config `heroku config:set OpenFDAAPIKey=<your api key>`
3. Deploy the application to Heroku `git push heroku master`
### Usage
[Try our hosted demo](https://acumen-gsa-prototype.herokuapp.com)
1. In a web browser navigate to the application url ([http://localhost:3000](http://localhost:3000) or [http://`<project name>`.herokuapp.com](http://`<project name>`.herokuapp.com))
2. Select a recall category
3. Choose a state for which you'd like to view recalls
4. (Optional) Enter product keywords to find a particular recall.
5. Click Search
6. Select a result to view detailed information about the recall
### Running Unit Tests
Run the command `gulp test` to run both client and server side tests.
If you want to run just the client tests, run `gulp test:client`, and for just server tests run `gulp test:server`.
### Contributing
1. Fork it!
2. Create your feature branch: `git checkout -b my-new-feature`
3. Commit your changes: `git commit -am 'Add some feature'`
4. Push to the branch: `git push origin my-new-feature`
5. Submit a pull request
### License
The MIT license
Copyright (c) 2015 Acumen Solutions, Inc | {
"content_hash": "c5a0356c325ea6cb8e96f28c7615f3d8",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 594,
"avg_line_length": 73.5,
"alnum_prop": 0.7987995198079232,
"repo_name": "AcumenSolutions/acumen-gsa-agile-prototype",
"id": "2c195e7297cdd4af4ddda5884d465d2b1de03950",
"size": "12577",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "README.md",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14326"
},
{
"name": "HTML",
"bytes": "12506"
},
{
"name": "Handlebars",
"bytes": "74"
},
{
"name": "JavaScript",
"bytes": "72206"
}
],
"symlink_target": ""
} |
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Diagnostics;
using System.Reflection;
using System.Text;
using Orleans.Runtime;
namespace Orleans
{
/// <summary>
/// Shared sizing and cleanup defaults for <c>Interner</c> instances.
/// The SIZE_* values are initial capacities chosen from the prime-number
/// resize sequence of <c>ConcurrentDictionary</c> (listed below), so the
/// dictionary starts at a capacity it would naturally grow to.
/// </summary>
internal static class InternerConstants
{
    /* Recommended cache sizes, based on expansion policy of ConcurrentDictionary
    // Internal implementation of ConcurrentDictionary resizes to prime numbers (not divisible by 3 or 5 or 7)
    31
    67
    137
    277
    557
    1,117
    2,237
    4,477
    8,957
    17,917
    35,837
    71,677
    143,357
    286,717
    573,437
    1,146,877
    2,293,757
    4,587,517
    9,175,037
    18,350,077
    36,700,157
    */
    // Initial capacities for caches of increasing expected population.
    public const int SIZE_SMALL = 67;
    public const int SIZE_MEDIUM = 1117;
    public const int SIZE_LARGE = 143357;
    public const int SIZE_X_LARGE = 2293757;

    // Default period between sweeps that evict dead WeakReference entries.
    public static readonly TimeSpan DefaultCacheCleanupFreq = TimeSpan.FromMinutes(10);
}
/// <summary>
/// Provide a weakly-referenced cache of interned objects.
/// Interner is used to optimise garbage collection.
/// We use it to store objects that are allocated frequently and may have long timelife.
/// This means those object may quickly fill gen 2 and cause frequent costly full heap collections.
/// Specificaly, a message that arrives to a silo and all the headers and ids inside it may stay alive long enough to reach gen 2.
/// Therefore, we store all ids in interner to re-use their memory accros different messages.
/// </summary>
/// <typeparam name="K">Type of objects to be used for intern keys</typeparam>
/// <typeparam name="T">Type of objects to be interned / cached</typeparam>
internal class Interner<K, T> where T : class
{
    private static readonly string internCacheName = "Interner-" + typeof(T).Name;
    private readonly Logger logger;
    private readonly TimeSpan cacheCleanupInterval;
    private readonly SafeTimer cacheCleanupTimer;

    // Weak references let the GC reclaim interned objects that are no longer
    // referenced anywhere else; dead entries are pruned by the cleanup timer.
    [NonSerialized]
    private readonly ConcurrentDictionary<K, WeakReference> internCache;

    /// <summary>Creates an interner with the small default capacity and no cleanup timer.</summary>
    public Interner()
        : this(InternerConstants.SIZE_SMALL)
    {
    }

    /// <summary>Creates an interner with the given initial capacity and no cleanup timer.</summary>
    public Interner(int initialSize)
        : this(initialSize, Constants.INFINITE_TIMESPAN)
    {
    }

    /// <summary>
    /// Creates an interner.
    /// </summary>
    /// <param name="initialSize">Initial cache capacity; non-positive values fall back to SIZE_MEDIUM.</param>
    /// <param name="cleanupFreq">How often dead entries are swept; a non-positive value disables the sweep timer.</param>
    public Interner(int initialSize, TimeSpan cleanupFreq)
    {
        if (initialSize <= 0) initialSize = InternerConstants.SIZE_MEDIUM;
        int concurrencyLevel = Environment.ProcessorCount * 4; // Default from ConcurrentDictionary class in .NET 4.0
        logger = LogManager.GetLogger(internCacheName, LoggerType.Runtime);

        this.internCache = new ConcurrentDictionary<K, WeakReference>(concurrencyLevel, initialSize);

        this.cacheCleanupInterval = (cleanupFreq <= TimeSpan.Zero) ? Constants.INFINITE_TIMESPAN : cleanupFreq;
        if (Constants.INFINITE_TIMESPAN != cacheCleanupInterval)
        {
            if (logger.IsVerbose) logger.Verbose(ErrorCode.Runtime_Error_100298, "Starting {0} cache cleanup timer with frequency {1}", internCacheName, cacheCleanupInterval);
            cacheCleanupTimer = new SafeTimer(InternCacheCleanupTimerCallback, null, cacheCleanupInterval, cacheCleanupInterval);
        }
#if DEBUG_INTERNER
        StringValueStatistic.FindOrCreate(internCacheName, () => String.Format("Size={0}, Content=" + Environment.NewLine + "{1}", internCache.Count, PrintInternerContent()));
#endif
    }

    /// <summary>
    /// Find cached copy of object with specified key, otherwise create new one using the supplied creator-function.
    /// </summary>
    /// <param name="key">key to find</param>
    /// <param name="creatorFunc">function to create new object and store for this key if no cached copy exists</param>
    /// <returns>Object with specified key - either previous cached copy or newly created</returns>
    public T FindOrCreate(K key, Func<T> creatorFunc)
    {
        T obj = null;
        // NOTE: GetOrAdd may invoke this factory even when another thread's
        // entry wins the race; in that case the created object is discarded
        // and replaced by the cached one below.
        WeakReference cacheEntry = internCache.GetOrAdd(key,
            (k) => {
                obj = creatorFunc();
                return new WeakReference(obj);
            });
        if (cacheEntry != null)
        {
            if (cacheEntry.IsAlive)
            {
                // Re-use cached object
                // NOTE(review): the IsAlive/Target pair is not atomic - the GC may
                // collect between the two reads. That race is tolerated: obj ends
                // up null and is recreated below.
                obj = cacheEntry.Target as T;
            }
        }
        if (obj == null)
        {
            // Create new object and (re)publish a live entry for this key,
            // then read back whatever entry actually won the update.
            obj = creatorFunc();
            cacheEntry = new WeakReference(obj);
            obj = internCache.AddOrUpdate(key, cacheEntry, (k, w) => cacheEntry).Target as T;
        }
        return obj;
    }

    /// <summary>
    /// Try to find a cached copy of the object with the specified key.
    /// Does not create or insert anything.
    /// </summary>
    /// <param name="key">key to find</param>
    /// <param name="obj">The existing value if the key is found</param>
    /// <returns>true if a live cached object was found; false otherwise</returns>
    public bool TryFind(K key, out T obj)
    {
        obj = null;
        WeakReference cacheEntry;
        if(internCache.TryGetValue(key, out cacheEntry))
        {
            if (cacheEntry != null)
            {
                if (cacheEntry.IsAlive)
                {
                    // Target may still turn out null if the GC ran between the
                    // IsAlive check and this read; report found only if live.
                    obj = cacheEntry.Target as T;
                    return obj != null;
                }
            }
        }
        return false;
    }

    /// <summary>
    /// Find cached copy of object with specified key, otherwise store the supplied one.
    /// </summary>
    /// <param name="key">key to find</param>
    /// <param name="obj">The new object to store for this key if no cached copy exists</param>
    /// <returns>Object with specified key - either previous cached copy or justed passed in</returns>
    public T Intern(K key, T obj)
    {
        return FindOrCreate(key, () => obj);
    }

    /// <summary>
    /// Intern the specified object, replacing any previous cached copy of object with specified key if the new object has a more derived type than the cached object
    /// </summary>
    /// <param name="key">object key</param>
    /// <param name="obj">object to be interned</param>
    /// <returns>Interned copy of the object with specified key</returns>
    public T InternAndUpdateWithMoreDerived(K key, T obj)
    {
        T obj1 = obj;
        WeakReference cacheEntry = internCache.GetOrAdd(key, k => new WeakReference(obj1));
        if (cacheEntry != null)
        {
            if (cacheEntry.IsAlive)
            {
                T obj2 = cacheEntry.Target as T;

                // Decide whether the old object or the new one has the most specific / derived type
                Type tNew = obj.GetType();
                Type tOld = obj2.GetType();
                if (tNew != tOld && tOld.IsAssignableFrom(tNew))
                {
                    // Keep and use the more specific type
                    // (tNew is a subtype of tOld, so the new object replaces the cached one)
                    cacheEntry.Target = obj;
                    return obj;
                }
                else
                {
                    // Re-use cached object
                    return obj2;
                }
            }
            else
            {
                // Cached object was collected: revive the entry with the new object.
                cacheEntry.Target = obj;
                return obj;
            }
        }
        else
        {
            // No entry at all (GetOrAdd returned null): publish a fresh one and
            // read back whichever entry won the update.
            cacheEntry = new WeakReference(obj);
            obj = internCache.AddOrUpdate(key, cacheEntry, (k, w) => cacheEntry).Target as T;
            return obj;
        }
    }

    /// <summary>Empties the cache and stops the cleanup timer, if one was started.</summary>
    public void StopAndClear()
    {
        internCache.Clear();
        if(cacheCleanupTimer != null)
        {
            cacheCleanupTimer.Dispose();
        }
    }

    /// <summary>Returns a snapshot of all currently live interned objects.</summary>
    public List<T> AllValues()
    {
        List<T> values = new List<T>();
        foreach (var e in internCache)
        {
            if (e.Value != null && e.Value.IsAlive && e.Value.Target != null)
            {
                T obj = e.Value.Target as T;
                if (obj != null)
                {
                    values.Add(obj);
                }
            }
        }
        return values;
    }

    // Timer callback: sweeps the cache and removes entries whose weakly
    // referenced target has been garbage collected.
    private void InternCacheCleanupTimerCallback(object state)
    {
        Stopwatch clock = null;
        long numEntries = 0;
        // Only pay for the stopwatch and entry count when the result will be logged.
        var removalResultsLoggingNeeded = logger.IsVerbose || logger.IsVerbose2;
        if (removalResultsLoggingNeeded)
        {
            clock = new Stopwatch();
            clock.Start();
            numEntries = internCache.Count;
        }
        foreach (var e in internCache)
        {
            if (e.Value == null || e.Value.IsAlive == false || e.Value.Target == null)
            {
                WeakReference weak;
                bool ok = internCache.TryRemove(e.Key, out weak);
                if (!ok)
                {
                    if (logger.IsVerbose) logger.Verbose(ErrorCode.Runtime_Error_100295, "Could not remove old {0} entry: {1} ", internCacheName, e.Key);
                }
            }
        }
        if (!removalResultsLoggingNeeded) return;

        var numRemoved = numEntries - internCache.Count;
        if (numRemoved > 0)
        {
            if (logger.IsVerbose) logger.Verbose(ErrorCode.Runtime_Error_100296, "Removed {0} / {1} unused {2} entries in {3}", numRemoved, numEntries, internCacheName, clock.Elapsed);
        }
        else
        {
            if (logger.IsVerbose2) logger.Verbose2(ErrorCode.Runtime_Error_100296, "Removed {0} / {1} unused {2} entries in {3}", numRemoved, numEntries, internCacheName, clock.Elapsed);
        }
    }

    // Debug helper (used by the DEBUG_INTERNER statistic): dumps all live
    // key->value pairs, one per line.
    private string PrintInternerContent()
    {
        StringBuilder s = new StringBuilder();
        foreach (var e in internCache)
        {
            if (e.Value != null && e.Value.IsAlive && e.Value.Target != null)
            {
                s.AppendLine(String.Format("{0}->{1}", e.Key, e.Value.Target));
            }
        }
        return s.ToString();
    }
}
}
| {
"content_hash": "760999a8e4dd78eb65a6d3b6aa5e8591",
"timestamp": "",
"source": "github",
"line_count": 282,
"max_line_length": 190,
"avg_line_length": 37.97872340425532,
"alnum_prop": 0.5334267040149393,
"repo_name": "sebastianburckhardt/orleans",
"id": "e453a0db2f748759df21c5e108c79db66ce01386",
"size": "10710",
"binary": false,
"copies": "2",
"ref": "refs/heads/geo-orleans",
"path": "src/Orleans/IDs/Interner.cs",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "17167"
},
{
"name": "C#",
"bytes": "6929530"
},
{
"name": "F#",
"bytes": "3772"
},
{
"name": "GLSL",
"bytes": "74"
},
{
"name": "Groovy",
"bytes": "903"
},
{
"name": "HTML",
"bytes": "234868"
},
{
"name": "PLpgSQL",
"bytes": "52704"
},
{
"name": "PowerShell",
"bytes": "111609"
},
{
"name": "Protocol Buffer",
"bytes": "1683"
},
{
"name": "Smalltalk",
"bytes": "1584"
},
{
"name": "Visual Basic",
"bytes": "25531"
}
],
"symlink_target": ""
} |
Web developer
### Projects:
* [Google Wifi Status](https://www.npmjs.com/package/google-wifi-status)
* [File Hash Verifier](https://joelgeorgev.github.io/file-hash-verifier/)
* [Emoji Chatbot](https://joelgeorgev.github.io/emoji-chatbot)
* [React TypeScript Setup Guide](https://github.com/joelgeorgev/react-ts-setup-guide)
* [React Checkbox Tree](https://joelgeorgev.github.io/react-checkbox-tree/)
* [Learning Observable in TypeScript](https://github.com/joelgeorgev/learning-observable-typescript)
### Find me on:
* [Twitter](https://www.twitter.com/joelgeorgev)
* [Keybase](https://www.keybase.io/joelgeorgev)
* [Reddit](https://www.reddit.com/user/joelgeorgev)
* [Medium](https://medium.com/@joelgeorgev) | {
"content_hash": "94f939f43864f54597b5c3b8049810c7",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 100,
"avg_line_length": 47.4,
"alnum_prop": 0.7524613220815752,
"repo_name": "joelgeorgev/joelgeorgev.github.io",
"id": "568c96d7da55de036b789d02cb1db36c170466ba",
"size": "723",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "index.md",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12189"
},
{
"name": "JavaScript",
"bytes": "48"
}
],
"symlink_target": ""
} |
using System.Collections.Generic;
using HS.SyntaxHighlighting;
namespace HS.PasteBin.Web.Tests
{
/// <summary>
/// Test double for <see cref="IHighlighter"/>. It simply remembers the
/// supplied language map and "highlights" by prefixing the text with the
/// language's display name and a pipe, so tests can assert both the language
/// lookup and the text pass-through.
/// </summary>
public class MockHighlighter : IHighlighter
{
    private readonly IDictionary<string, string> languageMap;

    public MockHighlighter(IDictionary<string, string> languages)
    {
        languageMap = languages;
    }

    /// <summary>The language key/name pairs this mock was constructed with.</summary>
    public IDictionary<string, string> Languages
    {
        get
        {
            return languageMap;
        }
    }

    /// <summary>
    /// Produces a deterministic marker of the form "languageName|text".
    /// </summary>
    public string Highlight(string text, string languageKey)
    {
        string languageName = languageMap[languageKey];
        return string.Concat(languageName, "|", text);
    }
}
}
| {
"content_hash": "a046f48744b23d7b076a8f938e59b1be",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 69,
"avg_line_length": 25.4,
"alnum_prop": 0.5905511811023622,
"repo_name": "rikkus/hs-pastebin",
"id": "25d0492d4bea847231126743be90fac854ae2f4f",
"size": "637",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "HS.PasteBin.Web.Tests/MockHighlighter.cs",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "5860"
},
{
"name": "C#",
"bytes": "65555"
},
{
"name": "CSS",
"bytes": "2769"
}
],
"symlink_target": ""
} |
<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>jQuery UI Widget - Default functionality</title>
<link rel="stylesheet" href="../../themes/base/jquery.ui.all.css">
<script src="../../jquery-1.9.1.js"></script>
<script src="../../ui/jquery.ui.core.js"></script>
<script src="../../ui/jquery.ui.position.js"></script>
<script src="../../ui/jquery.ui.widget.js"></script>
<script src="../../ui/jquery.ui.button.js"></script>
<link rel="stylesheet" href="../demos.css">
<style>
.custom-colorize {
font-size: 20px;
position: relative;
width: 75px;
height: 75px;
}
.custom-colorize-changer {
font-size: 10px;
position: absolute;
right: 0;
bottom: 0;
}
</style>
<script>
// jQuery UI widget-factory demo: defines a "custom.colorize" widget that
// paints its element with an RGB background color and offers a button to
// randomize the color. Runs once the DOM is ready.
$(function() {
    // the widget definition, where "custom" is the namespace,
    // "colorize" the widget name
    $.widget( "custom.colorize", {
        // default options (RGB channels are 0-255)
        options: {
            red: 255,
            green: 0,
            blue: 0,

            // callbacks (fired as "colorizechange" / "colorizerandom" events)
            change: null,
            random: null
        },

        // the constructor
        _create: function() {
            this.element
                // add a class for theming
                .addClass( "custom-colorize" )
                // prevent double click to select text
                .disableSelection();

            this.changer = $( "<button>", {
                text: "change",
                "class": "custom-colorize-changer"
            })
            .appendTo( this.element )
            .button();

            // bind click events on the changer button to the random method
            this._on( this.changer, {
                // _on won't call random when widget is disabled
                click: "random"
            });
            // paint the initial background color
            this._refresh();
        },

        // called when created, and later when changing options
        _refresh: function() {
            this.element.css( "background-color", "rgb(" +
                this.options.red +"," +
                this.options.green + "," +
                this.options.blue + ")"
            );

            // trigger a callback/event
            this._trigger( "change" );
        },

        // a public method to change the color to a random value
        // can be called directly via .colorize( "random" )
        random: function( event ) {
            var colors = {
                red: Math.floor( Math.random() * 256 ),
                green: Math.floor( Math.random() * 256 ),
                blue: Math.floor( Math.random() * 256 )
            };

            // trigger an event, check if it's canceled
            // (a "random" handler returning false vetoes the color change)
            if ( this._trigger( "random", event, colors ) !== false ) {
                this.option( colors );
            }
        },

        // events bound via _on are removed automatically
        // revert other modifications here
        _destroy: function() {
            // remove generated elements
            this.changer.remove();

            this.element
                .removeClass( "custom-colorize" )
                .enableSelection()
                .css( "background-color", "transparent" );
        },

        // _setOptions is called with a hash of all options that are changing
        // always refresh when changing options
        _setOptions: function() {
            // _super and _superApply handle keeping the right this-context
            this._superApply( arguments );
            this._refresh();
        },

        // _setOption is called for each individual option that is changing
        _setOption: function( key, value ) {
            // prevent invalid color values (silently ignore out-of-range channels)
            if ( /red|green|blue/.test(key) && (value < 0 || value > 255) ) {
                return;
            }
            this._super( key, value );
        }
    });

    // initialize with default options
    $( "#my-widget1" ).colorize();

    // initialize with two customized options
    $( "#my-widget2" ).colorize({
        red: 60,
        blue: 60
    });

    // initialize with custom green value
    // and a random callback to allow only colors with enough green
    $( "#my-widget3" ).colorize( {
        green: 128,
        random: function( event, ui ) {
            return ui.green > 128;
        }
    });

    // click to toggle enabled/disabled
    $( "#disable" ).click(function() {
        // use the custom selector created for each widget to find all instances
        // all instances are toggled together, so we can check the state from the first
        if ( $( ":custom-colorize" ).colorize( "option", "disabled" ) ) {
            $( ":custom-colorize" ).colorize( "enable" );
        } else {
            $( ":custom-colorize" ).colorize( "disable" );
        }
    });

    // click to set options after initialization
    $( "#black" ).click( function() {
        $( ":custom-colorize" ).colorize( "option", {
            red: 0,
            green: 0,
            blue: 0
        });
    });
});
</script>
</head>
<body>
<div>
<div id="my-widget1">color me</div>
<div id="my-widget2">color me</div>
<div id="my-widget3">color me</div>
<button id="disable">Toggle disabled option</button>
<button id="black">Go black</button>
</div>
<div class="demo-description">
<p>This demo shows a simple custom widget built using the widget
factory (jquery.ui.widget.js).</p>
<p>The three boxes are initialized in different ways. Clicking
them changes their background color. View source to see how it works,
it's heavily commented.</p>
</div>
</body>
</html>
| {
"content_hash": "bcadb64afa1880c1ef261f423b457b97",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 82,
"avg_line_length": 26.214285714285715,
"alnum_prop": 0.6174806120310208,
"repo_name": "lundskommun/resejamforaren",
"id": "6fd5239df570fb538c4580a143bca0559e66758d",
"size": "4771",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rjweb/static/rjweb/lib/jquery-ui-1.10.3.custom/development-bundle/demos/widget/default.html",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "108309"
},
{
"name": "HTML",
"bytes": "471078"
},
{
"name": "JavaScript",
"bytes": "832105"
},
{
"name": "Python",
"bytes": "8731"
},
{
"name": "Shell",
"bytes": "400"
}
],
"symlink_target": ""
} |
#pragma once
#include "common/RhoStd.h"
#include "common/IRhoClassFactory.h"
#include "common/RhoMutexLock.h"
#include "net/INetRequest.h"
#include "ISyncProtocol.h"
#include "SyncSource.h"
#include "SyncNotify.h"
#include "db/DBAdapter.h"
#include "logging/RhoLog.h"
namespace rho {
namespace sync {
// Core synchronization engine. Drives full/per-source syncs, searches and
// bulk syncs, and implements net::IRhoSession (getSession/getContentType)
// so network requests carry the engine's session state.
class CSyncEngine : public net::IRhoSession
{
DEFINE_LOGCLASS;
public:
// Engine state; esStop/esExit abort the sync loop (see isContinueSync()).
enum ESyncState{ esNone, esSyncAllSources, esSyncSource, esSearch, esStop, esExit };
// Bulk-sync progress markers.
enum EBulkSyncState { ebsNotSynced = 0, ebsSynced = 1, ebsLoadBlobs = 2 };
// Identifies a sync source either by numeric ID or by name.
struct CSourceID
{
String m_strName;
int m_nID;
CSourceID(int id, const String& strName ){ m_nID = id; m_strName = strName; }
CSourceID(const String& strName ){ m_strName = strName; }
String toString()const;
boolean isEqual(CSyncSource& src)const;
};
// Mutex-protected per-source property bag (string key/value, keyed by source ID).
class CSourceOptions
{
common::CMutex m_mxSrcOptions;
HashtablePtr<int, Hashtable<String,String>* > m_hashSrcOptions;
public:
void setProperty(int nSrcID, const char* szPropName, const char* szPropValue);
String getProperty(int nSrcID, const char* szPropName);
boolean getBoolProperty(int nSrcID, const char* szPropName);
int getIntProperty(int nSrcID, const char* szPropName);
void clearProperties();
};
private:
VectorPtr<CSyncSource*> m_sources;
NetRequest m_NetRequest, m_NetRequestClientID;
common::CAutoPtr<ISyncProtocol> m_SyncProtocol;
ESyncState m_syncState;
String m_clientID;
common::CMutex m_mxLoadClientID, m_mxSessionID;
String m_strSession;
CSyncNotify m_oSyncNotify;
boolean m_bStopByUser;
int m_nSyncPageSize;
boolean m_bNoThreaded;
int m_nErrCode;
String m_strError;
boolean m_bIsSearch, m_bIsSchemaChanged;
static CSourceOptions m_oSourceOptions;
// Separate request wrapper for client-ID traffic so it can be cancelled
// independently of the regular sync request (see stopSync/exitSync).
net::CNetRequestWrapper getNetClientID(){ return getNetRequest(&m_NetRequestClientID); }
public:
CSyncEngine();
~CSyncEngine(void){}
static CSourceOptions& getSourceOptions(){ return m_oSourceOptions; }
net::CNetRequestWrapper getNet(){ return getNetRequest(&m_NetRequest); }
// Sync entry points.
void doSyncAllSources(const String& strQueryParams, boolean bSyncOnlyChangedSources);
void doSyncSource(const CSourceID& oSrcID, const String& strQueryParams);
void doSearch(rho::Vector<rho::String>& arSources, String strParams, const String& from, boolean bSearchSyncChanges, int nProgressStep);
// Login/session management.
void login(String name, String password, const CSyncNotification& oNotify);
boolean isLoggedIn();
String loadSession();
void logout();
void logout_int();
void setSyncServer(const char* syncserver);
String getSyncServer() const;
// State queries and transitions.
void setState(ESyncState eState){ m_syncState = eState; }
ESyncState getState()const{ return m_syncState; }
boolean isSearch()const{ return m_bIsSearch; }
boolean isContinueSync()const{ return m_syncState != esExit && m_syncState != esStop; }
boolean isSyncing()const{ return m_syncState == esSyncAllSources || m_syncState == esSyncSource || m_syncState == esSearch; }
// Stopping also cancels any in-flight requests on both wrappers.
void stopSync(){ if (isContinueSync()){ setState(esStop); m_NetRequest.cancel();m_NetRequestClientID.cancel();} }
void stopSyncByUser(){ m_bStopByUser = true; stopSync(); }
void exitSync(){ setState(esExit); m_NetRequest.cancel(); m_NetRequestClientID.cancel();}
boolean isStoppedByUser(){ return m_bStopByUser; }
void setSslVerifyPeer(boolean b);
bool getSslVerifyPeer();
//private:
String getClientID()const{ return m_clientID; }
void setSession(String strSession){m_strSession=strSession;}
boolean isSessionExist(){ return m_strSession.length() > 0; }
void setSchemaChanged(boolean bChanged){ m_bIsSchemaChanged = bChanged; }
boolean isSchemaChanged(){ return m_bIsSchemaChanged; }
//IRhoSession
virtual const String& getSession()
{
synchronized(m_mxSessionID){
return m_strSession;
}
}
virtual const String& getContentType(){ return getProtocol().getContentType();}
void loadAllSources();
void prepareSync(ESyncState eState, const CSourceID* oSrcID);
VectorPtr<CSyncSource*>& getSources(){ return m_sources; }
//int getStartSource();
// Client-ID handling: local cache plus server round-trips.
String loadClientID();
String readClientID();
String requestClientIDByNet();
boolean resetClientIDByNet(const String& strClientID);//throws Exception
void doBulkSync();//throws Exception
CSyncNotify& getNotify(){ return m_oSyncNotify; }
ISyncProtocol& getProtocol(){ return *m_SyncProtocol; }
CSyncSource* findSourceByName(const String& strSrcName);
CSyncSource* findSourceById(int srcId);
int getSyncPageSize() { return m_nSyncPageSize; }
void setSyncPageSize(int nPageSize){ m_nSyncPageSize = nPageSize; }
boolean isNoThreadedMode(){ return m_bNoThreaded; }
void setNonThreadedMode(boolean b){m_bNoThreaded = b;}
void applyChangedValues(db::CDBAdapter& db);
private:
bool recoverSearch( const String& strUrl, const String& strBody, int& errorCode, String& strError, int nProgressStep );
CSyncSource* findSource(const CSourceID& oSrcID);
void loadBulkPartition(const String& strPartition);
String makeBulkDataFileName(String strDataUrl, String strDbPath, String strExt);
db::CDBAdapter& getUserDB(){ return db::CDBAdapter::getUserDB(); }
db::CDBAdapter& getDB(const String& strPartition){ return db::CDBAdapter::getDB(strPartition.c_str()); }
void initProtocol();
void processServerSources(String strSources);
void checkSourceAssociations();
void syncOneSource(int i, const String& strQueryParams, boolean bSyncOnlyIfChanged);
void syncAllSources(const String& strQueryParams, boolean bSyncOnlyChangedSources);
boolean processBlobs();
void loadBulkPartitions();
friend class CSyncSource;
};
}
}
| {
"content_hash": "07fcfd12b612564ebc065df5273bf494",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 141,
"avg_line_length": 35.16766467065868,
"alnum_prop": 0.7173505874340201,
"repo_name": "rhomobile/rhoconnect-client",
"id": "06a95a4c1b62cff80d949d4b85496e6961eb252d",
"size": "8310",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ext/rhoconnect-client/ext/shared/sync/SyncEngine.h",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "323"
},
{
"name": "C",
"bytes": "16879"
},
{
"name": "C++",
"bytes": "376671"
},
{
"name": "CSS",
"bytes": "379578"
},
{
"name": "HTML",
"bytes": "771943"
},
{
"name": "Java",
"bytes": "77221"
},
{
"name": "JavaScript",
"bytes": "813415"
},
{
"name": "Objective-C",
"bytes": "142685"
},
{
"name": "QMake",
"bytes": "3528"
},
{
"name": "Ruby",
"bytes": "149788"
},
{
"name": "Shell",
"bytes": "330"
}
],
"symlink_target": ""
} |
Google-Says
===========
A party game where people guess what Google will suggest.
| {
"content_hash": "77646dedc7b1eb6a05f599832760014c",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 56,
"avg_line_length": 20.5,
"alnum_prop": 0.6951219512195121,
"repo_name": "atav32/Google-Says",
"id": "4a1c8cd914707778ca2ed8b4e1a1c97343b476a6",
"size": "82",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "README.md",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
# Push the locally built "nilqed/fricas:latest" image to Docker Hub
# (requires push access to the nilqed/fricas repository).
sudo docker push nilqed/fricas:latest
| {
"content_hash": "32b024bea01f528b58c816c84ad271cc",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 37,
"avg_line_length": 38,
"alnum_prop": 0.8421052631578947,
"repo_name": "nilqed/fricas_docker",
"id": "96461a410a32e442f9e20c7eea0edd65abb96a21",
"size": "48",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fricas/push.sh",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "13443"
},
{
"name": "Shell",
"bytes": "818"
}
],
"symlink_target": ""
} |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>lens: Not compatible</title>
<link rel="shortcut icon" type="image/png" href="../../../../../favicon.png" />
<link href="../../../../../bootstrap.min.css" rel="stylesheet">
<link href="../../../../../bootstrap-custom.css" rel="stylesheet">
<link href="//maxcdn.bootstrapcdn.com/font-awesome/4.2.0/css/font-awesome.min.css" rel="stylesheet">
<script src="../../../../../moment.min.js"></script>
<!-- HTML5 Shim and Respond.js IE8 support of HTML5 elements and media queries -->
<!-- WARNING: Respond.js doesn't work if you view the page via file:// -->
<!--[if lt IE 9]>
<script src="https://oss.maxcdn.com/html5shiv/3.7.2/html5shiv.min.js"></script>
<script src="https://oss.maxcdn.com/respond/1.4.2/respond.min.js"></script>
<![endif]-->
</head>
<body>
<div class="container">
<div class="navbar navbar-default" role="navigation">
<div class="container-fluid">
<div class="navbar-header">
<a class="navbar-brand" href="../../../../.."><i class="fa fa-lg fa-flag-checkered"></i> Coq bench</a>
</div>
<div id="navbar" class="collapse navbar-collapse">
<ul class="nav navbar-nav">
<li><a href="../..">clean / released</a></li>
<li class="active"><a href="">8.13.0 / lens - 1.0.1+8.12</a></li>
</ul>
</div>
</div>
</div>
<div class="article">
<div class="row">
<div class="col-md-12">
<a href="../..">« Up</a>
<h1>
lens
<small>
1.0.1+8.12
<span class="label label-info">Not compatible</span>
</small>
</h1>
<p><em><script>document.write(moment("2021-04-16 16:56:55 +0000", "YYYY-MM-DD HH:mm:ss Z").fromNow());</script> (2021-04-16 16:56:55 UTC)</em><p>
<h2>Context</h2>
<pre># Packages matching: installed
# Name # Installed # Synopsis
base-bigarray base
base-threads base
base-unix base
conf-findutils 1 Virtual package relying on findutils
conf-gmp 3 Virtual package relying on a GMP lib system installation
coq 8.13.0 Formal proof management system
num 1.4 The legacy Num library for arbitrary-precision integer and rational arithmetic
ocaml 4.11.1 The OCaml compiler (virtual package)
ocaml-base-compiler 4.11.1 Official release 4.11.1
ocaml-config 1 OCaml Switch Configuration
ocamlfind 1.9.1 A library manager for OCaml
zarith 1.12 Implements arithmetic and logical operations over arbitrary-precision integers
# opam file:
opam-version: "2.0"
synopsis: "Generation of lenses for record datatypes"
maintainer: "gregory@bedrocksystems.com"
authors: [
"Gregory Malecha <gregory@bedrocksystems.com>"
]
homepage: "https://github.com/bedrocksystems/coq-lens"
dev-repo: "git://github.com/bedrocksystems/coq-lens.git"
bug-reports: "https://github.com/bedrocksystems/coq-lens/issues"
license: "LGPL2.1+BedRock"
build: [
[make "-j%{jobs}%"]
]
install: [
[make "install"]
]
depends: [
"ocaml"
"coq" {>= "8.12" & < "8.13~"}
"coq-metacoq-template" { = "1.0~beta1+8.12" }
]
tags: [
"logpath:Lens"
"date: 2020-11-18"
]
url {
src: "https://github.com/bedrocksystems/coq-lens/archive/v1.0.1.tar.gz"
checksum: "sha256=c1092aa89e885dd4abe1abc0605474440e8a763569be0accbbf6af4b129b3a91"
}
</pre>
<h2>Lint</h2>
<dl class="dl-horizontal">
<dt>Command</dt>
<dd><code>true</code></dd>
<dt>Return code</dt>
<dd>0</dd>
</dl>
<h2>Dry install</h2>
<p>Dry install with the current Coq version:</p>
<dl class="dl-horizontal">
<dt>Command</dt>
<dd><code>opam install -y --show-action coq-lens.1.0.1+8.12 coq.8.13.0</code></dd>
<dt>Return code</dt>
<dd>5120</dd>
<dt>Output</dt>
<dd><pre>[NOTE] Package coq is already installed (current version is 8.13.0).
The following dependencies couldn't be met:
- coq-lens -> coq < 8.13~ -> ocaml < 4.10
base of this switch (use `--unlock-base' to force)
Your request can't be satisfied:
- No available version of coq satisfies the constraints
No solution found, exiting
</pre></dd>
</dl>
<p>Dry install without Coq/switch base, to test if the problem was incompatibility with the current Coq/OCaml version:</p>
<dl class="dl-horizontal">
<dt>Command</dt>
<dd><code>opam remove -y coq; opam install -y --show-action --unlock-base coq-lens.1.0.1+8.12</code></dd>
<dt>Return code</dt>
<dd>0</dd>
</dl>
<h2>Install dependencies</h2>
<dl class="dl-horizontal">
<dt>Command</dt>
<dd><code>true</code></dd>
<dt>Return code</dt>
<dd>0</dd>
<dt>Duration</dt>
<dd>0 s</dd>
</dl>
<h2>Install</h2>
<dl class="dl-horizontal">
<dt>Command</dt>
<dd><code>true</code></dd>
<dt>Return code</dt>
<dd>0</dd>
<dt>Duration</dt>
<dd>0 s</dd>
</dl>
<h2>Installation size</h2>
<p>No files were installed.</p>
<h2>Uninstall</h2>
<dl class="dl-horizontal">
<dt>Command</dt>
<dd><code>true</code></dd>
<dt>Return code</dt>
<dd>0</dd>
<dt>Missing removes</dt>
<dd>
none
</dd>
<dt>Wrong removes</dt>
<dd>
none
</dd>
</dl>
</div>
</div>
</div>
<hr/>
<div class="footer">
<p class="text-center">
<small>Sources are on <a href="https://github.com/coq-bench">GitHub</a>. © Guillaume Claret.</small>
</p>
</div>
</div>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js"></script>
<script src="../../../../../bootstrap.min.js"></script>
</body>
</html>
| {
"content_hash": "38662c822ca7c3de0e38c83e3c8cb5e1",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 157,
"avg_line_length": 39.78362573099415,
"alnum_prop": 0.5378509481111274,
"repo_name": "coq-bench/coq-bench.github.io",
"id": "2b93a2b5a740657d649017cb74a12f14b3ef683f",
"size": "6805",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "clean/Linux-x86_64-4.11.1-2.0.7/released/8.13.0/lens/1.0.1+8.12.html",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
<section class="container" data-ng-controller="CalendarsController" ng-init="find()">
<div class="page-header">
<h1>Request New Event</h1>
</div>
<div class="col-md-12">
<form class="form-horizontal" data-ng-submit="createMongo()" novalidate>
<fieldset>
<div class="form-group">
<label class="control-label" for="name">Name of the Event</label>
<div class="controls">
<input type="text" data-ng-model="name" id="name" class="form-control" placeholder="Event Title" required>
</div>
<!-- Enter the description of the app -->
<label class="control-label" for="description">Description of the Event</label>
<div class="controls">
<textarea rows="2" maxlength="512" type="text" data-ng-model="description" id="description" class="form-control" placeholder="Description" required>
</textarea>
</div>
<!-- Upload picture or give link -->
<label class="control-label" for="imageURL">Link to the Event's image</label>
<div class="controls">
<input type="text" data-ng-model="imageURL" id="imageURL" class="form-control" placeholder="https://www.imageURL.com" required>
</div>
<!-- link to the app -->
<label class="control-label" for="appLink">Link to the Event</label>
<div class="controls">
<input type="text" data-ng-model="link" id="link" class="form-control" placeholder="https://www.eventpage.com" required>
</div>
<div style="display:inline-block; min-height:290px;">
<label class="control-label" for="date">Date of the Event</label>
<uib-datepicker data-ng-model="manageEvent.date" min-date="minDate" show-weeks="true" class="well well-sm" custom-class="getDayClass(date, mode)"></uib-datepicker>
</div>
<div>
<label class="control-label" for="date">Time of the Event</label>
<uib-timepicker data-ng-model="manageEvent.date" hour-step="1" minute-step="15" show-meridian="true"></uib-timepicker>
</div>
</div>
<div class="form-group">
<input type="submit" class="btn btn-default">
</div>
<div data-ng-show="error" class="text-danger">
<strong data-ng-bind="error"></strong>
</div>
</fieldset>
</form>
</div>
</section> | {
"content_hash": "d77374a26fa4ea241f9a587527a0bbbd",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 187,
"avg_line_length": 57.326530612244895,
"alnum_prop": 0.5122819508721965,
"repo_name": "CEN3031-7C/project",
"id": "0d707dae6890691bd1b9930807657aaaefa4be3b",
"size": "2809",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/calendars/client/views/submit-event.client.view.html",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9377"
},
{
"name": "HTML",
"bytes": "106117"
},
{
"name": "JavaScript",
"bytes": "449292"
},
{
"name": "Shell",
"bytes": "685"
}
],
"symlink_target": ""
} |
<?php
/**
* Created by PhpStorm.
*
* Duc-Anh LE (ducanh.ke@gmail.com)
*
* User: ducanh-ki
* Date: 3/13/17
* Time: 4:12 PM
*/
namespace CreativeDelta\User\Core\Domain\Entity;
/**
 * Plain data holder (entity) for a single session-log record.
 *
 * Values are stored and returned exactly as given; no validation or type
 * coercion happens here.
 */
class SessionLog
{
// Record identifier.
protected $id;
// Timestamp of the log entry (format not enforced here).
protected $datetime;
// Hash value of a previous entry/state; semantics defined by callers.
protected $previousHash;
// URL to return to — presumably after an auth flow; confirm with callers.
protected $returnUrl;
// Arbitrary payload serialized as JSON.
protected $dataJson;
/**
 * @return mixed
 */
public function getId()
{
return $this->id;
}
/**
 * @param mixed $id
 */
public function setId($id)
{
$this->id = $id;
}
/**
 * @return mixed
 */
public function getDatetime()
{
return $this->datetime;
}
/**
 * @param mixed $datetime
 */
public function setDatetime($datetime)
{
$this->datetime = $datetime;
}
/**
 * @return mixed
 */
public function getPreviousHash()
{
return $this->previousHash;
}
/**
 * @param mixed $previousHash
 */
public function setPreviousHash($previousHash)
{
$this->previousHash = $previousHash;
}
/**
 * @return mixed
 */
public function getReturnUrl()
{
return $this->returnUrl;
}
/**
 * @param mixed $returnUrl
 */
public function setReturnUrl($returnUrl)
{
$this->returnUrl = $returnUrl;
}
/**
 * @return mixed
 */
public function getDataJson()
{
return $this->dataJson;
}
/**
 * @param mixed $dataJson
 */
public function setDataJson($dataJson)
{
$this->dataJson = $dataJson;
}
}
"content_hash": "e6be784511877377e461f37eebbaf9c2",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 50,
"avg_line_length": 15.375,
"alnum_prop": 0.5209505941213258,
"repo_name": "GoPlan/user-register-authentication",
"id": "1549649a8c7790807f1cee2d81be5bd7206bd14a",
"size": "1599",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Core/Domain/Entity/SessionLog.php",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "748"
},
{
"name": "CSS",
"bytes": "931"
},
{
"name": "HTML",
"bytes": "13671"
},
{
"name": "PHP",
"bytes": "120156"
}
],
"symlink_target": ""
} |
<?php
// Twilio PHP helper library — https://twilio.com/docs/libraries/php
require_once '/path/to/vendor/autoload.php';
use Twilio\Rest\Client;
// Credentials are read from environment variables (see http://twil.io/secure)
// instead of being hard-coded; find them at twilio.com/user/account.
$accountSid = getenv('TWILIO_ACCOUNT_SID');
$authToken = getenv('TWILIO_AUTH_TOKEN');
$twilio = new Client($accountSid, $authToken);
// Fetch a single IP access control list mapping directly by its sid.
// If you do not have a sid, see the list resource examples on this page.
$mapping = $twilio->sip
    ->domains("SD32a3c49700934481addd5ce1659f04d2")
    ->ipAccessControlListMappings("AL95a47094615fe05b7c17e62a7877836c")
    ->fetch();
echo $mapping->friendlyName;
| {
"content_hash": "17687d070e28658bd0c4301b70d52966",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 72,
"avg_line_length": 39.526315789473685,
"alnum_prop": 0.7496671105193076,
"repo_name": "TwilioDevEd/api-snippets",
"id": "0df2f258f4228677ed1df6fb110678c1c2749775",
"size": "751",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rest/sip-in/get-mappings-instance-old/get-mappings-instance.5.x.php",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "637161"
},
{
"name": "C++",
"bytes": "24856"
},
{
"name": "Go",
"bytes": "7217"
},
{
"name": "HTML",
"bytes": "335"
},
{
"name": "Java",
"bytes": "912474"
},
{
"name": "JavaScript",
"bytes": "512877"
},
{
"name": "M",
"bytes": "147"
},
{
"name": "Objective-C",
"bytes": "53325"
},
{
"name": "PHP",
"bytes": "517186"
},
{
"name": "Python",
"bytes": "442184"
},
{
"name": "Ruby",
"bytes": "438928"
},
{
"name": "Shell",
"bytes": "3854"
},
{
"name": "Swift",
"bytes": "42345"
},
{
"name": "TypeScript",
"bytes": "16767"
}
],
"symlink_target": ""
} |
/*************************************************************************/
/* video_stream.cpp */
/*************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* http://www.godotengine.org */
/*************************************************************************/
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/*************************************************************************/
#include "video_stream_theoraplayer.h"
#include "core/os/file_access.h"
#include "include/theoraplayer/TheoraPlayer.h"
#include "include/theoraplayer/TheoraTimer.h"
#include "include/theoraplayer/TheoraAudioInterface.h"
#include "include/theoraplayer/TheoraDataSource.h"
#include "include/theoraplayer/TheoraException.h"
#include "core/ring_buffer.h"
#include "core/os/thread_safe.h"
#include "core/globals.h"
static TheoraVideoManager* mgr = NULL;
// TheoraDataSource adapter that reads clip data through Godot's FileAccess.
// Takes ownership of the FileAccess handle and deletes it in the destructor.
// All operations degrade gracefully (error/zero result) when the file
// failed to open (fa == NULL).
class TPDataFA : public TheoraDataSource {
FileAccess* fa;
String data_name;
public:
// Read up to nBytes into output; returns bytes read, or -1 with no file.
int read(void* output,int nBytes) {
if (!fa)
return -1;
return fa->get_buffer((uint8_t*)output, nBytes);
};
//! returns a string representation of the DataSource, eg 'File: source.ogg'
virtual std::string repr() {
return data_name.utf8().get_data();
};
//! position the source pointer to byte_index from the start of the source
virtual void seek(unsigned long byte_index) {
if (!fa)
return;
fa->seek(byte_index);
};
//! return the size of the stream in bytes
virtual unsigned long size() {
if (!fa)
return 0;
return fa->get_len();
};
//! return the current position of the source pointer
virtual unsigned long tell() {
if (!fa)
return 0;
return fa->get_pos();
};
// Open p_path directly; fa stays NULL (and reads fail) if the open fails.
TPDataFA(const String& p_path) {
fa = FileAccess::open(p_path, FileAccess::READ);
data_name = "File: " + p_path;
};
// Wrap an already opened FileAccess; ownership transfers to this object.
TPDataFA(FileAccess* p_fa, const String& p_path) {
fa = p_fa;
data_name = "File: " + p_path;
};
~TPDataFA() {
if (fa)
memdelete(fa);
};
};
// Resampled audio stream fed incrementally by the decoder: float samples
// arrive via input() into a ring buffer and are drained into the mixer's
// 16-bit buffer by update().
class AudioStreamInput : public AudioStreamResampled {
_THREAD_SAFE_CLASS_;
int channels;
int freq;
RID stream_rid;
mutable RingBuffer<float> rb;
int rb_power; // size exponent passed to rb.resize(); bumped when space runs out
int total_wrote; // cumulative frames written toward the mixer
bool playing;
bool paused;
public:
virtual void play() {
_THREAD_SAFE_METHOD_
_setup(channels, freq, 256);
stream_rid=AudioServer::get_singleton()->audio_stream_create(get_audio_stream());
AudioServer::get_singleton()->stream_set_active(stream_rid,true);
AudioServer::get_singleton()->stream_set_volume_scale(stream_rid,1);
playing = true;
paused = false;
};
virtual void stop() {
_THREAD_SAFE_METHOD_
AudioServer::get_singleton()->stream_set_active(stream_rid,false);
//_clear_stream();
playing=false;
_clear();
};
// NOTE(review): always reports true, ignoring the 'playing' member — confirm intended.
virtual bool is_playing() const { return true; };
virtual void set_paused(bool p_paused) { paused = p_paused; };
virtual bool is_paused(bool p_paused) const { return paused; };
virtual void set_loop(bool p_enable) {};
virtual bool has_loop() const { return false; };
virtual float get_length() const { return 0; };
virtual String get_stream_name() const { return "Theora Audio Stream"; };
virtual int get_loop_count() const { return 1; };
virtual float get_pos() const { return 0; };
virtual void seek_pos(float p_time) {};
virtual UpdateMode get_update_mode() const { return UPDATE_THREAD; };
virtual bool _can_mix() const { return true; };
// Accept p_samples interleaved float samples from the decoder, growing the
// ring buffer when it is full, then mix immediately to reduce latency.
void input(float* p_data, int p_samples) {
_THREAD_SAFE_METHOD_;
//printf("input %i samples from %p\n", p_samples, p_data);
if (rb.space_left() < p_samples) {
rb_power += 1;
rb.resize(rb_power);
}
rb.write(p_data, p_samples);
update(); //update too here for less latency
};
// Drain as much of the ring buffer as the mixer can take, scaling floats
// by 32767 and clamping to signed 16-bit.
void update() {
_THREAD_SAFE_METHOD_;
int todo = get_todo();
int16_t* buffer = get_write_buffer();
int frames = rb.data_left()/channels;
const int to_write = MIN(todo, frames);
for (int i=0; i<to_write*channels; i++) {
int v = rb.read() * 32767;
int16_t sample = CLAMP(v,-32768,32767);
buffer[i] = sample;
};
write(to_write);
total_wrote += to_write;
};
// Samples still waiting in the ring buffer.
int get_pending() const {
return rb.data_left();
};
// Frames actually consumed by the mixer so far (written minus still queued).
int get_total_wrote() {
return total_wrote - (get_total() - get_todo());
};
AudioStreamInput(int p_channels, int p_freq) {
playing = false;
paused = true;
channels = p_channels;
freq = p_freq;
total_wrote = 0;
rb_power = 12;
rb.resize(rb_power);
};
~AudioStreamInput() {
stop();
};
};
// Bridges theoraplayer's audio callback to a Godot AudioStreamInput, and
// registers itself as the clip's timer (owner->setTimer) so it also drives
// playback time.
class TPAudioGodot : public TheoraAudioInterface, TheoraTimer {
Ref<AudioStreamInput> stream;
int sample_count;
int channels;
int freq;
public:
// Decoded PCM from theoraplayer is forwarded to the Godot stream.
void insertData(float* data, int nSamples) {
stream->input(data, nSamples);
};
TPAudioGodot(TheoraVideoClip* owner, int nChannels, int p_freq)
: TheoraAudioInterface(owner, nChannels, p_freq), TheoraTimer() {
printf("***************** audio interface constructor freq %i\n", p_freq);
channels = nChannels;
freq = p_freq;
stream = Ref<AudioStreamInput>(memnew(AudioStreamInput(nChannels, p_freq)));
stream->play();
sample_count = 0;
// install this object as the clip's timer
owner->setTimer(this);
};
void stop() {
stream->stop();
};
// TheoraTimer override: advance playback time by the given delta.
// (Audio-clock-based alternatives remain below, commented out.)
void update(float time_increase)
{
//mTime = (float)(stream->get_total_wrote()) / freq;
//mTime = MAX(0,mTime-AudioServer::get_singleton()->get_output_delay());
//mTime = (float)sample_count / channels / freq;
mTime += time_increase;
//float duration=mClip->getDuration();
//if (mTime > duration) mTime=duration;
//printf("time at timer is %f, %f, samples %i\n", mTime, time_increase, sample_count);
}
};
// Factory handed to TheoraVideoManager; builds one TPAudioGodot audio
// bridge per clip that needs audio output.
class TPAudioGodotFactory : public TheoraAudioInterfaceFactory {
public:
TheoraAudioInterface* createInstance(TheoraVideoClip* owner, int nChannels, int freq) {
printf("************** creating audio output\n");
return new TPAudioGodot(owner, nChannels, freq);
};
};
static TPAudioGodotFactory* audio_factory = NULL;
// Stop playback and rewind the clip; 'started' is re-armed so the next
// play() waits for frames to buffer again (see update()).
void VideoStreamTheoraplayer::stop() {
playing = false;
if (clip) {
clip->stop();
clip->seek(0);
};
started = true;
};
// Mark the stream as playing; the actual clip->play() happens later in
// update() once enough frames are decoded.
void VideoStreamTheoraplayer::play() {
if (clip)
playing = true;
};
bool VideoStreamTheoraplayer::is_playing() const {
return playing;
};
// Pause or resume the underlying clip; resuming only restarts a clip that
// was playing and already past its initial buffering phase.
void VideoStreamTheoraplayer::set_paused(bool p_paused) {
paused = p_paused;
if (paused) {
clip->pause();
} else {
if (clip && playing && !started)
clip->play();
}
};
bool VideoStreamTheoraplayer::is_paused(bool p_paused) const {
return !playing;
};
void VideoStreamTheoraplayer::set_loop(bool p_enable) {
loop = p_enable;
};
bool VideoStreamTheoraplayer::has_loop() const {
return loop;
};
// Total clip duration in seconds (0 when no clip is loaded).
float VideoStreamTheoraplayer::get_length() const {
if (!clip)
return 0;
return clip->getDuration();
};
// Current playback position, read from the clip's timer.
float VideoStreamTheoraplayer::get_pos() const {
if (!clip)
return 0;
return clip->getTimer()->getTime();
};
void VideoStreamTheoraplayer::seek_pos(float p_time) {
if (!clip)
return;
clip->seek(p_time);
};
// Returns 1 when a decoded frame is ready to be popped, else 0.
int VideoStreamTheoraplayer::get_pending_frame_count() const {
if (!clip)
return 0;
TheoraVideoFrame* f = clip->getNextFrame();
return f ? 1 : 0;
};
// Copy the next decoded frame into p_tex (allocating the texture on first
// use) and hand the frame back to the decoder.
void VideoStreamTheoraplayer::pop_frame(Ref<ImageTexture> p_tex) {
if (!clip)
return;
TheoraVideoFrame* f = clip->getNextFrame();
if (!f) {
return;
};
#ifdef GLES2_ENABLED
// RasterizerGLES2* r = RasterizerGLES2::get_singleton();
// r->_texture_set_data(p_tex, f->mBpp == 3 ? Image::Format_RGB : Image::Format_RGBA, f->mBpp, w, h, f->getBuffer());
#endif
float w=clip->getWidth(),h=clip->getHeight();
int imgsize = w * h * f->mBpp;
int size = f->getStride() * f->getHeight() * f->mBpp; // NOTE(review): computed but unused
data.resize(imgsize);
{
// copy the frame buffer into our persistent byte vector
DVector<uint8_t>::Write wr = data.write();
uint8_t* ptr = wr.ptr();
copymem(ptr, f->getBuffer(), imgsize);
}
/*
for (int i=0; i<h; i++) {
int dstofs = i * w * f->mBpp;
int srcofs = i * f->getStride() * f->mBpp;
copymem(ptr + dstofs, f->getBuffer() + dstofs, w * f->mBpp);
};
*/
Image frame = Image();
frame.create(w, h, 0, f->mBpp == 3 ? Image::FORMAT_RGB : Image::FORMAT_RGBA, data);
// return the frame to the decoder's pool
clip->popFrame();
if (p_tex->get_width() == 0) {
// first frame: allocate the texture with video-surface flags
p_tex->create(frame.get_width(),frame.get_height(),frame.get_format(),Texture::FLAG_VIDEO_SURFACE|Texture::FLAG_FILTER);
p_tex->set_data(frame);
} else {
p_tex->set_data(frame);
};
};
/*
Image VideoStreamTheoraplayer::pop_frame() {
Image ret = frame;
frame = Image();
return ret;
};
*/
// Not implemented: always returns an empty Image.
Image VideoStreamTheoraplayer::peek_frame() const {
return Image();
};
// Per-tick update driven by the engine. Feeds elapsed time to the
// theoraplayer manager so it keeps decoding; also turns the "started"
// latch into actual playback once frames are buffered, and clears
// `playing` when the clip finishes.
void VideoStreamTheoraplayer::update(float p_time) {
if (!mgr)
return;
if (!clip)
return;
if (!playing || paused)
return;
//printf("video update!\n");
if (started) {
// Wait until at least 2 frames are decoded before starting playback,
// to avoid an immediate stall on the first frame.
if (clip->getNumReadyFrames() < 2) {
printf("frames not ready, returning!\n");
return;
};
started = false;
//printf("playing clip!\n");
clip->play();
} else if (clip->isDone()) {
playing = false;
};
mgr->update(p_time);
};
// Remembers the requested audio track index and applies it immediately when
// a clip is already loaded; otherwise it takes effect on the next set_file().
void VideoStreamTheoraplayer::set_audio_track(int p_idx) {

	audio_track = p_idx;
	if (clip) {
		clip->set_audio_track(audio_track);
	}
}
// Opens p_file and creates a theoraplayer clip for it. MP4 files are opened
// by path directly (theoraplayer reads the file itself); everything else is
// streamed through a FileAccess-backed data source. Lazily creates the
// audio factory and the video manager on first use.
void VideoStreamTheoraplayer::set_file(const String& p_file) {

	FileAccess* f = FileAccess::open(p_file, FileAccess::READ);
	if (!f || !f->is_open())
		return;

	if (!audio_factory) {
		audio_factory = memnew(TPAudioGodotFactory);
	};

	if (mgr == NULL) {
		mgr = memnew(TheoraVideoManager);
		mgr->setAudioInterfaceFactory(audio_factory);
	};

	int track = GLOBAL_DEF("theora/audio_track", 0); // hack

	if (p_file.find(".mp4") != -1) {

		// MP4: theoraplayer opens the file by OS path, so strip the
		// resource prefix and release our own file handle.
		std::string file = p_file.replace("res://", "").utf8().get_data();
		clip = mgr->createVideoClip(file, TH_RGBX, 2, false, track);
		//clip->set_audio_track(audio_track);
		memdelete(f);

	} else {

		// Other containers: stream through a FileAccess-backed data source
		// (ownership of `f` passes to the data source).
		TheoraDataSource* ds = memnew(TPDataFA(f, p_file));

		try {
			clip = mgr->createVideoClip(ds);
			clip->set_audio_track(audio_track);
		} catch (_TheoraGenericException e) {
			printf("exception occurred! %s\n", e.repr().c_str());
			clip = NULL;
		};
	};

	// BUGFIX: createVideoClip can fail (the catch above sets clip to NULL,
	// and the mp4 path may return NULL as well); dereferencing `clip`
	// unconditionally crashed on bad or unsupported files.
	if (!clip)
		return;

	clip->pause();
	started = true;
};
// Stops playback and destroys the clip. The manager (`mgr`) is deliberately
// not freed here -- see the commented-out code; it is meant to become a
// singleton/static instance.
VideoStreamTheoraplayer::~VideoStreamTheoraplayer() {
stop();
//if (mgr) { // this should be a singleton or static or something
// memdelete(mgr);
//};
//mgr = NULL;
if (clip) {
mgr->destroyVideoClip(clip);
clip = NULL;
};
};
// Fresh stream: no clip loaded, playback fully stopped, default audio track.
VideoStreamTheoraplayer::VideoStreamTheoraplayer() {

	//mgr = NULL; // manager is created lazily in set_file()
	clip = NULL;
	audio_track = 0;
	started = false;
	playing = false;
	paused = false;
	loop = false;
};
// Instantiates a theoraplayer stream for the given resource path.
RES ResourceFormatLoaderVideoStreamTheoraplayer::load(const String &p_path,const String& p_original_path) {

	Ref<VideoStreamTheoraplayer> stream = memnew(VideoStreamTheoraplayer);
	stream->set_file(p_path);
	return stream;
}
// File extensions this loader claims: Ogg media, Ogg video and MPEG-4.
void ResourceFormatLoaderVideoStreamTheoraplayer::get_recognized_extensions(List<String> *p_extensions) const {

	static const char *exts[] = { "ogm", "ogv", "mp4" };
	for (int i = 0; i < 3; i++) {
		p_extensions->push_back(exts[i]);
	}
}
// Accept both the generic and the theoraplayer-specific resource type names.
bool ResourceFormatLoaderVideoStreamTheoraplayer::handles_type(const String& p_type) const {

	if (p_type == "VideoStream")
		return true;
	return p_type == "VideoStreamTheoraplayer";
}
// Maps a recognized file extension (case-insensitive) to "VideoStream";
// returns an empty string for anything else.
String ResourceFormatLoaderVideoStreamTheoraplayer::get_resource_type(const String &p_path) const {

	String ext = p_path.extension().to_lower();
	if (ext != "ogm" && ext != "ogv" && ext != "mp4")
		return "";
	return "VideoStream";
}
| {
"content_hash": "5b108e716bc3906bbb17f63f2642622a",
"timestamp": "",
"source": "github",
"line_count": 552,
"max_line_length": 122,
"avg_line_length": 23.42391304347826,
"alnum_prop": 0.6245939675174014,
"repo_name": "tomasy23/evertonkrosnodart",
"id": "62dee1336a7d3e9365b85fa48898cf148c56cd99",
"size": "12930",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "drivers/theoraplayer/video_stream_theoraplayer.cpp",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "6955"
},
{
"name": "Assembly",
"bytes": "372147"
},
{
"name": "C",
"bytes": "20628442"
},
{
"name": "C++",
"bytes": "12863935"
},
{
"name": "CSS",
"bytes": "7594"
},
{
"name": "GDScript",
"bytes": "56484"
},
{
"name": "Java",
"bytes": "647757"
},
{
"name": "JavaScript",
"bytes": "5802"
},
{
"name": "Makefile",
"bytes": "920"
},
{
"name": "Matlab",
"bytes": "2076"
},
{
"name": "Objective-C",
"bytes": "112245"
},
{
"name": "Objective-C++",
"bytes": "115322"
},
{
"name": "Perl",
"bytes": "1930423"
},
{
"name": "Python",
"bytes": "105642"
},
{
"name": "Shell",
"bytes": "3815"
},
{
"name": "eC",
"bytes": "3710"
}
],
"symlink_target": ""
} |
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>class RubyXL::Text - rubyXL 3.3.13</title>
<script type="text/javascript">
var rdoc_rel_prefix = "../";
</script>
<script src="../js/jquery.js"></script>
<script src="../js/darkfish.js"></script>
<link href="../css/fonts.css" rel="stylesheet">
<link href="../css/rdoc.css" rel="stylesheet">
<body id="top" role="document" class="class">
<nav role="navigation">
<div id="project-navigation">
<div id="home-section" role="region" title="Quick navigation" class="nav-section">
<h2>
<a href="../index.html" rel="home">Home</a>
</h2>
<div id="table-of-contents-navigation">
<a href="../table_of_contents.html#pages">Pages</a>
<a href="../table_of_contents.html#classes">Classes</a>
<a href="../table_of_contents.html#methods">Methods</a>
</div>
</div>
<div id="search-section" role="search" class="project-section initially-hidden">
<form action="#" method="get" accept-charset="utf-8">
<div id="search-field-wrapper">
<input id="search-field" role="combobox" aria-label="Search"
aria-autocomplete="list" aria-controls="search-results"
type="text" name="search" placeholder="Search" spellcheck="false"
title="Type to search, Up and Down to navigate, Enter to load">
</div>
<ul id="search-results" aria-label="Search Results"
aria-busy="false" aria-expanded="false"
aria-atomic="false" class="initially-hidden"></ul>
</form>
</div>
</div>
<div id="class-metadata">
<div id="parent-class-section" class="nav-section">
<h3>Parent</h3>
<p class="link"><a href="OOXMLObject.html">RubyXL::OOXMLObject</a>
</div>
<!-- Method Quickref -->
<div id="method-list-section" class="nav-section">
<h3>Methods</h3>
<ul class="link-list" role="directory">
<li ><a href="#method-i-before_write_xml">#before_write_xml</a>
<li ><a href="#method-i-to_s">#to_s</a>
</ul>
</div>
</div>
</nav>
<main role="main" aria-labelledby="class-RubyXL::Text">
<h1 id="class-RubyXL::Text" class="class">
class RubyXL::Text
</h1>
<section class="description">
<p><a
href="http://www.schemacentral.com/sc/ooxml/e-ssml_t-1.html">www.schemacentral.com/sc/ooxml/e-ssml_t-1.html</a></p>
</section>
<section id="5Buntitled-5D" class="documentation-section">
<section class="constants-list">
<header>
<h3>Constants</h3>
</header>
<dl>
<dt id="ESCAPED_UNICODE">ESCAPED_UNICODE
<dd>
<dt id="INVALID_XML10_CHARS">INVALID_XML10_CHARS
<dd><p><a
href="http://www.w3.org/TR/REC-xml/#NT-Char">www.w3.org/TR/REC-xml/#NT-Char</a>:
Char ::= x9 | xA | xD | [#x20-#xD7FF] | [#xE000-#xFFFD] |
[#x10000-#x10FFFF]</p>
</dl>
</section>
<section id="public-instance-5Buntitled-5D-method-details" class="method-section">
<header>
<h3>Public Instance Methods</h3>
</header>
<div id="method-i-before_write_xml" class="method-detail ">
<div class="method-heading">
<span class="method-name">before_write_xml</span><span
class="method-args">()</span>
<span class="method-click-advice">click to toggle source</span>
</div>
<div class="method-description">
<div class="method-source-code" id="before_write_xml-source">
<pre><span class="ruby-comment"># File lib/rubyXL/objects/text.rb, line 20</span>
<span class="ruby-keyword">def</span> <span class="ruby-identifier">before_write_xml</span>
<span class="ruby-identifier">preserve_whitespace</span>
<span class="ruby-keyword">self</span>.<span class="ruby-identifier">value</span>.<span class="ruby-identifier">gsub!</span>(<span class="ruby-constant">INVALID_XML10_CHARS</span>) { <span class="ruby-operator">|</span><span class="ruby-identifier">c</span><span class="ruby-operator">|</span> <span class="ruby-string">"_x%04x_"</span> <span class="ruby-operator">%</span> <span class="ruby-identifier">c</span>.<span class="ruby-identifier">ord</span> }
<span class="ruby-keyword">true</span>
<span class="ruby-keyword">end</span></pre>
</div>
</div>
</div>
<div id="method-i-to_s" class="method-detail ">
<div class="method-heading">
<span class="method-name">to_s</span><span
class="method-args">()</span>
<span class="method-click-advice">click to toggle source</span>
</div>
<div class="method-description">
<div class="method-source-code" id="to_s-source">
<pre><span class="ruby-comment"># File lib/rubyXL/objects/text.rb, line 26</span>
<span class="ruby-keyword">def</span> <span class="ruby-identifier">to_s</span>
<span class="ruby-identifier">value</span>.<span class="ruby-identifier">to_s</span>.<span class="ruby-identifier">gsub</span>(<span class="ruby-constant">ESCAPED_UNICODE</span>) { <span class="ruby-operator">|</span><span class="ruby-identifier">m</span><span class="ruby-operator">|</span> <span class="ruby-node">$1</span>.<span class="ruby-identifier">hex</span>.<span class="ruby-identifier">chr</span>(<span class="ruby-constant">Encoding</span><span class="ruby-operator">::</span><span class="ruby-constant">UTF_8</span>) }
<span class="ruby-keyword">end</span></pre>
</div>
</div>
</div>
</section>
</section>
</main>
<footer id="validator-badges" role="contentinfo">
<p><a href="http://validator.w3.org/check/referer">Validate</a>
<p>Generated by <a href="http://docs.seattlerb.org/rdoc/">RDoc</a> 4.2.0.
<p>Based on <a href="http://deveiate.org/projects/Darkfish-RDoc/">Darkfish</a> by <a href="http://deveiate.org">Michael Granger</a>.
</footer>
| {
"content_hash": "e0d2dfa879b676c2441d1b2567c586d9",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 533,
"avg_line_length": 28.706976744186047,
"alnum_prop": 0.5907323395981854,
"repo_name": "99cm/rubyXL",
"id": "c3cbdd7e198864f33559e5161ed0a1c1866787a9",
"size": "6172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rdoc/RubyXL/Text.html",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "15702"
},
{
"name": "HTML",
"bytes": "1687453"
},
{
"name": "JavaScript",
"bytes": "17924"
},
{
"name": "Ruby",
"bytes": "383426"
}
],
"symlink_target": ""
} |
// Production webpack configuration: extends the shared config with
// minification, deterministic module ordering and strict asset-size budgets.
const { optimize: { OccurrenceOrderPlugin } } = require('webpack')
const UglifyJsPlugin = require('uglifyjs-webpack-plugin')
const merge = require('webpack-merge')
const { config } = require('./shared.config')
module.exports = merge(config, {
profile: true,
cache: false,
watch: false,
// Keep build output terse: list emitted assets only.
stats: {
assets: true,
chunks: false,
chunkModules: false,
modules: false,
reasons: false,
source: false,
},
entry: {
// babel-polyfill must be loaded before the application entry point.
comicchisel: ['babel-polyfill', 'index'],
},
// Fail the build outright when bundles exceed these byte limits.
performance: {
hints: 'error',
maxAssetSize: 500000,
maxEntrypointSize: 1000000,
},
plugins: [
new OccurrenceOrderPlugin(true),
// Minify; suppress noisy compressor warnings.
new UglifyJsPlugin({
compress: { warnings: false },
}),
],
})
| {
"content_hash": "db48c4a65aadd53faf8cfba5adaf73fa",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 66,
"avg_line_length": 18.641025641025642,
"alnum_prop": 0.6286107290233838,
"repo_name": "comicchisel/frontend",
"id": "e06d00b5fb14a04e01f8bcebc2b70ce8bf48aa11",
"size": "727",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webpack/production.config.js",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1555"
},
{
"name": "JavaScript",
"bytes": "15264"
}
],
"symlink_target": ""
} |
<ion-view view-title="Select Route">
<div class="bar bar-header bar-positive">
<h1 class="title">Ferry Tracker</h1>
</div>
<ion-content class="padding">
<div class="list tracker-list">
<label class="item item-input item-select">
<div class="input-label">Select Route</div>
<select ng-model = "data.selectedRoute">
<option ng-repeat="(routeId, route) in routes" value="{{routeId}}">{{routeId}}</option>
</select>
</label>
</div>
<a href="#/startNavigating" class="button button-block button-stable" ng-click="startInterval();">Share Location</a>
</ion-content>
</ion-view> | {
"content_hash": "fa5bc9aff274a072371c6a7f48fe1803",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 120,
"avg_line_length": 40.8125,
"alnum_prop": 0.6202143950995406,
"repo_name": "palsingh/hackathon",
"id": "b8660cc4dcf7fb6c34de420d76b69b23959dbff6",
"size": "653",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ferryDriver/www/templates/form.html",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1035423"
},
{
"name": "HTML",
"bytes": "10758"
},
{
"name": "JavaScript",
"bytes": "5318636"
}
],
"symlink_target": ""
} |
// $resource-backed services for the "things" REST endpoints.
angular.module('thingsServices', ['ngResource']).
// All things: GET things/all.json -> array.
factory('Things', function($resource) {
return $resource('things/all.json', {}, {
query: {method:'GET', isArray:true}
});
}).
// Things filtered by tag: GET things/byTag/:tagTitle.json -> array.
factory('ThingsByTag', function($resource) {
return $resource('things/byTag/:tagTitle.json', {}, {
query: {method:'GET', isArray:true}
});
}).
// Single thing by id: GET things/:thingId.json -> object.
factory('Thing', function($resource) {
return $resource('things/:thingId.json', {}, {
query: {method:'GET', isArray:false}
});
});
// Route table: list view (optionally tag-filtered) plus a detail view.
// NOTE(review): ThingsCtrl/ThingsByTagCtrl/ThingCtrl are referenced as
// globals here -- presumably defined in a controllers file; confirm.
angular.module('things', ['thingsServices']).
config(['$routeProvider', function($routeProvider) {
$routeProvider.
when('/', {templateUrl: 'partials/things.html', controller: ThingsCtrl}).
when('/things/bytag/:tagTitle', {templateUrl: 'partials/things.html', controller: ThingsByTagCtrl}).
when('/things/:thingId', {templateUrl: 'partials/thing.html', controller: ThingCtrl}).
otherwise({redirectTo: '/'});
}]);
"content_hash": "631881d4e44d0dc94a5f91b9dd731a85",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 106,
"avg_line_length": 38,
"alnum_prop": 0.6294736842105263,
"repo_name": "Horsed/learning",
"id": "5c8bdba4d24f8f9c1de7adf388be04749a637ce2",
"size": "950",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "namethatthing/angular.js/js/services.js",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7533"
},
{
"name": "Groovy",
"bytes": "3836"
},
{
"name": "Java",
"bytes": "65616"
},
{
"name": "JavaScript",
"bytes": "832289"
}
],
"symlink_target": ""
} |
* Timestamp service
* Header parser service
* URL shortener service
* Image Search Abstraction Layer
* File MetaData service
| {
"content_hash": "a0c1e232c0a7653ccfd657d371ff4182",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 32,
"avg_line_length": 25,
"alnum_prop": 0.8,
"repo_name": "szib/fcc-microservices",
"id": "27f5ea2ceb65f4a915aac10622a85eacca9dad0d",
"size": "154",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "README.md",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "984"
},
{
"name": "JavaScript",
"bytes": "9968"
}
],
"symlink_target": ""
} |
// ***********************************************************************
// Assembly : MetroFramework.Design
// Author : velez
// Created : 12-09-2015
//
// Last Modified By : velez
// Last Modified On : 12-08-2015
// ***********************************************************************
// <copyright file="MetroPanelDesigner.cs" company="MetroFrameworkDesignAssembly.Company">
// Copyright (c) Garmin International. All rights reserved.
// </copyright>
// <summary></summary>
// ***********************************************************************
using System;
using System.Collections.Generic;
using System.Text;
using System.Windows.Forms;
using MetroFramework.Controls;
#pragma warning disable 1587
/// <summary>
/// The Controls namespace.
/// </summary>
#pragma warning restore 1587
namespace MetroFramework.Design.Controls
{
/// <summary>
/// Design-time companion for <c>MetroPanel</c>. Derives from
/// <see cref="System.Windows.Forms.Design.ParentControlDesigner"/> so that
/// child controls can be dropped onto the panel in the WinForms designer.
/// </summary>
internal class MetroPanelDesigner : System.Windows.Forms.Design.ParentControlDesigner
{
/// <summary>
/// Initializes the designer with the specified component.
/// </summary>
/// <param name="component">The <see cref="T:System.ComponentModel.IComponent" /> to associate with the designer.</param>
public override void Initialize(System.ComponentModel.IComponent component)
{
base.Initialize(component);
// Placeholder: design-mode support for the panel's inner scrollable
// area is currently disabled (see the commented-out EnableDesignMode).
if (this.Control is MetroPanel)
{
//this.EnableDesignMode(((MetroPanel)this.Control).ScrollablePanel, "ScrollablePanel");
}
}
}
}
| {
"content_hash": "2cf0f93394295e2dca342711bd9e2aca",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 129,
"avg_line_length": 33.625,
"alnum_prop": 0.5656753407682775,
"repo_name": "barecool/winforms-modernui",
"id": "7e100f4343ae85b70173b32339b5675c6ac1950b",
"size": "2963",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MetroFramework.Design/Controls/MetroPanelDesigner.cs",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1539"
},
{
"name": "C#",
"bytes": "2040206"
}
],
"symlink_target": ""
} |
// Type definitions for @ag-grid-community/core v25.0.1
// Project: http://www.ag-grid.com/
// Definitions by: Niall Crosby <https://github.com/ag-grid/>
import { RowNode } from "../entities/rowNode";
/** Vertical placement of a single row within the grid. */
export interface RowBounds {
    /** Pixel offset of the row's top edge. */
    rowTop: number;
    /** Row height in pixels. */
    rowHeight: number;
    /** Display index of the row, when known. */
    rowIndex?: number;
}
/** Contract shared by all row models (client-side, infinite, server-side, viewport). */
export interface IRowModel {
    /** Returns the rowNode at the given index. */
    getRow(index: number): RowNode | null;
    /** Returns the rowNode for given id. */
    getRowNode(id: string): RowNode | null;
    /** This is legacy, not used by ag-Grid, but keeping for backward compatibility */
    getRowCount(): number;
    getTopLevelRowCount(): number;
    getTopLevelRowDisplayedIndex(topLevelIndex: number): number;
    /** Returns the row index at the given pixel */
    getRowIndexAtPixel(pixel: number): number;
    /** Returns total height of all the rows - used to size the height of the grid div that contains the rows */
    getCurrentPageHeight(): number;
    /** Returns true if the provided rowNode is in the list of rows to render */
    isRowPresent(rowNode: RowNode): boolean;
    /** Returns row top and bottom for a given row */
    getRowBounds(index: number): RowBounds | null;
    /** Returns true if this model has no rows, regardless of model filter. EG if rows present, but filtered
     * out, this still returns false. If it returns true, then the grid shows the 'no rows' overlay - but we
     * don't show that overlay if the rows are just filtered out. */
    isEmpty(): boolean;
    /** Returns true if no rows (either no rows at all, or the rows are filtered out). This is what the grid
     * uses to know if there are rows to render or not. */
    isRowsToRender(): boolean;
    /** Returns all rows in range that should be selected. If there is a gap in range (non ClientSideRowModel)
     * then no rows should be returned */
    getNodesInRangeForSelection(first: RowNode, last: RowNode): RowNode[];
    /** Iterate through each node. What this does depends on the model type. For clientSide, goes through
     * all nodes. For pagination, goes through current page. For virtualPage, goes through what's loaded in memory. */
    forEachNode(callback: (rowNode: RowNode, index: number) => void): void;
    /** The base class returns the type. We use this instead of 'instanceof' as the client might provide
     * their own implementation of the models in the future. */
    getType(): string;
    /**
     * It tells us if this row model knows about the last row that it can produce. This is used by the
     * PaginationPanel, if last row is not found, then the 'last' button is disabled and the last page is
     * not shown. This is always true for ClientSideRowModel. It toggles for InfiniteRowModel.
     */
    isLastRowFound(): boolean;
    /** Used by CSRM only - it makes sure there are no estimated row heights within the range. */
    ensureRowHeightsValid(startPixel: number, endPixel: number, startLimitIndex: number, endLimitIndex: number): boolean;
    /** Gets called after grid is initialised. What happens depends on row model. Client Side will take rowData
     * from gridOptions, the other row models will start calling their datasources. */
    start(): void;
}
| {
"content_hash": "468bb9c85852d920a8476a99ba9a2acd",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 121,
"avg_line_length": 59.870370370370374,
"alnum_prop": 0.7039901020723786,
"repo_name": "ceolter/angular-grid",
"id": "e2783f5dbc781479104fedcc8af45cd20e065085",
"size": "3233",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "community-modules/core/dist/es6/interfaces/iRowModel.d.ts",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "67272"
},
{
"name": "JavaScript",
"bytes": "2291855"
},
{
"name": "TypeScript",
"bytes": "671875"
}
],
"symlink_target": ""
} |
// UIButton subclass with a solid blue background and a fixed title.
@implementation BlueButton
// Sets up appearance when created with a frame.
// NOTE(review): the NSLog message translates to "you tapped the green
// button", which contradicts both the class name (blue) and the moment it
// runs (initialization, not a tap) -- confirm intent before shipping.
-(instancetype)initWithFrame:(CGRect)frame{
if(self = [super initWithFrame:frame]){
self.backgroundColor = [UIColor blueColor];
[self setTitle:@"它" forState:UIControlStateNormal];
NSLog(@"你点了绿色按钮");
}
return self;
}
@end
| {
"content_hash": "6a851fadad0b51909cbd9ade1551ae85",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 59,
"avg_line_length": 21.428571428571427,
"alnum_prop": 0.6333333333333333,
"repo_name": "CN-CJ/ExtractViewFromVC",
"id": "403d0bc610783799fd82d46becea74fde282ea7b",
"size": "492",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "将装配View分离出控制器/BlueButton.m",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Objective-C",
"bytes": "9736"
}
],
"symlink_target": ""
} |
package com.att.research.xacml.std.datatypes;
import java.text.ParseException;
import java.util.Objects;

import com.att.research.xacml.api.SemanticString;
/**
* ISO8601Duration implements the ISO8601 duration data type with parsers from strings and to strings.
*
* @author car
* @version $Revision: 1.2 $
*/
/**
 * ISO8601Duration implements the ISO8601 duration data type with parsers from strings and to strings.
 * Instances are immutable; the fractional seconds are stored split into whole seconds plus milliseconds.
 *
 * @author car
 * @version $Revision: 1.2 $
 */
public class ISO8601Duration implements SemanticString {
	private static final String INVALID_DURATION_STRING	= "Invalid ISO8601 duration string \"";
	private static final String POSITION_STRING			= "\" at position ";

	/**
	 * Returns the required ordering of a component designator within a duration
	 * string, or -1 for an unknown designator. 'M' is ambiguous: it means months
	 * before the 'T' time separator and minutes after it.
	 *
	 * @param sawT true if the 'T' separator has already been seen
	 * @param chunkName the component designator character
	 * @return the ordering rank, or -1 if the designator is not recognized
	 */
	private static int getChunkOrder(boolean sawT, char chunkName) {
		switch(chunkName) {
		case 'Y':
			return 1;
		case 'M':
			return (!sawT ? 2 : 5);
		case 'D':
			return 3;
		case 'H':
			return 4;
		case 'S':
			return 6;
		default:
			return -1;
		}
	}

	/**
	 * One parsed "number followed by designator" component of a duration string
	 * (e.g. "10H"), together with the number of characters it consumed.
	 */
	private static class DurationChunk {
		private char chunkName;		// component designator: Y, M, D, H or S
		private double chunkValue;	// numeric value; only 'S' may legitimately be fractional
		private int length;			// characters consumed, including the designator

		public DurationChunk(char name, double value, int lengthIn) {
			this.chunkName = name;
			this.chunkValue = value;
			this.length = lengthIn;
		}
		public char getChunkName() {
			return this.chunkName;
		}
		public double getChunkValue() {
			return this.chunkValue;
		}
		public int getLength() {
			return this.length;
		}
		/** Returns true if the chunk value has no fractional part. */
		public boolean isIntValue() {
			return Math.floor(this.getChunkValue()) == this.getChunkValue();
		}
		/** Returns the chunk value truncated to an int. */
		public int getIntValue() {
			return (int)Math.floor(this.getChunkValue());
		}

		/**
		 * Parses the next number+designator chunk of the given duration string.
		 *
		 * @param duration the full duration string being parsed
		 * @param startPos the index at which this chunk starts
		 * @return the parsed chunk, or null if startPos is past the end of the string
		 * @throws ParseException if no valid number/designator pair starts at startPos
		 */
		public static DurationChunk nextChunk(String duration, int startPos) throws ParseException {
			if (duration == null || startPos >= duration.length()) {
				return null;
			}
			int curPos = startPos;
			int endPos = duration.length();
			int dotCount = 0;
			char charAt;
			// Scan the numeric part, counting decimal points so "1.2.3" is rejected below.
			while (curPos < endPos && ((charAt = duration.charAt(curPos)) == '.' || Character.isDigit(charAt))) {
				if (charAt == '.') {
					dotCount++;
				}
				curPos++;
			}
			if (curPos < endPos && dotCount <= 1) {
				char chunkName = duration.charAt(curPos);
				Double dvalue = null;
				try {
					dvalue = Double.parseDouble(duration.substring(startPos, curPos));
				} catch (NumberFormatException ex) {
					throw new ParseException("Invalid chunk \"" + duration + POSITION_STRING + startPos, startPos);
				}
				curPos++;
				return new DurationChunk(chunkName, dvalue, (curPos - startPos));
			} else {
				throw new ParseException("Invalid chunk \"" + duration + POSITION_STRING + startPos, curPos);
			}
		}
	}

	private int durationSign = 1;	// +1 or -1
	private int years;
	private int months;
	private int days;
	private int hours;
	private int minutes;
	private int seconds;	// whole seconds
	private int millis;		// fractional part of the seconds, in milliseconds

	/**
	 * Creates a duration with the given values.
	 *
	 * @param durationSignIn the sign of the duration (+1 or -1)
	 * @param yearsIn the number of years
	 * @param monthsIn the number of months
	 * @param daysIn the number of days
	 * @param hoursIn the number of hours
	 * @param minutesIn the number of minutes
	 * @param secondsIn the number of fractional seconds
	 */
	public ISO8601Duration(int durationSignIn, int yearsIn, int monthsIn, int daysIn, int hoursIn, int minutesIn, double secondsIn) {
		this.durationSign = durationSignIn;
		this.years = yearsIn;
		this.months = monthsIn;
		this.days = daysIn;
		this.hours = hoursIn;
		this.minutes = minutesIn;
		// Split the fractional seconds into whole seconds plus milliseconds
		// (truncated, not rounded, preserving the original behavior).
		this.seconds = (int) Math.floor(secondsIn);
		this.millis = (int) Math.floor(((secondsIn * 1000) - (this.seconds * 1000)));
	}

	/**
	 * Creates a new <code>ISO8601Duration</code> by parsing the given <code>String</code>
	 * of the form <code>[-]P[nY][nM][nD][T[nH][nM][n[.n]S]]</code>.
	 *
	 * @param iso8601DurationString the ISO8601 duration <code>String</code>
	 * @return a new <code>ISO8601Duration</code> parsed from the given <code>String</code>,
	 *         or null if the input is null or empty
	 * @throws ParseException if the string is not a well-formed ISO8601 duration
	 *         (missing 'P', out-of-order or unknown components, repeated 'T',
	 *         or a fractional value in a non-seconds component)
	 */
	public static ISO8601Duration newInstance(String iso8601DurationString) throws ParseException {
		if (iso8601DurationString == null || iso8601DurationString.length() == 0) {
			return null;
		}
		int curPos = 0;
		int endPos = iso8601DurationString.length();
		int durationSign = 1;
		int years = 0;
		int months = 0;
		int days = 0;
		int hours = 0;
		int minutes = 0;
		double fractionalSeconds = 0.0;

		// Optional leading '-' negates the whole duration.
		if (iso8601DurationString.charAt(curPos) == '-') {
			durationSign = -1;
			curPos++;
		}
		if (iso8601DurationString.charAt(curPos) != 'P') {
			throw new ParseException(INVALID_DURATION_STRING + iso8601DurationString + POSITION_STRING + curPos, curPos);
		}
		curPos++;
		if (curPos >= endPos) {
			throw new ParseException(INVALID_DURATION_STRING + iso8601DurationString + "\": No duration components following P", curPos);
		}

		int lastChunkOrder = 0;
		boolean sawT = false;
		while (curPos < endPos) {
			/*
			 * Look for the Time divider character
			 */
			if (iso8601DurationString.charAt(curPos) == 'T') {
				if (sawT) {
					throw new ParseException(INVALID_DURATION_STRING + iso8601DurationString + POSITION_STRING + curPos + ": saw multiple T separators", curPos);
				} else {
					sawT = true;
				}
				curPos++;
			} else {
				DurationChunk durationChunk = DurationChunk.nextChunk(iso8601DurationString, curPos);
				/*
				 * Check for unknown chunks or out of order chunks
				 */
				int chunkOrder = getChunkOrder(sawT, durationChunk.getChunkName());
				if (chunkOrder <= 0) {
					throw new ParseException(INVALID_DURATION_STRING + iso8601DurationString + POSITION_STRING + curPos + ": invalid component", curPos);
				} else if (chunkOrder <= lastChunkOrder) {
					throw new ParseException(INVALID_DURATION_STRING + iso8601DurationString + POSITION_STRING + curPos + ": out of order component", curPos);
				}
				lastChunkOrder = chunkOrder;
				/*
				 * Only the seconds component may carry a fractional value
				 */
				if (durationChunk.getChunkName() != 'S' && !durationChunk.isIntValue()) {
					throw new ParseException(INVALID_DURATION_STRING + iso8601DurationString + POSITION_STRING + curPos + ": expected int value", curPos);
				}
				/*
				 * Assign the value to the right component
				 */
				switch(durationChunk.getChunkName()) {
				case 'Y':
					years = durationChunk.getIntValue();
					break;
				case 'M':
					// 'M' means months before the 'T' separator, minutes after it
					if (!sawT) {
						months = durationChunk.getIntValue();
					} else {
						minutes = durationChunk.getIntValue();
					}
					break;
				case 'D':
					days = durationChunk.getIntValue();
					break;
				case 'H':
					hours = durationChunk.getIntValue();
					break;
				case 'S':
					fractionalSeconds = durationChunk.getChunkValue();
					break;
				default:
					// getChunkOrder already rejected unknown designators
					assert(false);
					break;
				}
				/*
				 * Advance the current position
				 */
				curPos += durationChunk.getLength();
			}
		}
		return new ISO8601Duration(durationSign, years, months, days, hours, minutes, fractionalSeconds);
	}

	public int getDurationSign() {
		return this.durationSign;
	}
	public int getYears() {
		return this.years;
	}
	public int getMonths() {
		return this.months;
	}
	public int getDays() {
		return this.days;
	}
	public int getHours() {
		return this.hours;
	}
	public int getMinutes() {
		return this.minutes;
	}
	/** Whole seconds, excluding the millisecond part. */
	public int getSeconds() {
		return this.seconds;
	}
	/** Fractional part of the seconds, in milliseconds. */
	public int getMillis() {
		return this.millis;
	}
	/** Whole seconds plus the millisecond part, as a double. */
	public double getFractionalSecs() {
		return (double)(this.seconds) + (( (double)this.millis) / 1000);
	}

	@Override
	public int hashCode() {
		// BUGFIX: previously a plain sum of the components, which collided for
		// any permutation of the same values; Objects.hash spreads them out
		// while remaining consistent with equals().
		return Objects.hash(this.getDurationSign(), this.getYears(), this.getMonths(), this.getDays(),
				this.getHours(), this.getMinutes(), this.getSeconds(), this.getMillis());
	}

	@Override
	public boolean equals(Object obj) {
		if (!(obj instanceof ISO8601Duration)) {
			return false;
		} else if (obj == this) {
			return true;
		} else {
			ISO8601Duration iso8601Duration	= (ISO8601Duration)obj;
			// Equal iff every component, including the sign, matches exactly.
			return (this.getDurationSign() == iso8601Duration.getDurationSign() &&
					this.getYears() == iso8601Duration.getYears() &&
					this.getMonths() == iso8601Duration.getMonths() &&
					this.getDays() == iso8601Duration.getDays() &&
					this.getHours() == iso8601Duration.getHours() &&
					this.getMinutes() == iso8601Duration.getMinutes() &&
					this.getSeconds() == iso8601Duration.getSeconds() &&
					this.getMillis() == iso8601Duration.getMillis()
					);
		}
	}

	/**
	 * Renders this duration in canonical ISO8601 form. Zero-valued components
	 * are omitted; an all-zero duration renders as "PT0S".
	 *
	 * @return the ISO8601 string representation of this duration
	 */
	@Override
	public String stringValue() {
		StringBuilder stringBuilder	= new StringBuilder();
		if (this.getDurationSign() < 0) {
			stringBuilder.append("-P");
		} else {
			stringBuilder.append("P");
		}
		boolean sawOne	= false;
		if (this.getYears() > 0) {
			stringBuilder.append(this.getYears());
			stringBuilder.append('Y');
			sawOne	= true;
		}
		if (this.getMonths() > 0) {
			stringBuilder.append(this.getMonths());
			stringBuilder.append('M');
			sawOne	= true;
		}
		if (this.getDays() > 0) {
			stringBuilder.append(this.getDays());
			stringBuilder.append('D');
			sawOne	= true;
		}
		if (this.getHours() > 0 || this.getMinutes() > 0 || this.getSeconds() > 0 || this.getMillis() > 0) {
			stringBuilder.append('T');
			if (this.getHours() > 0) {
				stringBuilder.append(this.getHours());
				stringBuilder.append('H');
			}
			if (this.getMinutes() > 0) {
				stringBuilder.append(this.getMinutes());
				stringBuilder.append('M');
			}
			double dSeconds	= this.getFractionalSecs();
			if (dSeconds > 0) {
				// Render whole-second values without a trailing ".0"
				if (dSeconds == Math.floor(dSeconds)) {
					stringBuilder.append((int)dSeconds);
				} else {
					stringBuilder.append(dSeconds);
				}
				stringBuilder.append('S');
			}
		} else if (!sawOne) {
			stringBuilder.append("T0S");
		}
		return stringBuilder.toString();
	}

	/** Debug rendering of all components; see {@link #stringValue()} for the canonical ISO8601 form. */
	@Override
	public String toString() {
		// BUGFIX: fields were previously concatenated with no separators,
		// producing an unreadable single token like "durationSign=1years=0...".
		return "{" +
				"durationSign=" + this.getDurationSign() +
				",years=" + this.getYears() +
				",months=" + this.getMonths() +
				",days=" + this.getDays() +
				",hours=" + this.getHours() +
				",minutes=" + this.getMinutes() +
				",seconds=" + this.getSeconds() +
				",millis=" + this.getMillis() +
				"}";
	}
}
| {
"content_hash": "238b1d961c8e994d6de76232b7e331c0",
"timestamp": "",
"source": "github",
"line_count": 348,
"max_line_length": 163,
"avg_line_length": 28.192528735632184,
"alnum_prop": 0.6540617674039344,
"repo_name": "att/XACML",
"id": "45cbf932caa488b76743f82d227ab1649f31e556",
"size": "9933",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "XACML/src/main/java/com/att/research/xacml/std/datatypes/ISO8601Duration.java",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1492"
},
{
"name": "HTML",
"bytes": "193016"
},
{
"name": "Java",
"bytes": "4635186"
},
{
"name": "PLpgSQL",
"bytes": "198552"
},
{
"name": "TSQL",
"bytes": "568124"
}
],
"symlink_target": ""
} |
module microcoverage
## Utility function to construct an Expr from head and args.
## Builds an empty node with the given head, then pushes each trailing
## argument onto its `args` array and returns the node.
function mkex(head::Symbol, args...)
v = Expr(head)
for arg in args
push!(v.args, arg)
end
v
end
## We annotate the source file with a sequence of ints and strings.
typealias AnnotationIndexType Array{Union(Int,ASCIIString), 1}
## Constructor for an annotation index.
initannotationindex() = (Union(Int,ASCIIString))[]
# This dictionary maps source file names to their
# annotation indices.
const annotationindexdict = Dict{ASCIIString, AnnotationIndexType}()
# Trackarray holds the counters that track the line
# numbers and calls.
const trackarray = (Int)[]
## incrtrackarray increments the appropriate entry of the
## track array.
## Called from instrumented code; `subscr` is the counter index assigned
## to a line or branch during rewriting. Returns nothing.
function incrtrackarray(subscr)
global trackarray::Array{Int,1}
trackarray[subscr] += 1
nothing
end
## makecallinst: Generate an expression that corresponds to the
## statement
## Main.microcoverage.incrtrackarray(trknum)
## The fully qualified Main.microcoverage path lets the generated call
## resolve regardless of which module the instrumented code runs in.
makecallinst(trknum::Int) =
mkex(:call,
mkex(:.,
mkex(:.,
:Main,
QuoteNode(:microcoverage)),
QuoteNode(:incrtrackarray)),
trknum)
## coverage_rewrite_recursive
## Take an expression inprogram (could be a whole program),
## the name of the tracking array, and an initial position
## in the tracking array.
##
## Produce an outprogram, which is the rewritten inprogram
## with calls to the coverage-statement incrementers.
## Also return the updated trknum (position in the tracking array).
## As a side effect, generate annotationindex, which tracks what should
## be printed in the output after coverage checking is complete.
##
## There are three versions of routine; the correct one is
## selected by multiple dispatch on the first argument.
## LineNumberNode case: wrap the line marker together with a counter call.
function coverage_rewrite_recursive(inprogram::LineNumberNode,
starttrknum::Int,
linenumberoffset::Int,
annotationindex::AnnotationIndexType)
# If the argument is of type LineNumberNode, i.e.,
# a new line number in the source code,
# replace it with a block that consists of the
# the line number and a tracking array incrementer
trknum = starttrknum
newline = inprogram.line + linenumberoffset
# Record "line `newline` is tracked by counter `trknum`"; the line number
# is pushed negated, apparently to distinguish it from counter indices in
# the annotation index -- confirm against the report-printing code.
push!(annotationindex, -newline, trknum)
outprogram = mkex(:block,
deepcopy(inprogram),
makecallinst(trknum))
# One counter consumed; return the next free slot to the caller.
trknum += 1
return outprogram, trknum
end
function coverage_rewrite_recursive(inprogram::Any,
                                    starttrknum::Int,
                                    linenumberoffset::Int,
                                    ::AnnotationIndexType)
    ## Fallback method: nodes that are neither Expr nor LineNumberNode
    ## need no instrumentation, so pass them through unchanged together
    ## with the unmodified tracking position.
    inprogram, starttrknum
end
## Primary dispatch target: rewrite an Expr tree, inserting tracking
## calls for line numbers, ternary branches, ||/&& short-circuit arms,
## and statement-function bodies. Returns (rewritten Expr, next free
## tracking slot). Appends display items to annotationindex as it goes.
function coverage_rewrite_recursive(inprogram::Expr,
                                    starttrknum::Int,
                                    linenumberoffset::Int,
                                    annotationindex::AnnotationIndexType)
    ## This is the primary version of coverage_rewrite
    ## recursive. It takes an expression and inserts
    ## tracking statements for line numbers and internal branches
    ## in expressions.
    trknum = starttrknum
    if inprogram.head == :line
        # If the expression is an expression of type :line, i.e.,
        # a new line number in the source code,
        # replace it with a block that consists of
        # the line number and a tracking array incrementer
        newline = inprogram.args[1] + linenumberoffset
        # Negative entry = line number, positive entry = tracking slot.
        push!(annotationindex, -newline, trknum)
        outprogram = mkex(:block,
                          deepcopy(inprogram),
                          makecallinst(trknum))
        trknum += 1
    elseif inprogram.head == :if && (!(typeof(inprogram.args[2]) <: Expr) ||
                                     inprogram.args[2].head != :block)
        # If the expression is of the form a? b : c
        # (an :if whose then-part is not a block, i.e. a ternary)
        # then generate tracking statements for b and c
        outprogram = Expr(:if)
        # The condition itself may contain further trackable constructs.
        outprogram1, trknum = coverage_rewrite_recursive(inprogram.args[1],
                                                         trknum,
                                                         linenumberoffset,
                                                         annotationindex)
        push!(outprogram.args, outprogram1)
        push!(annotationindex, " ? ")
        @assert(length(inprogram.args) == 3)
        # k == 2 is the then-branch, k == 3 the else-branch.
        for k = 2 : 3
            if k > 2
                push!(annotationindex, " : ")
            end
            a2 = inprogram.args[k]
            # Annotate each branch as "(<hit count>...)" in the report.
            push!(annotationindex, "(", trknum)
            callinst = makecallinst(trknum)
            trknum += 1
            outprogram1, trknum = coverage_rewrite_recursive(a2,
                                                             trknum,
                                                             linenumberoffset,
                                                             annotationindex)
            push!(outprogram.args, mkex(:block,
                                        callinst,
                                        outprogram1))
            push!(annotationindex, ")")
        end
    elseif inprogram.head == :(=) && typeof(inprogram.args[1]) <: Expr &&
           inprogram.args[1].head == :call && inprogram.args[1].args[1] != :eval
        # If the line is a statement-function definition other than the
        # definition of "eval", then insert
        # a tracking statement into the function body.
        @assert length(inprogram.args) == 2
        outprogram1, trknum = coverage_rewrite_recursive(inprogram.args[1],
                                                         trknum,
                                                         linenumberoffset,
                                                         annotationindex)
        # savetrknum is the slot reported as the call count below.
        savetrknum = trknum
        callinst = makecallinst(trknum)
        trknum += 1
        outprogram2, trknum = coverage_rewrite_recursive(inprogram.args[2],
                                                         trknum,
                                                         linenumberoffset,
                                                         annotationindex)
        push!(annotationindex, "(called", savetrknum, "time(s))")
        outprogram = mkex(:(=), outprogram1, mkex(:block,
                                                  callinst,
                                                  outprogram2))
    elseif inprogram.head == :|| || inprogram.head == :&&
        ## If the expression is the || or && operator, generate
        ## a tracking statement for each branch (short-circuiting means
        ## the right arm may execute less often than the left).
        @assert length(inprogram.args) == 2
        outprogram = Expr(inprogram.head)
        for k = 1 : 2
            callinst = makecallinst(trknum)
            push!(annotationindex, "(", trknum)
            trknum += 1
            outprogram1, trknum = coverage_rewrite_recursive(inprogram.args[k],
                                                             trknum,
                                                             linenumberoffset,
                                                             annotationindex)
            push!(outprogram.args, mkex(:block,
                                        callinst,
                                        outprogram1))
            push!(annotationindex, ")")
            if k == 1
                if inprogram.head == :||
                    push!(annotationindex, " || ")
                else
                    push!(annotationindex, " && ")
                end
            end
        end
    elseif inprogram.head == :global || inprogram.head == :import ||
           inprogram.head == :importall || inprogram.head == :export ||
           inprogram.head == :typealias || inprogram.head == :abstract ||
           inprogram.head == :using
        # Declarations are left untouched -- wrapping them in blocks
        # would change their meaning at top level.
        outprogram = inprogram
    elseif inprogram.head == :immutable || inprogram.head == :type
        # Inside type definitions, only inner method definitions are
        # instrumented; fields and other members pass through as-is.
        outprogram = Expr(inprogram.head)
        for expr1 in inprogram.args
            if typeof(expr1) <: Expr &&
               (expr1.head == :(=) || expr1.head == :function)
                outprogram1, trknum = coverage_rewrite_recursive(expr1,
                                                                 trknum,
                                                                 linenumberoffset,
                                                                 annotationindex)
                push!(outprogram.args, outprogram1)
            else
                push!(outprogram.args, expr1)
            end
        end
    else
        ## For all other expression types, just make the output same as
        ## the input (with recursive calls)
        outprogram = Expr(inprogram.head)
        for expr1 in inprogram.args
            outprogram1, trknum = coverage_rewrite_recursive(expr1,
                                                             trknum,
                                                             linenumberoffset,
                                                             annotationindex)
            push!(outprogram.args, outprogram1)
        end
    end
    outprogram, trknum
end
## Count the newline characters in `string` (i.e. the number of
## complete lines), using repeated `search` from the position just
## past each match.
function linecount(string)
    newlines = 0
    idx = search(string, '\n', 1)
    while idx != 0
        newlines += 1
        idx = search(string, '\n', idx + 1)
    end
    newlines
end
# Sentinel first line written into the generated source file; restore()
# refuses to overwrite a file whose first line is not exactly this.
filepreamble = "# Automatically generated by microcoverage.jl-- will be automatically deleted upon completion"
## This function takes a sourcefile name. It renames
## it to <oldname>.orig. It parses the original file
## and inserts tracking statements.
## Then it generates a new
## sourcefile with the same name as the old. The new
## file eval's the parsed version of the old file with
## tracking statements.
##
## Side effects: extends the global `trackarray` with zeroed counters
## for the new tracking slots and records this file's annotation index
## in `annotationindexdict`. Raises an error if <name>.orig exists.
function begintrack(sourcefilename::ASCIIString)
    println("reading $sourcefilename")
    src = ""
    open(sourcefilename, "r") do h
        src = convert(ASCIIString, readbytes(h))
    end
    annotationindex = initannotationindex()
    global trackarray::Array{Int,1}
    lasttrknum = length(trackarray)
    initsize = lasttrknum
    srcpos = 1
    src_parse_rewrite = (Expr)[]
    println("parsing")
    # Running count of newlines consumed so far; added to parser line
    # numbers so annotations refer to absolute file lines.
    linenumberoffset = 0
    while srcpos <= length(src)
        if isspace(src[srcpos])
            if src[srcpos] == '\n'
                linenumberoffset += 1
            end
            srcpos += 1
        elseif src[srcpos] == '#'
            # Skip a whole-line comment (parse() would also skip it,
            # but we must keep the line count accurate ourselves).
            eolpos = search(src, '\n', srcpos)
            srcpos = eolpos + 1
            linenumberoffset += 1
        else
            src_parse, srcposfinal = parse(src, srcpos)
            rewrite1,lasttrknum = coverage_rewrite_recursive(src_parse,
                                                             lasttrknum + 1,
                                                             linenumberoffset,
                                                             annotationindex)
            linenumberoffset += linecount(src[srcpos : srcposfinal - 1])
            srcpos = srcposfinal
            push!(src_parse_rewrite, rewrite1)
        end
    end
    # Grow the counter array and zero the freshly added slots.
    resize!(trackarray, lasttrknum)
    for j = initsize + 1 : lasttrknum
        trackarray[j] = 0
    end
    global annotationindexdict::Dict{ASCIIString,AnnotationIndexType}
    annotationindexdict[sourcefilename] = annotationindex
    renamed = sourcefilename * ".orig"
    # BUGFIX: test existence directly with isfile. The previous check
    # (stat(renamed).size > 0) treated an existing *empty* backup file
    # as absent, so mv below would have clobbered it.
    if isfile(renamed)
        error("Cannot rename original -- file already exists with the name $renamed")
    end
    println("renaming $sourcefilename to $renamed")
    mv(sourcefilename, renamed)
    println("saving machine-generated code in $sourcefilename")
    open(sourcefilename,"w") do h2
        global filepreamble::ASCIIString
        println(h2, filepreamble)
        # Emit each rewritten top-level expression as a serialized blob
        # wrapped in eval(deserialize(...)); this avoids re-printing the
        # Expr tree as source text.
        for rewrite in src_parse_rewrite
            ss = IOBuffer()
            serialize(ss, rewrite)
            ser_rewrite = takebuf_array(ss)
            numbyte = length(ser_rewrite)
            println(h2, "eval(deserialize(IOBuffer((UInt8)[")
            for count = 1 : numbyte
                byte = ser_rewrite[count]
                show(h2, byte)
                count < numbyte && print(h2, ", ")
                count % 8 == 0 && println(h2)
            end
            println(h2, "])))")
        end
    end
end
# Build an ASCIIString of i space characters (i bytes of 0x20).
spaces(i::Int) = convert(ASCIIString, fill(0x20, i))
## The next several functions are handlers for items in annotationindex.
## This one handles an Int entry: negative values encode source line
## numbers (handled by printmcovli on the negated value), positive
## values are tracking-slot indices (handled by printmcovtn).
function printmcov(item::Int,
                   lastprint::Int,
                   curcol::Int,
                   newline::Int,
                   horig::IO,
                   hcov::IO)
    item < 0 ?
        printmcovli(-item, lastprint, curcol, newline, horig, hcov) :
        printmcovtn(item, lastprint, curcol, newline, horig, hcov)
end
## Prints a line number and source line and advances the file
## to that line number, printing more source lines as necessary.
## Returns the updated (lastprint, curcol, newline) triple that the
## caller threads through successive printmcov calls.
function printmcovli(lineno::Int,
                     lastprint::Int,
                     curcol::Int,
                     newline::Int,
                     horig::IO,
                     hcov::IO)
    oldnewline = newline
    newline = lineno
    # Annotation entries are generated in source order, so a backwards
    # jump means the index is corrupt.
    if newline < lastprint
        error("Line numbers out of order in tracking info")
    end
    # Copy any intervening source lines that carry no tracking info,
    # marked with "*" in the 17th column.
    for count = lastprint + 1 : newline - 1
        s = chomp(readline(horig))
        if curcol > 16
            println(hcov)
            curcol = 0
        end
        println(hcov, spaces(16-curcol), "* ", s)
        curcol = 0
    end
    lastprint = newline - 1
    # Only print the "L<n>" prefix when the line number changed.
    if newline != oldnewline
        nls = "$newline"
        print(hcov, "L", nls, spaces(8 - length(nls)))
        curcol += 9
    end
    lastprint, curcol, newline
end
## Print a tracking number: look up the hit count for slot `tracknum`
## in the global trackarray and write it to the coverage report.
function printmcovtn(tracknum::Int,
                     lastprint::Int,
                     curcol::Int,
                     newline::Int,
                     ::IO,
                     hcov::IO)
    global trackarray::Array{Int,1}
    cov = trackarray[tracknum]
    cs = " $cov "
    print(hcov, cs)
    curcol += length(cs)
    lastprint, curcol, newline
end
## Annotation-index handler for a literal string entry: emit it
## verbatim to the coverage report and advance the current column.
function printmcov(outstring::ASCIIString,
                   lastprint::Int,
                   curcol::Int,
                   newline::Int,
                   ::IO,
                   hcov::IO)
    print(hcov, outstring)
    return lastprint, curcol + length(outstring), newline
end
## endtrack is called when the tracking is finished.
## It produces a coverage report in a file called <origfilename>.mcov.
## Then it renames the files back to how they were before
## begintrack was called (via restore).
function endtrack(sourcefilename::ASCIIString)
    renamed = sourcefilename * ".orig"
    covfilename = sourcefilename * ".mcov"
    open(renamed, "r") do horig
        open(covfilename, "w") do hcov
            println("Writing coverage information to $covfilename")
            global annotationindexdict::Dict{ASCIIString,AnnotationIndexType}
            # (lastprint, curcol, newline) thread through the handlers:
            # last source line copied, current output column, and the
            # line number currently being annotated.
            lastprint = 0
            curcol = 0
            newline = -1
            for item in annotationindexdict[sourcefilename]
                lastprint, curcol, newline = printmcov(item,
                                                       lastprint,
                                                       curcol,
                                                       newline,
                                                       horig,
                                                       hcov)
            end
            if curcol > 16
                curcol = 0
                println(hcov)
            end
            # Copy any trailing source lines that carried no tracking
            # info, marked with "*".
            while !eof(horig)
                s = chomp(readline(horig))
                println(hcov, spaces(16-curcol), "* ", s)
                curcol = 0
            end
        end
    end
    restore(sourcefilename)
end
## restore: undo begintrack's file swap for `sourcefilename`.
## Verifies that <name>.orig exists and that the current file is the
## machine-generated one (by checking its preamble line), then moves
## the backup back over it. Errors out rather than destroy user code.
function restore(sourcefilename::ASCIIString)
    renamed = sourcefilename * ".orig"
    # BUGFIX: test existence directly with isfile. The previous check
    # (stat(renamed).size == 0) misreported an existing zero-byte
    # backup as missing and refused to restore it.
    if !isfile(renamed)
        error("No file called $renamed")
    end
    s = ""
    open(sourcefilename, "r") do hrewr
        s = chomp(readline(hrewr))
    end
    global filepreamble::ASCIIString
    # Refuse to overwrite anything that does not carry our sentinel
    # first line -- it might be a hand-edited file.
    if s != filepreamble
        error("Cannot overwrite $sourcefilename; missing preamble statement")
    end
    println("renaming $renamed to $sourcefilename; machine-generated $sourcefilename overwritten")
    mv(renamed, sourcefilename, remove_destination=true)
end
export restore
export begintrack
export endtrack
end
| {
"content_hash": "e9b5eda02495a17232e164787f81afeb",
"timestamp": "",
"source": "github",
"line_count": 477,
"max_line_length": 110,
"avg_line_length": 35.125786163522015,
"alnum_prop": 0.528498955535661,
"repo_name": "StephenVavasis/microcoverage",
"id": "b24c3bca522cc4969e54e442143bd16075d90699",
"size": "16755",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "microcoverage.jl",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Julia",
"bytes": "18072"
}
],
"symlink_target": ""
} |
define([
    '../dom/el',
    '../dom/selector',
    'jef/functional/isArray',
    'jef/domdiff/diff',
    'jef/domdiff/applyDiff'
], function (
    el,
    selector,
    isArray,
    domDiff,
    applyDiff
) {
    'use strict';
    /**
     * Subscribe to `stream` and, for every emitted value, compute and
     * apply the DOM difference between the element matched by
     * `elementSelector` and that value rendered as child nodes.
     *
     * @param {Stream} stream stream whose values become the element's children
     * @param {String} elementSelector selector for the target element
     * @returns {*} whatever `stream.on` returns (subscription handle)
     */
    return function domDiffWith(stream, elementSelector) {
        return stream.on(function (value) {
            var matched = selector(elementSelector);
            if (!matched.length) {
                throw new Error(
                    'domDiffWith: Can\'t match any element ' +
                    'for given selector: "' + elementSelector + '"'
                );
            }
            var target = matched.get(0);
            var shallowClone = target.cloneNode(false);
            var candidate = el(
                shallowClone,
                isArray(value) ? value : [value]
            );
            applyDiff(
                target,
                candidate,
                domDiff(target, candidate)
            );
            // Clear local references (presumably to let detached DOM
            // nodes be collected sooner -- mirrors the original code).
            shallowClone = null;
            matched = null;
            target = null;
            candidate = null;
        });
    };
});
| {
"content_hash": "1fadecd02d450c54356028502d77808f",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 68,
"avg_line_length": 24.25925925925926,
"alnum_prop": 0.45038167938931295,
"repo_name": "widmogrod/js-spot-the-difference",
"id": "095ae62b75def363b50f6575f772fccc57ae22b7",
"size": "1310",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "public/js/stream/domDiffWith.js",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "7824"
},
{
"name": "JavaScript",
"bytes": "33738"
}
],
"symlink_target": ""
} |
'''
Created by auto_sdk on 2014-12-17 17:22:51
'''
from top.api.base import RestApi
class FenxiaoDiscountUpdateRequest(RestApi):
    """TOP API request object for ``taobao.fenxiao.discount.update``.

    Each request parameter is exposed as an instance attribute that
    starts out as ``None`` and is filled in by the caller before the
    request is sent.
    """

    def __init__(self, domain='gw.api.taobao.com', port=80):
        RestApi.__init__(self, domain, port)
        # Initialise every request parameter to None.
        for name in ('detail_ids', 'detail_statuss', 'discount_id',
                     'discount_name', 'discount_status', 'discount_types',
                     'discount_values', 'target_ids', 'target_types'):
            setattr(self, name, None)

    def getapiname(self):
        # API method name understood by the TOP gateway.
        return 'taobao.fenxiao.discount.update'
| {
"content_hash": "b579de9a0b399e9802f4bd4960b1817c",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 55,
"avg_line_length": 29.31578947368421,
"alnum_prop": 0.696588868940754,
"repo_name": "CooperLuan/devops.notes",
"id": "738498e2eb6494c6f417fa77f3e95393d63627cb",
"size": "557",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "taobao/top/api/rest/FenxiaoDiscountUpdateRequest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1505"
},
{
"name": "JavaScript",
"bytes": "29"
},
{
"name": "Python",
"bytes": "211546"
},
{
"name": "Shell",
"bytes": "150"
}
],
"symlink_target": ""
} |
# Spec: saving a Zombie without a name must fail validation.
describe Zombie do
  it 'raises an error if saved without a name' do
    zombie = Zombie.new
    # BUGFIX: `except` was a typo for RSpec's `expect`; as written the
    # spec raised NoMethodError instead of asserting anything. The
    # block form is required for raise_error matchers.
    expect { zombie.save! }.to raise_error(
      ActiveRecord::RecordInvalid
    )
  end
end
| {
"content_hash": "331f9dc6301451bb20c3f561607628ab",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 48,
"avg_line_length": 22,
"alnum_prop": 0.7159090909090909,
"repo_name": "ernestas-poskus/codeschool",
"id": "9b69e377ca59cf3d1325c5807bd2a06098b89f41",
"size": "176",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Rails Testing with Rspec/raise_error_match.rb",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "90"
},
{
"name": "CoffeeScript",
"bytes": "3723"
},
{
"name": "JavaScript",
"bytes": "15339"
},
{
"name": "Ruby",
"bytes": "90544"
}
],
"symlink_target": ""
} |
require "spec_helper"
# Sanity checks: the gem defines its top-level namespaces as modules.
describe AuthorizeNet do
  it "should have a module called AuthorizeNet" do
    defined?(AuthorizeNet).should be_true
    AuthorizeNet.class.should equal(Module)
  end
  it "should have a module called AIM" do
    defined?(AuthorizeNet::AIM).should be_true
    AuthorizeNet::AIM.class.should equal(Module)
  end
end
# Specs for the credit-card payment method value object.
describe AuthorizeNet::CreditCard do
  before do
    @card_number = '4111111111111111'
    # MMYY expiration roughly one year in the future.
    @expiry = '01' + (Time.now + (3600 * 24 * 365)).strftime('%y')
  end
  it "should support instantiation" do
    AuthorizeNet::CreditCard.new(@card_number, @expiry).should be_instance_of(AuthorizeNet::CreditCard)
  end
  it "should support converting itself into a hash" do
    card = AuthorizeNet::CreditCard.new(@card_number, @expiry)
    card.should respond_to(:to_hash)
    card.to_hash.should be_kind_of(Hash)
  end
  it "should have the right payment method type" do
    card = AuthorizeNet::CreditCard.new(@card_number, @expiry)
    fields = card.to_hash
    fields[:method].should == AuthorizeNet::PaymentMethodType::CREDIT_CARD
  end
  it "should respond to attributes" do
    card = AuthorizeNet::CreditCard.new(@card_number, @expiry)
    card.card_number.should == @card_number
    card.expiration.should == @expiry
    # card_code was never supplied, so it should stay nil.
    card.card_code.should be_nil
  end
end
# Specs for the eCheck (bank account) payment method value object.
describe AuthorizeNet::ECheck do
  before do
    @routing_number = '322271627'
    @account_number = '123456789'
    @bank_name = 'JPMorgan Chase Bank'
    @account_holder_name = 'John Doe'
  end
  it "should support instantiation" do
    AuthorizeNet::ECheck.new(@routing_number, @account_number, @bank_name, @account_holder_name).should be_instance_of(AuthorizeNet::ECheck)
  end
  it "should support converting itself into a hash" do
    echeck = AuthorizeNet::ECheck.new(@routing_number, @account_number, @bank_name, @account_holder_name)
    echeck.should respond_to(:to_hash)
    echeck.to_hash.should be_kind_of(Hash)
  end
  # Typo fixed in the example name: "retrival" -> "retrieval".
  it "should support payment method code retrieval" do
    echeck = AuthorizeNet::ECheck.new(@routing_number, @account_number, @bank_name, @account_holder_name)
    fields = echeck.to_hash
    fields[:method].should == AuthorizeNet::PaymentMethodType::ECHECK
  end
end
# Specs for the billing Address value object: unknown constructor keys
# are dropped, known keys appear verbatim in the hash form.
describe AuthorizeNet::Address do
  before do
  end
  it "should support instantiation" do
    AuthorizeNet::Address.new.should be_instance_of(AuthorizeNet::Address)
  end
  it "should support converting itself into a hash" do
    address = AuthorizeNet::Address.new
    address.should respond_to(:to_hash)
    address.to_hash.should be_kind_of(Hash)
  end
  it "should ignore unknown fields" do
    address = AuthorizeNet::Address.new(:tax => '123')
    hash = address.to_hash
    hash.should be_kind_of(Hash)
    hash.should == {}
  end
  it "should accept known fields" do
    address = AuthorizeNet::Address.new(:first_name => '123')
    hash = address.to_hash
    hash.should be_kind_of(Hash)
    hash.should == {:first_name => '123'}
  end
end
# Specs for the ShippingAddress value object: like Address, but known
# keys are prefixed with ship_to_ in the hash form.
describe AuthorizeNet::ShippingAddress do
  before do
  end
  it "should support instantiation" do
    AuthorizeNet::ShippingAddress.new.should be_instance_of(AuthorizeNet::ShippingAddress)
  end
  it "should support converting itself into a hash" do
    address = AuthorizeNet::ShippingAddress.new
    address.should respond_to(:to_hash)
    address.to_hash.should be_kind_of(Hash)
  end
  it "should ignore unknown fields" do
    address = AuthorizeNet::ShippingAddress.new(:pie => '123')
    hash = address.to_hash
    hash.should be_kind_of(Hash)
    hash.should == {}
  end
  it "should accept known fields" do
    address = AuthorizeNet::ShippingAddress.new(:first_name => '123')
    hash = address.to_hash
    hash.should be_kind_of(Hash)
    hash.should == {:ship_to_first_name => '123'}
  end
end
# Specs for the Customer value object, including flattening of a nested
# Address record into the customer's own hash form.
describe AuthorizeNet::Customer do
  before do
  end
  it "should support instantiation" do
    AuthorizeNet::Customer.new.should be_instance_of(AuthorizeNet::Customer)
  end
  it "should support converting itself into a hash" do
    customer = AuthorizeNet::Customer.new
    customer.should respond_to(:to_hash)
    customer.to_hash.should be_kind_of(Hash)
  end
  it "should ignore unknown fields" do
    customer = AuthorizeNet::Customer.new(:name => '123')
    hash = customer.to_hash
    hash.should be_kind_of(Hash)
    hash.should == {}
  end
  it "should accept known fields" do
    # :id maps to the gateway's cust_id field.
    customer = AuthorizeNet::Customer.new(:id => '123')
    hash = customer.to_hash
    hash.should be_kind_of(Hash)
    hash.should == {:cust_id => '123'}
  end
  it "should accept an address record" do
    address = AuthorizeNet::Address.new(:first_name => 'Tester', :last_name => 'Testerson')
    customer = AuthorizeNet::Customer.new(:address => address)
    hash = customer.to_hash
    hash.should be_kind_of(Hash)
    hash.should == {:first_name => 'Tester', :last_name => 'Testerson'}
  end
end
# Specs for the EmailReceipt value object (closing `end` of this
# describe block lies just past this excerpt).
describe AuthorizeNet::EmailReceipt do
  before do
  end
  it "should support instantiation" do
    AuthorizeNet::EmailReceipt.new.should be_instance_of(AuthorizeNet::EmailReceipt)
  end
  it "should support converting itself into a hash" do
    email = AuthorizeNet::EmailReceipt.new
    email.should respond_to(:to_hash)
    email.to_hash.should be_kind_of(Hash)
  end
  it "should ignore unknown fields" do
    email = AuthorizeNet::EmailReceipt.new(:name => '123')
    hash = email.to_hash
    hash.should be_kind_of(Hash)
    hash.should == {}
  end
  it "should accept known fields" do
    email = AuthorizeNet::EmailReceipt.new(:header => '123')
    hash = email.to_hash
    hash.should be_kind_of(Hash)
    hash.should == {:header => '123'}
  end
end | {
"content_hash": "42c0c8ea708c87c0d022ce363aebc97b",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 140,
"avg_line_length": 28.51,
"alnum_prop": 0.6902841108383023,
"repo_name": "jinutm/silvfinal",
"id": "ff6609a6826025f89eca33d54489f7bab5a11113",
"size": "5702",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "vendor/bundle/ruby/2.1.0/gems/authorize-net-1.5.2/spec/authorize_net_spec.rb",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "108962"
},
{
"name": "CoffeeScript",
"bytes": "47041"
},
{
"name": "JavaScript",
"bytes": "234760"
},
{
"name": "Ruby",
"bytes": "603350"
},
{
"name": "Shell",
"bytes": "106"
}
],
"symlink_target": ""
} |
package jdepend.parse.sql;
import java.util.ArrayList;
import java.util.List;
import jdepend.framework.log.LogUtil;
import jdepend.framework.util.StringUtil;
import jdepend.metadata.TableInfo;
import net.sf.jsqlparser.parser.CCJSqlParserUtil;
import net.sf.jsqlparser.statement.Statement;
import net.sf.jsqlparser.statement.delete.Delete;
import net.sf.jsqlparser.statement.insert.Insert;
import net.sf.jsqlparser.statement.select.Select;
import net.sf.jsqlparser.statement.update.Update;
import net.sf.jsqlparser.util.TablesNamesFinder;
public class SqlParserThird extends SqlParser {

    /**
     * Parses a single SQL statement with JSqlParser and reports, for each
     * table the statement references, the CRUD operation it performs.
     *
     * @param sql the SQL text to parse
     * @return one {@link TableInfo} per non-empty table name; an empty list
     *         when the statement type is not SELECT/INSERT/DELETE/UPDATE;
     *         {@code null} when parsing fails
     */
    public List<TableInfo> parserSql(String sql) {
        try {
            Statement statement = CCJSqlParserUtil.parse(sql);
            TablesNamesFinder finder = new TablesNamesFinder();

            // Determine the referenced tables and the operation kind from
            // the concrete statement type.
            List<String> tableNames = null;
            String operation = null;
            if (statement instanceof Select) {
                tableNames = finder.getTableList((Select) statement);
                operation = TableInfo.Read;
            } else if (statement instanceof Insert) {
                tableNames = finder.getTableList((Insert) statement);
                operation = TableInfo.Create;
            } else if (statement instanceof Delete) {
                tableNames = finder.getTableList((Delete) statement);
                operation = TableInfo.Delete;
            } else if (statement instanceof Update) {
                tableNames = finder.getTableList((Update) statement);
                operation = TableInfo.Update;
            }

            List<TableInfo> tables = new ArrayList<TableInfo>();
            if (tableNames != null) {
                for (String tableName : tableNames) {
                    if (!StringUtil.isEmpty(tableName)) {
                        tables.add(new TableInfo(tableName, operation));
                    }
                }
            }
            return tables;
        } catch (Exception e) {
            e.printStackTrace();
            LogUtil.getInstance(SqlParserThird.class).systemError(
                    "解析sql出错[" + sql + "]");
            return null;
        }
    }
}
| {
"content_hash": "0791b88916dcb4426ef3652a784f6871",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 68,
"avg_line_length": 31.887096774193548,
"alnum_prop": 0.7152250885179565,
"repo_name": "jdepend/cooper",
"id": "8af7876bdb6e1c71a89386ef4aecb164f4745b23",
"size": "1985",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cooper-source/cooper-parse/src/main/java/jdepend/parse/sql/SqlParserThird.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4321"
},
{
"name": "CSS",
"bytes": "44511"
},
{
"name": "HTML",
"bytes": "175873"
},
{
"name": "Java",
"bytes": "2383877"
},
{
"name": "JavaScript",
"bytes": "146288"
}
],
"symlink_target": ""
} |
[](https://travis-ci.org/kitsonk/sutabu)
A minimalist AMD stubbing library for use in testing.
This requires ES5 support. Therefore IE8 and prior are not supported.
## Usage
The main way to use *sutabu* is to generate stub functions. Generating a new stub function looks like this:
```javascript
define([ 'sutabu/stub' ], function (stub) {
var stubFunc = stub();
var result = stubFunc();
console.log(stubFunc.callCount); // outputs: 1
});
```
Each generated stub function contains 3 non-enumerable properties that dictate how the stub behaves. If any of these are
set, they will be processed when the function is invoked and returned:
| Property | Description |
|----------| --------------------------------------------------------------------------------------------------------- |
| .throws | If not undefined, this will be thrown when the stub function is called. |
| .calls | Assumed to be a function, it will be called with the same scope and arguments as the stub function. |
| .returns | Whatever is assigned here is returned; it defaults to `undefined`, so stub functions return `undefined` by default. |
When a new stub function is created, you can pass a single argument that will define how the stub behaves when called.
If you pass an instance of `Error` it will be assigned to `.throws`, if you pass a function, it will be
`.calls` and if you pass any other type of argument, it is assigned to `.returns`.
### Stubbing Functions
If you want to specifically stub a function, you can call `stub.func()`. Basic usage is something like this:
```javascript
define([ 'sutabu/stub' ], function (stub) {
var obj = {};
var handle = stub.func(obj, 'foo', function () {
return 'bar';
});
console.log(obj.foo()); // outputs: 'bar'
console.log(obj.foo.callCount); // outputs: 1
handle.remove(); // removes the stub
});
```
## Testing
Testing is done via [The Intern](https://theintern.io) and uses [Grunt](http://gruntjs.com/) for task automation. To
run the unit tests, first install the prerequisites via [npm](https://www.npmjs.org/):
```bash
npm install
```
To run the unit tests locally, against Node:
```bash
grunt test:node
```
To run the tests using [SauceLabs](https://saucelabs.com/), assuming you have your SauceLabs credentials available in
the environment:
```bash
grunt test
```
| {
"content_hash": "5816aee7fa19db2fe6697a50dea3f099",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 139,
"avg_line_length": 34.18055555555556,
"alnum_prop": 0.6761479073547338,
"repo_name": "kitsonk/sutabu",
"id": "7732cbaef9abdcda655428ec80314f63b02ea19c",
"size": "2471",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "README.md",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "9231"
}
],
"symlink_target": ""
} |
This use of `for` is functionally equivalent
to `while (condition)` in C and similar languages.
```
$fiveEs = ""
for $fiveEs.length < 5 {
$fiveEs += "e"
}
say($fiveEs) # eeeee
```
[Next: Infinite loops](14-infinite-loops.md) | {
"content_hash": "af6f2293172f9dea5e204635ff3f2e04",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 45,
"avg_line_length": 16.428571428571427,
"alnum_prop": 0.6347826086956522,
"repo_name": "cooper/ferret",
"id": "1e56cee8e760f16a27534d8de166eb7ea29d8916",
"size": "250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/intro/13-for-conditional.md",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Forth",
"bytes": "80168"
},
{
"name": "Perl",
"bytes": "524174"
}
],
"symlink_target": ""
} |
package com.amazonaws.services.comprehend.model.transform;
import java.math.*;
import javax.annotation.Generated;
import com.amazonaws.services.comprehend.model.*;
import com.amazonaws.transform.SimpleTypeJsonUnmarshallers.*;
import com.amazonaws.transform.*;
import com.fasterxml.jackson.core.JsonToken;
import static com.fasterxml.jackson.core.JsonToken.*;
/**
 * JSON unmarshaller for {@link DescribeEntitiesDetectionJobResult}. Walks the
 * streaming JSON token-by-token and populates the result's single field.
 * Auto-generated by the AWS SDK code generator; do not hand-edit upstream.
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class DescribeEntitiesDetectionJobResultJsonUnmarshaller implements Unmarshaller<DescribeEntitiesDetectionJobResult, JsonUnmarshallerContext> {

    public DescribeEntitiesDetectionJobResult unmarshall(JsonUnmarshallerContext context) throws Exception {
        DescribeEntitiesDetectionJobResult describeEntitiesDetectionJobResult = new DescribeEntitiesDetectionJobResult();

        // Track where this object started so we know when to stop: any
        // END_ARRAY/END_OBJECT at or above our depth closes this object.
        int originalDepth = context.getCurrentDepth();
        String currentParentElement = context.getCurrentParentElement();
        int targetDepth = originalDepth + 1;

        JsonToken token = context.getCurrentToken();
        if (token == null)
            token = context.nextToken();
        // A JSON null for the whole object yields an empty result.
        if (token == VALUE_NULL) {
            return describeEntitiesDetectionJobResult;
        }

        while (true) {
            if (token == null)
                break;

            if (token == FIELD_NAME || token == START_OBJECT) {
                // Only one field is expected; delegate its parsing to the
                // nested properties unmarshaller.
                if (context.testExpression("EntitiesDetectionJobProperties", targetDepth)) {
                    context.nextToken();
                    describeEntitiesDetectionJobResult.setEntitiesDetectionJobProperties(EntitiesDetectionJobPropertiesJsonUnmarshaller.getInstance()
                            .unmarshall(context));
                }
            } else if (token == END_ARRAY || token == END_OBJECT) {
                if (context.getLastParsedParentElement() == null || context.getLastParsedParentElement().equals(currentParentElement)) {
                    if (context.getCurrentDepth() <= originalDepth)
                        break;
                }
            }
            token = context.nextToken();
        }

        return describeEntitiesDetectionJobResult;
    }

    private static DescribeEntitiesDetectionJobResultJsonUnmarshaller instance;

    // Lazily created singleton; unsynchronized by design in the generated
    // SDK code (duplicate creation is harmless for a stateless parser).
    public static DescribeEntitiesDetectionJobResultJsonUnmarshaller getInstance() {
        if (instance == null)
            instance = new DescribeEntitiesDetectionJobResultJsonUnmarshaller();
        return instance;
    }
}
| {
"content_hash": "310d3e99561b4300caf9c555a9e577bf",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 150,
"avg_line_length": 39.3125,
"alnum_prop": 0.685214626391097,
"repo_name": "aws/aws-sdk-java",
"id": "6105872ace40339ac85940e5bfde606c3a0dffe7",
"size": "3096",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aws-java-sdk-comprehend/src/main/java/com/amazonaws/services/comprehend/model/transform/DescribeEntitiesDetectionJobResultJsonUnmarshaller.java",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
// CEudora4AdbkIOPluginDLL.h
//
// Copyright 2006, Cyrus Daboo. All Rights Reserved.
//
// Created: 14-Jan-1998
// Author: Cyrus Daboo
// Platforms: Mac OS, Win32
//
// Description:
// This class implements Eudora addressbook import DLL based plug-in for use in Mulberry.
//
// History:
// 14-Jan-1998: Created initial header and implementation.
//
#ifndef __EUDORA4ADBKIO_PLUGIN_MULBERRY__
#define __EUDORA4ADBKIO_PLUGIN_MULBERRY__

#include "CAdbkIOPluginDLL.h"

// Classes

// Eudora 4 address-book import/export plug-in. Declaration order of the
// virtual methods must not change (binary plug-in interface).
class CEudora4AdbkIOPluginDLL : public CAdbkIOPluginDLL
{
public:

	// Actual plug-in class

	CEudora4AdbkIOPluginDLL();
	virtual ~CEudora4AdbkIOPluginDLL();
	
	// Entry codes
	virtual void	Initialise(void);					// Initialisation
	virtual bool	CanRun(void);						// Test whether plug-in can run
	
	// Registration
	virtual bool UseRegistration(unsigned long* key);		// Does plug-in need to be registered
	virtual bool CanDemo(void);								// Can plug-in run as demo

	// Entry codes
	virtual long	ExportAddress(SAdbkIOPluginAddress* addr);	// Export an address
	virtual long	ExportGroup(SAdbkIOPluginGroup* grp);		// Export a group

protected:

	// These should be returned by specific sub-class
	virtual const char* GetName(void) const;			// Returns the name of the plug-in
	virtual long GetVersion(void) const;				// Returns the version number of the plug-in
	virtual EPluginType GetType(void) const;			// Returns the type of the plug-in
	virtual const char* GetManufacturer(void) const;	// Returns manufacturer of plug-in
	virtual const char* GetDescription(void) const;		// Returns description of plug-in

	virtual long ImportAddresses(char* data);			// Do the actual import - pure virtual - must do in subclass

private:
	// Helpers for Eudora's on-disk format, where embedded CRLFs are
	// stored as 0x03 bytes.
	char* ConvertFromCRLF(const char* str);				// Convert CRLFs -> 0x03
	char* ConvertToCRLF(const char* str);				// Convert 0x03 -> CRLFs
	void AddressOut(const char* str);					// Do quote etc of friendly name
	SAdbkIOPluginAddress* AddressListParse(char* str);	// Do quote etc of friendly name
	void ParseAddress(SAdbkIOPluginAddress* adr, const char* txt);	// Parse address from text
	void ParseNotes(SAdbkIOPluginAddress* adr, const char* txt);	// Parse notes from text
};

#endif
| {
"content_hash": "6dcff1820504bf3e596b32e945a08fae",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 105,
"avg_line_length": 34.171875,
"alnum_prop": 0.7370827617741198,
"repo_name": "mbert/mulberry-main",
"id": "599cb415cde8fd6788eb75dd898be4dea283859e",
"size": "2822",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Plug-ins/Eudora4AdbkIO/sources/CEudora4AdbkIOPluginDLL.h",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "569106"
},
{
"name": "Batchfile",
"bytes": "80126"
},
{
"name": "C",
"bytes": "13996926"
},
{
"name": "C++",
"bytes": "36795816"
},
{
"name": "Component Pascal",
"bytes": "63931"
},
{
"name": "DIGITAL Command Language",
"bytes": "273849"
},
{
"name": "Emacs Lisp",
"bytes": "1639"
},
{
"name": "Groff",
"bytes": "5"
},
{
"name": "HTML",
"bytes": "219178"
},
{
"name": "Logos",
"bytes": "108920"
},
{
"name": "Makefile",
"bytes": "11884"
},
{
"name": "Objective-C",
"bytes": "129690"
},
{
"name": "Perl",
"bytes": "1749015"
},
{
"name": "Perl6",
"bytes": "27602"
},
{
"name": "Prolog",
"bytes": "29177"
},
{
"name": "Python",
"bytes": "8651"
},
{
"name": "R",
"bytes": "741731"
},
{
"name": "Rebol",
"bytes": "179366"
},
{
"name": "Scheme",
"bytes": "4249"
},
{
"name": "Shell",
"bytes": "172439"
},
{
"name": "XS",
"bytes": "4319"
},
{
"name": "eC",
"bytes": "4568"
}
],
"symlink_target": ""
} |
<?php /* Admin "Posts" list template (sfBlogsPlugin): filter toolbar,
         sortable post grid, preview pane and JS configuration. */ ?>
<?php use_javascript('/sfBlogsPlugin/admin/js/jquery.autopager.js') ?>
<?php use_javascript('/sfBlogsPlugin/admin/js/jquery.datagrid.js') ?>
<?php use_javascript('/sfBlogsPlugin/admin/js/postsBehaviors.js') ?>
<?php use_helper('I18N') ?>
<?php $sf_response->setTitle('Posts') ?>
<?php // Without a blog there is nothing to list: show an invite and stop. ?>
<?php if (!$hasBlog): ?>
<div class="invite"><?php echo __('In order to be able to write a post, you must first %link%.', array('%link%' => link_to(__('create a blog'), 'sfBlogAdmin/blogEdit'))) ?></div>
<?php return ?>
<?php endif ?>
<?php slot('main_form') ?>
<?php echo form_tag('sfBlogAdmin/posts', array('method' => 'get', 'id' => 'post_filter_form', 'class' => 'filter')) ?>
<?php echo input_hidden_tag('filter', 'filter') ?>
<?php end_slot() ?>
<?php slot('main_form_end') ?>
</form>
<?php end_slot() ?>
<?php // Toolbar slot: free-text search plus contextual action buttons. ?>
<?php slot('toolbar') ?>
<div class="end">
<?php echo input_tag('filters[text]', isset($filters['text']) && $filters['text'] ? $filters['text'] : __('search')) ?>
</div>
<ul class="buttons">
<li class="active post_add"><?php echo link_to(__('New post'), 'sfBlogAdmin/postEdit') ?></li>
<li class="contextual post_edit"><a><?php echo __('Edit') ?></a></li>
<li class="contextual publish draft"><a><?php echo __('Publish') ?></a></li>
<li class="contextual post_delete"><a><?php echo __('Delete') ?></a></li>
</ul>
<?php end_slot() ?>
<?php slot('navigation') ?>
<?php include_component('sfBlogAdmin', 'postFilter', array('filters' => $filters)) ?>
<?php end_slot() ?>
<?php // Sortable grid: each header toggles asc/desc via the query string. ?>
<div id="list" class="scrollable">
<table border="0" cellspacing="0" cellpadding="0" class="list">
<thead>
<tr>
<th width="35%" class="<?php echo $sort['sort'] == 'title' ? $sort['type'] : '' ?>">
<?php echo link_to(__('Title'), 'sfBlogAdmin/posts', array(
  'query_string' => 'sort=title&type='.($sort['sort'] == 'title' ? ($sort['type'] == 'asc' ? 'desc' : 'asc') : 'asc')
)) ?>
</th>
<th width="20%" class="<?php echo $sort['sort'] == 'blog' ? $sort['type'] : '' ?>">
<?php echo link_to(__('Blog'), 'sfBlogAdmin/posts', array(
  'query_string' => 'sort=blog&type='.($sort['sort'] == 'blog' ? ($sort['type'] == 'asc' ? 'desc' : 'asc') : 'asc')
)) ?>
</th>
<th width="10%" class="<?php echo $sort['sort'] == 'author' ? $sort['type'] : '' ?>">
<?php echo link_to(__('Author'), 'sfBlogAdmin/posts', array(
  'query_string' => 'sort=author&type='.($sort['sort'] == 'author' ? ($sort['type'] == 'asc' ? 'desc' : 'asc') : 'asc')
)) ?>
</th>
<th width="15%"><?php echo __('Tags') ?></th>
<th width="10%" class="inverted <?php echo $sort['sort'] == 'default' ? $sort['type'] : '' ?>">
<?php echo link_to(__('Published at'), 'sfBlogAdmin/posts', array(
  'query_string' => 'sort=default&type='.($sort['sort'] == 'default' ? ($sort['type'] == 'asc' ? 'desc' : 'asc') : 'desc')
)) ?>
</th>
<th width="10%" class="<?php echo $sort['sort'] == 'nb_comments' ? $sort['type'] : '' ?>">
<?php echo link_to(__('Nb comments'), 'sfBlogAdmin/posts', array(
  'query_string' => 'sort=nb_comments&type='.($sort['sort'] == 'nb_comments' ? ($sort['type'] == 'asc' ? 'desc' : 'asc') : 'asc')
)) ?>
</th>
</tr>
</thead>
<tbody class="items">
<?php include_partial('sfBlogAdmin/postList', array('pager' => $pager)) ?>
</tbody>
</table>
</div>
<div id="preview">
</div>
<div></div> <?php // this empty div is necessary for the splitter, and becomes the separator ?>
<?php echo input_hidden_tag('sort', $sort['sort']) ?>
<?php echo input_hidden_tag('type', $sort['type']) ?>
<?php // Runtime configuration consumed by postsBehaviors.js. ?>
<script type="text/javascript" charset="utf-8">
//<![CDATA[
var current_page = <?php echo $pager->getPage() ?>;
var max_page = <?php echo $pager->getLastPage() ?>;
var initial_search = '<?php echo __('search') ?>';
var preview_url = '<?php echo url_for('sfBlogAdmin/postPreview') ?>';
var delete_message = '<?php echo __('Are you sure you want to delete this post?') ?>';
var delete_url = '<?php echo url_for('sfBlogAdmin/postDelete') ?>';
var publish_message = '<?php echo __('Are you sure?') ?>';
var publish_url = '<?php echo url_for('sfBlogAdmin/togglePublishPost') ?>';
//]]>
</script> | {
"content_hash": "662c84348675df1c8b43f1d6bd9d442a",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 180,
"avg_line_length": 46.13978494623656,
"alnum_prop": 0.550221393614542,
"repo_name": "Symfony-Plugins/sfBlogsPlugin",
"id": "f188ea5b7c18cb05f54877522ec49c771e5ed0e1",
"size": "4291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/sfBlogAdmin/templates/postsSuccess.php",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "212855"
},
{
"name": "PHP",
"bytes": "121848"
}
],
"symlink_target": ""
} |
# Balena base image: Revolution Pi Connect (ARM v7), Ubuntu bionic, "run" variant.
FROM balenalib/revpi-connect-ubuntu:bionic-run
# Native libraries the .NET Core runtime links against.
RUN apt-get update \
&& apt-get install -y --no-install-recommends \
ca-certificates \
\
# .NET Core dependencies
libc6 \
libgcc1 \
libgssapi-krb5-2 \
libicu60 \
libssl1.1 \
libstdc++6 \
zlib1g \
&& rm -rf /var/lib/apt/lists/*
# Configure web servers to bind to port 80 when present
ENV ASPNETCORE_URLS=http://+:80 \
# Enable detection of running in a container
DOTNET_RUNNING_IN_CONTAINER=true
# Install .NET Core
# Download the runtime tarball and verify its published SHA-512 before unpacking.
ENV DOTNET_VERSION 3.1.21
RUN curl -SL --output dotnet.tar.gz "https://dotnetcli.blob.core.windows.net/dotnet/Runtime/$DOTNET_VERSION/dotnet-runtime-$DOTNET_VERSION-linux-arm.tar.gz" \
&& dotnet_sha512='9c3fb0f5f860f53ab4d15124c2c23a83412ea916ad6155c0f39f066057dcbb3ca6911ae26daf8a36dbfbc09c17d6565c425fbdf3db9114a28c66944382b71000' \
&& echo "$dotnet_sha512 dotnet.tar.gz" | sha512sum -c - \
&& mkdir -p /usr/share/dotnet \
&& tar -zxf dotnet.tar.gz -C /usr/share/dotnet \
&& rm dotnet.tar.gz \
&& ln -s /usr/share/dotnet/dotnet /usr/bin/dotnet
# Add the ASP.NET Core shared framework on top of the runtime, same checksum scheme.
ENV ASPNETCORE_VERSION 3.1.21
RUN curl -SL --output aspnetcore.tar.gz "https://dotnetcli.blob.core.windows.net/dotnet/aspnetcore/Runtime/$ASPNETCORE_VERSION/aspnetcore-runtime-$ASPNETCORE_VERSION-linux-arm.tar.gz" \
&& aspnetcore_sha512='3f7e1839946c65c437a8b55f1f66b15f8faa729abd19874cb2507c10fb5ae6a572c7d4943141b8a450ee74082c3719d4f146c79f2fabf48716ff28be2720effa' \
&& echo "$aspnetcore_sha512 aspnetcore.tar.gz" | sha512sum -c - \
&& mkdir -p /usr/share/dotnet \
&& tar -zxf aspnetcore.tar.gz -C /usr/share/dotnet ./shared/Microsoft.AspNetCore.App \
&& rm aspnetcore.tar.gz
CMD ["echo","'No CMD command was set in Dockerfile! Details about CMD command could be found in Dockerfile Guide section in our Docs. Here's the link: https://balena.io/docs"]
# Smoke-test the installed stack, then remove the test script from the image.
RUN curl -SLO "https://raw.githubusercontent.com/balena-io-library/base-images/44e597e40f2010cdde15b3ba1e397aea3a5c5271/scripts/assets/tests/test-stack@dotnet.sh" \
&& echo "Running test-stack@dotnet" \
&& chmod +x test-stack@dotnet.sh \
&& bash test-stack@dotnet.sh \
&& rm -rf test-stack@dotnet.sh
# Record image details for the balena-info command.
RUN [ ! -d /.balena/messages ] && mkdir -p /.balena/messages; echo 'Here are a few details about this Docker image (For more information please visit https://www.balena.io/docs/reference/base-images/base-images/): \nArchitecture: ARM v7 \nOS: Ubuntu bionic \nVariant: run variant \nDefault variable(s): UDEV=off \nThe following software stack is preinstalled: \ndotnet 3.1-aspnet \nExtra features: \n- Easy way to install packages with `install_packages <package-name>` command \n- Run anywhere with cross-build feature (for ARM only) \n- Keep the container idling with `balena-idle` command \n- Show base image details with `balena-info` command' > /.balena/messages/image-info
# One-shot /bin/sh shim: prints balena-info on first shell invocation, then restores the real shell.
RUN echo '#!/bin/sh.real\nbalena-info\nrm -f /bin/sh\ncp /bin/sh.real /bin/sh\n/bin/sh "$@"' > /bin/sh-shim \
&& chmod +x /bin/sh-shim \
&& cp /bin/sh /bin/sh.real \
&& mv /bin/sh-shim /bin/sh
&& mv /bin/sh-shim /bin/sh | {
"content_hash": "bc7251be8da85c71a2b8cb9d212f2956",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 679,
"avg_line_length": 56.36363636363637,
"alnum_prop": 0.7148387096774194,
"repo_name": "resin-io-library/base-images",
"id": "0b2a96cd057c87cb3fbc604d8dea7c8e5b4ed61a",
"size": "3121",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "balena-base-images/dotnet/revpi-connect/ubuntu/bionic/3.1-aspnet/run/Dockerfile",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "71234697"
},
{
"name": "JavaScript",
"bytes": "13096"
},
{
"name": "Shell",
"bytes": "12051936"
},
{
"name": "Smarty",
"bytes": "59789"
}
],
"symlink_target": ""
} |
package org.springframework.cloud.dataflow.server.db.migration.postgresql;
import java.util.Arrays;
import org.flywaydb.core.api.migration.BaseJavaMigration;
import org.flywaydb.core.api.migration.Context;
import org.springframework.cloud.dataflow.common.flyway.SqlCommand;
import org.springframework.cloud.dataflow.common.flyway.SqlCommandsRunner;
/**
* This migration class adds description column to stream_definitions and task_definitions
* tables and original_definition column to stream_definitions.
*
* @author Daniel Serleg
* @author Ilayaperumal Gopinathan
* @author Michael Minella
*
* @since 2.3
*/
/**
 * Flyway Java migration (v2) that:
 * <ul>
 * <li>adds a {@code description} column to {@code stream_definitions} and {@code task_definitions},</li>
 * <li>adds an {@code original_definition} column to {@code stream_definitions} and seeds it from the current definition,</li>
 * <li>creates the {@code task_execution_metadata} table and its id sequence.</li>
 * </ul>
 *
 * @author Daniel Serleg
 * @author Ilayaperumal Gopinathan
 * @author Michael Minella
 *
 * @since 2.3
 */
public class V2__Add_Descriptions_OriginalDefinition extends BaseJavaMigration {

	public final static String ALTER_STREAM_DEFINITION_TABLE_DESC = "alter table stream_definitions add column description varchar(255)";

	public final static String ALTER_STREAM_DEFINITION_TABLE_ORIG_DEF = "alter table stream_definitions add column original_definition text";

	public final static String ALTER_TASK_DEFINITION_TABLE = "alter table task_definitions add column description varchar(255)";

	public final static String UPDATE_STREAM_DEFINITION_TABLE_ORIG_DEF = "update stream_definitions set original_definition=definition";

	public final static String CREATE_TASK_METADATA_TABLE =
			"CREATE TABLE task_execution_metadata (\n" +
					" id int8 NOT NULL,\n" +
					" task_execution_id int8 NOT NULL,\n" +
					" task_execution_manifest TEXT,\n" +
					" primary key (id),\n" +
					" CONSTRAINT TASK_METADATA_FK FOREIGN KEY (TASK_EXECUTION_ID)\n" +
					" REFERENCES TASK_EXECUTION(TASK_EXECUTION_ID)\n" +
					")";

	private final static String CREATE_TASK_METADATA_SEQUENCE =
			"CREATE SEQUENCE task_execution_metadata_seq MAXVALUE 9223372036854775807 NO CYCLE";

	/** Executes the SQL commands against the migration's JDBC connection. */
	private final SqlCommandsRunner commandRunner = new SqlCommandsRunner();

	/**
	 * Applies the whole command sequence in order. Ordering matters: the new
	 * columns must exist before the UPDATE that backfills {@code original_definition}.
	 */
	@Override
	public void migrate(Context context) throws Exception {
		commandRunner.execute(context.getConnection(), commandSequence());
	}

	/** Builds the fixed, ordered command sequence for this migration. */
	private static java.util.List<SqlCommand> commandSequence() {
		return Arrays.asList(
				SqlCommand.from(ALTER_STREAM_DEFINITION_TABLE_DESC),
				SqlCommand.from(ALTER_STREAM_DEFINITION_TABLE_ORIG_DEF),
				SqlCommand.from(ALTER_TASK_DEFINITION_TABLE),
				SqlCommand.from(UPDATE_STREAM_DEFINITION_TABLE_ORIG_DEF),
				SqlCommand.from(CREATE_TASK_METADATA_TABLE),
				SqlCommand.from(CREATE_TASK_METADATA_SEQUENCE));
	}
}
| {
"content_hash": "2eee27a4b4bbbad4cbf7e5b21b6a3267",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 138,
"avg_line_length": 40.39655172413793,
"alnum_prop": 0.7622705932565087,
"repo_name": "markpollack/spring-cloud-dataflow",
"id": "4f3362897b133fd3b81314bbc647303b810f05ab",
"size": "2959",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "spring-cloud-dataflow-server-core/src/main/java/org/springframework/cloud/dataflow/server/db/migration/postgresql/V2__Add_Descriptions_OriginalDefinition.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "889"
},
{
"name": "Java",
"bytes": "3691457"
},
{
"name": "Ruby",
"bytes": "423"
},
{
"name": "Shell",
"bytes": "5861"
},
{
"name": "TSQL",
"bytes": "51360"
},
{
"name": "Vim Snippet",
"bytes": "190"
},
{
"name": "XSLT",
"bytes": "863"
}
],
"symlink_target": ""
} |
/**
* The tests for the state transfer support.
*/
package tests.backtype.storm.stateTransfer; | {
"content_hash": "76812aa93bfa566ee8d583655f107961",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 44,
"avg_line_length": 24.75,
"alnum_prop": 0.7272727272727273,
"repo_name": "QualiMaster/Infrastructure",
"id": "96ead9209de03298e4032da01526e46285033447",
"size": "99",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "StormStateTransfer/src/tests/backtype/storm/stateTransfer/package-info.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "575"
},
{
"name": "Java",
"bytes": "4750737"
},
{
"name": "Shell",
"bytes": "9223"
}
],
"symlink_target": ""
} |
package org.eclipse.net4j.util.io;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Collections;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
import java.util.zip.ZipOutputStream;
/**
* @author Eike Stepper
*/
/**
 * Static helpers for creating and extracting ZIP archives. Both directions are
 * driven by small callback interfaces so entry sources/sinks are not tied to
 * the file system; file-system implementations are provided as nested classes.
 *
 * @author Eike Stepper
 */
public final class ZIPUtil
{
  // Default copy-buffer size in bytes.
  // NOTE(review): the misspelling ("DEFALULT") is kept because the constant is public API.
  public static final int DEFALULT_BUFFER_SIZE = 4096;

  // Comparator results used by FileSystemZipHandler: keep the pair order, or swap it.
  private static final int ORDER_KEEP = -1;

  private static final int ORDER_SWAP = 1;

  private ZIPUtil()
  {
  }

  /**
   * Writes the archive {@code zipFile} by repeatedly asking {@code handler} to
   * describe the next entry. The loop ends when the handler leaves the context
   * empty (does not set a name). For file entries the handler must also supply
   * an input stream, which is consumed and closed here. The context is reset
   * after every entry, even if copying fails.
   *
   * @throws IORuntimeException wrapping any underlying {@link IOException}.
   */
  public static void zip(ZipEntryHandler handler, File zipFile) throws IORuntimeException
  {
    final byte[] buffer = new byte[DEFALULT_BUFFER_SIZE];
    final EntryContext context = new EntryContext();
    FileOutputStream fos = IOUtil.openOutputStream(zipFile);
    ZipOutputStream zos = null;
    InputStream input = null;
    ZipEntry entry = null;
    try
    {
      zos = new ZipOutputStream(new BufferedOutputStream(fos, DEFALULT_BUFFER_SIZE));
      for (;;)
      {
        handler.handleEntry(context);
        if (context.isEmpty())
        {
          // The handler produced no entry: the archive is complete.
          break;
        }
        try
        {
          // ZIP entry names always use forward slashes, regardless of platform.
          String name = context.getName().replace(File.separatorChar, '/');
          entry = new ZipEntry(name);
          zos.putNextEntry(entry);
          if (!context.isDirectory())
          {
            input = context.getInputStream();
            if (input == null)
            {
              throw new IllegalStateException("Input is null for zip entry " + name); //$NON-NLS-1$
            }
            IOUtil.copy(input, zos, buffer);
          }
        }
        finally
        {
          // Always release the entry stream and rearm the context for the next round.
          IOUtil.closeSilent(input);
          if (entry != null)
          {
            zos.closeEntry();
          }
          context.reset();
        }
      }
    }
    catch (IOException ex)
    {
      throw new IORuntimeException(ex);
    }
    finally
    {
      IOUtil.closeSilent(zos);
      IOUtil.closeSilent(fos);
    }
  }

  /**
   * Zips a file-system folder. With {@code excludeRoot} the folder's children
   * become top-level entries; otherwise the folder itself is the top entry.
   */
  public static void zip(File sourceFolder, boolean excludeRoot, File zipFile)
  {
    zip(new FileSystemZipHandler(sourceFolder, excludeRoot), zipFile);
  }

  /**
   * Reads {@code zipFile} and forwards each entry to {@code handler},
   * dispatching directory and file entries separately.
   *
   * @throws IORuntimeException wrapping any underlying {@link IOException}.
   */
  public static void unzip(File zipFile, UnzipHandler handler) throws IORuntimeException
  {
    FileInputStream fis = IOUtil.openInputStream(zipFile);
    ZipInputStream zis = null;
    try
    {
      zis = new ZipInputStream(new BufferedInputStream(fis, DEFALULT_BUFFER_SIZE));
      ZipEntry entry;
      while ((entry = zis.getNextEntry()) != null)
      {
        if (entry.isDirectory())
        {
          handler.unzipDirectory(entry.getName());
        }
        else
        {
          // TODO Provide delegating InputStream that ignores close()
          handler.unzipFile(entry.getName(), zis);
        }
      }
    }
    catch (IOException ex)
    {
      throw new IORuntimeException(ex);
    }
    finally
    {
      IOUtil.closeSilent(zis);
      IOUtil.closeSilent(fis);
    }
  }

  /**
   * Extracts {@code zipFile} below {@code targetFolder}, creating directories as needed.
   */
  public static void unzip(File zipFile, File targetFolder) throws IORuntimeException
  {
    unzip(zipFile, new FileSystemUnzipHandler(targetFolder, DEFALULT_BUFFER_SIZE));
  }

  /**
   * Callback used by {@link ZIPUtil#zip(ZipEntryHandler, File)} to supply one
   * archive entry per invocation (via the passed context).
   *
   * @author Eike Stepper
   */
  public interface ZipEntryHandler
  {
    public void handleEntry(EntryContext context) throws IOException;
  }

  /**
   * Callback used by {@link ZIPUtil#unzip(File, UnzipHandler)} to consume
   * extracted entries.
   *
   * @author Eike Stepper
   */
  public interface UnzipHandler
  {
    public void unzipDirectory(String name) throws IOException;

    public void unzipFile(String name, InputStream zipStream) throws IOException;
  }

  /**
   * Mutable carrier passed to {@link ZipEntryHandler#handleEntry(EntryContext)}.
   * A null name (the state after {@link #reset()}) signals "no more entries";
   * the initial value is a distinct non-null empty string so a brand-new
   * context is not considered empty before the handler has run.
   *
   * @author Eike Stepper
   */
  public static final class EntryContext
  {
    private static final String EMPTY = new String();

    private String name = EMPTY;

    private InputStream inputStream;

    private boolean directory;

    EntryContext()
    {
    }

    // Marks the context empty so zip() stops unless the handler sets a new name.
    void reset()
    {
      name = null;
      inputStream = null;
    }

    boolean isEmpty()
    {
      return name == null;
    }

    boolean isDirectory()
    {
      return directory;
    }

    String getName()
    {
      return name;
    }

    InputStream getInputStream()
    {
      return inputStream;
    }

    // Directory entries get a trailing slash, as the ZIP entry convention requires.
    public void setName(String name, boolean directory)
    {
      this.name = name + (directory ? "/" : ""); //$NON-NLS-1$ //$NON-NLS-2$
      this.directory = directory;
    }

    public void setInputStream(InputStream inputStream)
    {
      this.inputStream = inputStream;
    }
  }

  /**
   * {@link ZipEntryHandler} that walks a file-system folder breadth-first and
   * orders entries so that /META-INF and /META-INF/MANIFEST.MF come before
   * everything else (presumably so the archive can serve as a JAR, where tools
   * expect the manifest first -- TODO confirm), remaining paths alphabetical.
   *
   * @author Eike Stepper
   */
  public static final class FileSystemZipHandler implements ZipEntryHandler
  {
    private int sourceFolderLength;

    private transient Iterator<File> files;

    public FileSystemZipHandler(File sourceFolder, boolean excludeRoot)
    {
      File root = excludeRoot ? sourceFolder : sourceFolder.getParentFile();
      sourceFolderLength = root.getAbsolutePath().length();
      if (excludeRoot)
      {
        // Also strip the path separator that follows the root prefix.
        ++sourceFolderLength;
      }
      final int baseLength = sourceFolder.getAbsolutePath().length();
      List<File> list = IOUtil.listBreadthFirst(sourceFolder);
      Collections.sort(list, new Comparator<File>()
      {
        public int compare(File f1, File f2)
        {
          String path1 = getPath(f1, baseLength);
          String path2 = getPath(f2, baseLength);
          // The root itself (empty relative path) always sorts first.
          if (path1.length() == 0)
          {
            return ORDER_KEEP;
          }
          if (path2.length() == 0)
          {
            return ORDER_SWAP;
          }
          if (f1.isDirectory())
          {
            if (f2.isDirectory())
            {
              // f1=dir, f2=dir
              if (path1.equalsIgnoreCase("/META-INF")) //$NON-NLS-1$
              {
                return ORDER_KEEP;
              }
              if (path2.equalsIgnoreCase("/META-INF")) //$NON-NLS-1$
              {
                return ORDER_SWAP;
              }
              return path1.compareTo(path2);
            }
            // f1=dir, f2=file
            if (path1.equalsIgnoreCase("/META-INF")) //$NON-NLS-1$
            {
              return ORDER_KEEP;
            }
            if (path2.equalsIgnoreCase("/META-INF/MANIFEST.MF")) //$NON-NLS-1$
            {
              return ORDER_SWAP;
            }
            return ORDER_KEEP;
          }
          if (f2.isDirectory())
          {
            // f1=file, f2=dir
            if (path2.equalsIgnoreCase("/META-INF")) //$NON-NLS-1$
            {
              return ORDER_SWAP;
            }
            if (path1.equalsIgnoreCase("/META-INF/MANIFEST.MF")) //$NON-NLS-1$
            {
              return ORDER_KEEP;
            }
            return ORDER_SWAP;
          }
          // f1=file, f2=file
          if (path1.equalsIgnoreCase("/META-INF/MANIFEST.MF")) //$NON-NLS-1$
          {
            return ORDER_KEEP;
          }
          if (path2.equalsIgnoreCase("/META-INF/MANIFEST.MF")) //$NON-NLS-1$
          {
            return ORDER_SWAP;
          }
          return path1.compareTo(path2);
        }

        // Path of the file relative to the source folder, with '/' separators.
        private String getPath(File file, int baseLength)
        {
          String absolutePath = file.getAbsolutePath();
          String substring = absolutePath.substring(baseLength);
          String replace = substring.replace(File.separatorChar, '/');
          return replace;
        }
      });
      files = list.iterator();
      if (excludeRoot)
      {
        // Skip the root folder itself; its children become the top-level entries.
        files.next();
      }
    }

    // Pulls the next file into the context; leaves the context empty when exhausted.
    public void handleEntry(EntryContext context) throws IOException
    {
      if (files.hasNext())
      {
        File file = files.next();
        String name = getName(file);
        if (name.length() != 0)
        {
          context.setName(name, file.isDirectory());
          if (file.isFile())
          {
            context.setInputStream(IOUtil.openInputStream(file));
          }
        }
      }
    }

    // Path relative to the configured root (platform separators preserved here).
    protected String getName(File file)
    {
      return file.getAbsolutePath().substring(sourceFolderLength);
    }
  }

  /**
   * {@link UnzipHandler} that materializes entries below a target folder,
   * creating intermediate directories on demand.
   *
   * @author Eike Stepper
   */
  public static final class FileSystemUnzipHandler implements UnzipHandler
  {
    private File targetFolder;

    private transient byte[] buffer;

    public FileSystemUnzipHandler(File targetFolder, int bufferSize)
    {
      this.targetFolder = targetFolder;
      buffer = new byte[bufferSize];
    }

    public File getTargetFolder()
    {
      return targetFolder;
    }

    public void unzipDirectory(String name)
    {
      File directory = new File(targetFolder, name);
      if (!directory.exists())
      {
        directory.mkdirs();
      }
    }

    public void unzipFile(String name, InputStream zipStream)
    {
      File targetFile = new File(targetFolder, name);
      if (!targetFile.getParentFile().exists())
      {
        // Some archives omit explicit directory entries; create parents lazily.
        targetFile.getParentFile().mkdirs();
      }
      FileOutputStream out = IOUtil.openOutputStream(targetFile);
      try
      {
        IOUtil.copy(zipStream, out, buffer);
      }
      finally
      {
        IOUtil.closeSilent(out);
      }
    }
  }
}
| {
"content_hash": "18fa0f3f8465577959c866957d24d7ae",
"timestamp": "",
"source": "github",
"line_count": 399,
"max_line_length": 99,
"avg_line_length": 22.859649122807017,
"alnum_prop": 0.5718671198333516,
"repo_name": "IHTSDO/snow-owl",
"id": "2e00d02f253c91547cf71f38982078eaf5d6d5a1",
"size": "9517",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dependencies/org.eclipse.net4j.util/src/org/eclipse/net4j/util/io/ZIPUtil.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "12031"
},
{
"name": "CSS",
"bytes": "97278"
},
{
"name": "ECL",
"bytes": "27450"
},
{
"name": "GAP",
"bytes": "215641"
},
{
"name": "Groovy",
"bytes": "71763"
},
{
"name": "HTML",
"bytes": "11708"
},
{
"name": "Java",
"bytes": "15201642"
},
{
"name": "JavaScript",
"bytes": "5838380"
},
{
"name": "Prolog",
"bytes": "6673"
},
{
"name": "Shell",
"bytes": "107759"
},
{
"name": "Xtend",
"bytes": "13494"
}
],
"symlink_target": ""
} |
var gulp = require('gulp')
, cache = require('gulp-cache')
, concat = require('gulp-concat')
, jshint = require('gulp-jshint')
, stylish = require('jshint-stylish')
, uglify = require('gulp-uglify')
, rename = require('gulp-rename')
, sass = require('gulp-ruby-sass')
, scsslint = require('gulp-scss-lint')
, imagemin = require('gulp-imagemin')
, frontMatter = require('gulp-front-matter')
, marked = require('gulp-marked')
, applyTemplate = require('gulp-apply-template')
, gulpFilter = require('gulp-filter')
, connect = require('gulp-connect')
, livereload = require('gulp-livereload')
, gulpIgnore = require('gulp-ignore')
, debug = require('gulp-debug')
// Lint settings come straight from package.json's jshintConfig so there is a
// single source of truth for them.
var packageJSON = require('./package')
  , jshintConfig = packageJSON.jshintConfig

// Base folders used by every task below.
var paths = {
  src: 'src/'
  , layout: 'layouts/'
  , dest: 'dest/'
}
// Lints all shared client scripts, then concatenates them into a single
// minified dest/assets/js/main.min.js.
gulp.task('common-scripts', function() {
  var scripts = gulp.src(paths.src + 'assets/js/*.js')
  // validate scripts (lint) -- note: this side branch is not returned, so a
  // lint failure surfaces asynchronously rather than failing the task promise.
  scripts.pipe(jshint(jshintConfig))
    .pipe(jshint.reporter(stylish))
    .pipe(jshint.reporter('fail'))
  // combine and minify scripts
  return scripts.pipe(concat('main.js'))
    .pipe(rename({suffix: '.min'}))
    .pipe(uglify())
    .pipe(gulp.dest(paths.dest + 'assets/js'))
})
// Lints and minifies the per-page scripts; unlike common-scripts these keep
// their own file names (no concat), only gaining a .min suffix.
gulp.task('specific-scripts', function() {
  var scripts = gulp.src(paths.src + 'assets/js/specific/*.js')
  // validate scripts (lint)
  scripts.pipe(jshint(jshintConfig))
    .pipe(jshint.reporter(stylish))
    .pipe(jshint.reporter('fail'))
  // combine and minify scripts
  return scripts.pipe(rename({suffix: '.min'}))
    .pipe(uglify())
    .pipe(gulp.dest(paths.dest + 'assets/js'))
})
// compile sass
// Compiles src/scss to compressed CSS in dest/css; compile errors are logged
// instead of thrown so a watch session keeps running.
gulp.task('sass', function() {
  return sass(paths.src + 'scss/', {style: 'compressed'})
    .on('error', function (err) {
      console.error('Error!', err.message)
    })
    .pipe(rename({suffix: '.min'}))
    .pipe(gulp.dest(paths.dest + 'css'))
})
// Lints the SCSS sources; failReporter makes the task fail on violations.
gulp.task('scss-lint', function() {
  return gulp.src(paths.src + 'scss/*.scss')
    .pipe(scsslint())
    .pipe(scsslint.failReporter())
})
// Optimizes all images; gulp-cache memoizes results so unchanged images are
// not re-compressed on subsequent runs.
gulp.task('images', function() {
  return gulp.src(paths.src + 'assets/img/**/*')
    .pipe(cache(imagemin({ optimizationLevel: 5, progressive: true, interlaced: true })))
    .pipe(gulp.dest(paths.dest + 'assets/img'))
})
// Umbrella task: builds every static asset (scripts, styles, images) in one go.
gulp.task('assets', ['common-scripts', 'specific-scripts', 'scss-lint', 'sass', 'images'])
// Renders src/pages through the EJS layout named by each page's 'template'
// front-matter key, converts markdown output to HTML, and writes the result
// into a per-language folder (front-matter 'lang', defaulting to 'en').
gulp.task('pages', function() {
  var mdFilter = gulpFilter('*.md')
  var pages= gulp.src(paths.src + 'pages/**/*.*')
    .pipe(frontMatter({
      property: 'data',
      remove: true
    }))
  // Pages without front matter are only reported (debug) and excluded below.
  pages.pipe(gulpIgnore.include(function(file) { return Object.keys(file.data).length === 0 }))
    .pipe(debug({title: 'Invalid page: missing front matter'}))
  return pages.pipe(gulpIgnore.include(function(file) { return Object.keys(file.data).length !== 0 }))
    .pipe(applyTemplate({
      engine: 'ejs'
      , template: function (context) {
        return paths.src + paths.layout + context.data.template + '.ejs'
      }
    }))
    // Only .md files pass through marked(); other formats are restored untouched.
    .pipe(mdFilter)
    .pipe(marked())
    .pipe(mdFilter.restore())
    .pipe(gulp.dest(function(file) {
      return paths.dest + (file.data.lang || 'en')
    }))
})
// TODO(review): stub -- registered but not implemented yet.
gulp.task('index', function() {
})
// Rebuilds outputs when their sources change. The globs and task names mirror
// the build tasks defined above. (The previous version triggered a
// non-existent 'scripts' task and watched 'assets/scss' although the sass and
// scss-lint tasks read from 'src/scss'; it also used narrower image/page globs
// than the tasks themselves.)
gulp.task('watch', function() {
  gulp.watch(paths.src + 'assets/js/*.js', ['common-scripts'])
  gulp.watch(paths.src + 'assets/js/specific/*.js', ['specific-scripts'])
  gulp.watch(paths.src + 'scss/*.scss', ['scss-lint', 'sass'])
  gulp.watch(paths.src + 'assets/img/**/*', ['images'])
  gulp.watch(paths.src + 'pages/**/*', ['pages'])
})
// Default Task
// Running plain `gulp` builds all assets and renders all pages.
gulp.task('default', ['assets', 'pages'])
| {
"content_hash": "80b15d7d69169aa48c8393313b99f95f",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 101,
"avg_line_length": 27.830645161290324,
"alnum_prop": 0.6470588235294118,
"repo_name": "Mango-information-systems/static",
"id": "d7975c3dcc3152a0623f1cde94fc2617cc3aa97c",
"size": "3453",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gulpfile.js",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "123"
},
{
"name": "HTML",
"bytes": "67"
},
{
"name": "JavaScript",
"bytes": "17572"
}
],
"symlink_target": ""
} |
<f:layout name="Default"/>
<f:section name="Main">
    <f:comment>
        Three-column page template: backend-layout columns 1 (left), 0 (main
        content) and 2 (right) are each rendered through the lib.dynamicContent
        TypoScript object for the current page (data.uid).
    </f:comment>
    <div class="container">
        <div class="row">
            <div class="col-md-3">
                <f:cObject typoscriptObjectPath="lib.dynamicContent" data="{pageUid: '{data.uid}', colPos: '1'}"/>
            </div>
            <main class="col-md-6" role="main">
                <f:cObject typoscriptObjectPath="lib.dynamicContent" data="{pageUid: '{data.uid}', colPos: '0'}"/>
            </main>
            <div class="col-md-3">
                <f:cObject typoscriptObjectPath="lib.dynamicContent" data="{pageUid: '{data.uid}', colPos: '2'}"/>
            </div>
        </div>
    </div>
</f:section>
| {
"content_hash": "6d8d2821acc910322ef4e11603fe369c",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 102,
"avg_line_length": 31.055555555555557,
"alnum_prop": 0.6207513416815742,
"repo_name": "buxit/bootstrap_package",
"id": "148f12988adb00fbe872bf18ac9a24375575e4ee",
"size": "559",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Resources/Private/Templates/Page/Default3Columns.html",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "10656"
},
{
"name": "CSS",
"bytes": "407431"
},
{
"name": "HTML",
"bytes": "20141"
},
{
"name": "JavaScript",
"bytes": "11699"
},
{
"name": "PHP",
"bytes": "397578"
},
{
"name": "TypeScript",
"bytes": "27681"
}
],
"symlink_target": ""
} |
# Python 2 demo of basic list operations (the print statements are Python 2 syntax).
myList = [20, 10, 'not a number', 30]  # a list may hold mixed types
print myList
print len(myList)  # number of elements
myList.append('hello')  # append adds one item at the end
print myList
myList.append('world')
print myList
# NOTE: sorting a mixed int/str list works in Python 2 (numbers order before
# strings) but would raise a TypeError in Python 3.
myList.sort()
print myList
myList.reverse()  # reverses in place
print myList
print myList[0]   # first element
print myList[-1]  # negative indices count from the end
# print myList[18] oeps | {
"content_hash": "9312ee1e0a420042a5f256390b05b25c",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 37,
"avg_line_length": 13,
"alnum_prop": 0.7269230769230769,
"repo_name": "typemytype/roboThonWorkshop2015Examples",
"id": "90973bb12d7e2e38104bbbf632f31f14a5dd9f36",
"size": "296",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "basicList.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7278"
}
],
"symlink_target": ""
} |
using System;
using System.Linq;
using System.Threading.Tasks;
using Microsoft.EntityFrameworkCore;
using Modix.Data.ExpandableQueries;
using Modix.Data.Models.Core;
using Modix.Data.Utilities;
namespace Modix.Data.Repositories
{
    /// <summary>
    /// Describes a repository for managing <see cref="UserEntity"/> and <see cref="GuildUserEntity"/> entities, within an underlying data storage provider.
    /// </summary>
    public interface IGuildUserRepository
    {
        /// <summary>
        /// Begins a new transaction to create users within the repository.
        /// </summary>
        /// <returns>
        /// A <see cref="Task"/> that will complete, with the requested transaction object,
        /// when no other transactions are active upon the repository.
        /// </returns>
        Task<IRepositoryTransaction> BeginCreateTransactionAsync();

        /// <summary>
        /// Creates a new set of guild data for a user within the repository.
        /// </summary>
        /// <param name="data">The initial set of guild data to be created.</param>
        /// <exception cref="ArgumentNullException">Throws for <paramref name="data"/>.</exception>
        /// <returns>A <see cref="Task"/> which will complete when the operation is complete.</returns>
        Task CreateAsync(GuildUserCreationData data);

        /// <summary>
        /// Retrieves summary information about a user.
        /// </summary>
        /// <param name="userId">The <see cref="GuildUserEntity.UserId"/> value of the user guild data to be retrieved.</param>
        /// <param name="guildId">The <see cref="GuildUserEntity.GuildId"/> value of the user guild data to be retrieved.</param>
        /// <returns>
        /// A <see cref="Task"/> that will complete when the operation has completed,
        /// containing the requested user guild data, or null if no such user exists.
        /// </returns>
        Task<GuildUserSummary> ReadSummaryAsync(ulong userId, ulong guildId);

        /// <summary>
        /// Attempts to update guild information about a user, based on a pair of user and guild ID values.
        /// </summary>
        /// <param name="userId">The <see cref="GuildUserEntity.UserId"/> value of the user guild data to be updated.</param>
        /// <param name="guildId">The <see cref="GuildUserEntity.GuildId"/> value of the user guild data to be updated.</param>
        /// <param name="updateAction">An action to be invoked to perform the requested update.</param>
        /// <exception cref="ArgumentNullException">Throws for <paramref name="updateAction"/>.</exception>
        /// <returns>
        /// A <see cref="Task"/> that will complete when the operation has completed,
        /// containing a flag indicating whether the requested update succeeded (I.E. whether the specified data record exists).
        /// </returns>
        Task<bool> TryUpdateAsync(ulong userId, ulong guildId, Action<GuildUserMutationData> updateAction);
    }
    /// <inheritdoc />
    public class GuildUserRepository : RepositoryBase, IGuildUserRepository
    {
        /// <summary>
        /// Creates a new <see cref="GuildUserRepository"/>.
        /// See <see cref="RepositoryBase(ModixContext)"/> for details.
        /// </summary>
        public GuildUserRepository(ModixContext modixContext)
            : base(modixContext) { }

        /// <inheritdoc />
        public Task<IRepositoryTransaction> BeginCreateTransactionAsync()
            => _createTransactionFactory.BeginTransactionAsync(ModixContext.Database);

        /// <inheritdoc />
        public async Task CreateAsync(GuildUserCreationData data)
        {
            if (data == null)
                throw new ArgumentNullException(nameof(data));

            var guildDataEntity = data.ToGuildDataEntity();

            // Reuse the global user record when it already exists; otherwise build one from the creation data.
            guildDataEntity.User = await ModixContext.Users.FirstOrDefaultAsync(x => x.Id == data.UserId)
                ?? data.ToUserEntity();

            await ModixContext.GuildUsers.AddAsync(guildDataEntity);

            // Refresh the globally-tracked username/discriminator with the latest observed values;
            // incoming nulls mean "unknown" and never overwrite what is already stored.
            if ((guildDataEntity.User.Username != data.Username) && !(data.Username is null))
                guildDataEntity.User.Username = data.Username;

            if ((guildDataEntity.User.Discriminator != data.Discriminator) && !(data.Discriminator is null))
                guildDataEntity.User.Discriminator = data.Discriminator;

            await ModixContext.SaveChangesAsync();
        }

        /// <inheritdoc />
        public Task<GuildUserSummary> ReadSummaryAsync(ulong userId, ulong guildId)
        {
            // No-tracking read: the summary is a pure projection, never written back.
            return ModixContext.GuildUsers.AsNoTracking()
                .Where(x => x.UserId == userId)
                .Where(x => x.GuildId == guildId)
                .AsExpandable()
                .Select(GuildUserSummary.FromEntityProjection)
                .FirstOrDefaultAsync();
        }

        /// <inheritdoc />
        public async Task<bool> TryUpdateAsync(ulong userId, ulong guildId, Action<GuildUserMutationData> updateAction)
        {
            if (updateAction == null)
                throw new ArgumentNullException(nameof(updateAction));

            var entity = await ModixContext.GuildUsers
                .Where(x => x.UserId == userId)
                .Where(x => x.GuildId == guildId)
                .Include(x => x.User)
                .FirstOrDefaultAsync();

            if(entity == null)
                return false;

            // Apply the caller's changes through the mutation model, then copy them onto the entity.
            var data = GuildUserMutationData.FromEntity(entity);
            updateAction.Invoke(data);
            data.ApplyTo(entity);

            // Only these properties are flushed back to the store.
            ModixContext.UpdateProperty(entity.User, x => x.Username);
            ModixContext.UpdateProperty(entity.User, x => x.Discriminator);
            ModixContext.UpdateProperty(entity, x => x.Nickname);
            ModixContext.UpdateProperty(entity, x => x.LastSeen);

            await ModixContext.SaveChangesAsync();

            return true;
        }

        // Shared factory backing BeginCreateTransactionAsync().
        private static readonly RepositoryTransactionFactory _createTransactionFactory
            = new RepositoryTransactionFactory();
    }
}
| {
"content_hash": "36ec59c04dbde9171c3307ade54c18cd",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 156,
"avg_line_length": 43.71223021582734,
"alnum_prop": 0.6298551678736011,
"repo_name": "mariocatch/MODiX",
"id": "cf5037ba9be6f96915015713b753007e39729388",
"size": "6078",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Modix.Data/Repositories/GuildUserRepository.cs",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "987782"
},
{
"name": "CSS",
"bytes": "4896"
},
{
"name": "Dockerfile",
"bytes": "504"
},
{
"name": "HTML",
"bytes": "745"
},
{
"name": "JavaScript",
"bytes": "795"
},
{
"name": "TypeScript",
"bytes": "32641"
},
{
"name": "Vue",
"bytes": "108325"
}
],
"symlink_target": ""
} |
class Engine:
    """Rule-based chat engine that picks the best-scoring response pair."""

    def __init__(self,
                 response_pairs,
                 knowledge=None):
        """
        :param response_pairs: iterable of objects exposing ``match``,
            ``score`` and ``generate`` (see :meth:`chat`).
        :param knowledge: optional shared knowledge dict. Defaults to a fresh
            dict per instance (the previous ``knowledge={}`` default was a
            mutable default argument silently shared between all instances).
        """
        self.response_pairs = response_pairs
        self.knowledge = {} if knowledge is None else knowledge

    def chat(self, user_utterance, context):
        """Return ``(response, new_context, score)`` for the best-matching pair.

        Every pair is asked to match the utterance; non-matching pairs
        (``match`` returning ``None``) are skipped, and the highest-scoring
        match wins. If no pair matches — or none scores above zero — returns
        ``(None, context, 0)`` instead of crashing on a ``None`` pair.
        """
        best_score = 0
        best_response_pair = None
        best_captured = {}
        for response_pair in self.response_pairs:
            captured = response_pair.match(user_utterance, self.knowledge)
            if captured is None:
                continue  # this pair does not apply to the utterance
            score = response_pair.score(captured, context, self.knowledge)
            if best_score < score:
                best_score, best_response_pair, best_captured = score, response_pair, captured
        if best_response_pair is None:
            # Nothing matched (or nothing scored > 0): fail gracefully.
            return None, context, 0
        response, new_context = best_response_pair.generate(best_captured, context, self.knowledge)
        return response, new_context, best_score
| {
"content_hash": "7ba14789c70314d88f4567f99d5f8bc0",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 99,
"avg_line_length": 34.16,
"alnum_prop": 0.6007025761124122,
"repo_name": "carrotflakes/invada",
"id": "01117cc0ce6b2b30fe9f0149dad59e27b0d118d4",
"size": "880",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "invada/engine.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15434"
}
],
"symlink_target": ""
} |
#include "config.h"
#include "RenderStyle.h"
#include "ContentData.h"
#include "CursorList.h"
#include "CSSPropertyNames.h"
#include "Font.h"
#include "FontSelector.h"
#include "QuotesData.h"
#include "RenderArena.h"
#include "RenderObject.h"
#include "ScaleTransformOperation.h"
#include "ShadowData.h"
#include "StyleImage.h"
#include "StyleInheritedData.h"
#include "StyleResolver.h"
#if ENABLE(TOUCH_EVENTS)
#include "RenderTheme.h"
#endif
#include "WebCoreMemoryInstrumentation.h"
#include <wtf/MemoryInstrumentationVector.h>
#include <wtf/StdLibExtras.h>
#include <algorithm>
#if ENABLE(TEXT_AUTOSIZING)
#include "TextAutosizer.h"
#endif
using namespace std;
namespace WebCore {
// Compile-time layout guards: these mirror structs must stay the same size as
// the real classes so that any accidental growth of the (very numerous) style
// objects is caught at build time rather than silently bloating memory.
struct SameSizeAsBorderValue {
    RGBA32 m_color;
    unsigned m_width;
};

COMPILE_ASSERT(sizeof(BorderValue) == sizeof(SameSizeAsBorderValue), BorderValue_should_not_grow);

struct SameSizeAsRenderStyle : public RefCounted<SameSizeAsRenderStyle> {
    unsigned m_bitfields;
    void* dataRefs[7];
    void* ownPtrs[1];
#if ENABLE(SVG)
    void* dataRefSvgStyle;
#endif
    struct InheritedFlags {
        unsigned m_bitfields[2];
    } inherited_flags;
    struct NonInheritedFlags {
        unsigned m_bitfields[2];
    } noninherited_flags;
};

COMPILE_ASSERT(sizeof(RenderStyle) == sizeof(SameSizeAsRenderStyle), RenderStyle_should_stay_small);
// Process-wide default style singleton. The reference is deliberately leaked
// (leakRef) so the object lives for the whole process lifetime.
inline RenderStyle* defaultStyle()
{
    static RenderStyle* s_defaultStyle = RenderStyle::createDefaultStyle().leakRef();
    return s_defaultStyle;
}
// Creates a new style whose substructures initially alias the default style's
// (see the default constructor below).
PassRefPtr<RenderStyle> RenderStyle::create()
{
    return adoptRef(new RenderStyle());
}
// Builds the fully-initialized style that defaultStyle() caches and that all
// other styles are derived from (the bool is a disambiguating tag).
PassRefPtr<RenderStyle> RenderStyle::createDefaultStyle()
{
    return adoptRef(new RenderStyle(true));
}
// Creates a style for an anonymous box: inherits the inheritable properties
// (plus unicode-bidi) from |parentStyle| and forces the given display type.
PassRefPtr<RenderStyle> RenderStyle::createAnonymousStyleWithDisplay(const RenderStyle* parentStyle, EDisplay display)
{
    RefPtr<RenderStyle> newStyle = RenderStyle::create();
    newStyle->inheritFrom(parentStyle);
    newStyle->inheritUnicodeBidiFrom(parentStyle);
    newStyle->setDisplay(display);
    return newStyle;
}
// Clones |other| via the copy constructor (flag bitfields copied, substructure
// DataRefs copied by reference).
PassRefPtr<RenderStyle> RenderStyle::clone(const RenderStyle* other)
{
    return adoptRef(new RenderStyle(*other));
}
// Default constructor: every substructure aliases the singleton default
// style's data; only the flag bitfields are initialized locally.
ALWAYS_INLINE RenderStyle::RenderStyle()
    : m_box(defaultStyle()->m_box)
    , visual(defaultStyle()->visual)
    , m_background(defaultStyle()->m_background)
    , surround(defaultStyle()->surround)
    , rareNonInheritedData(defaultStyle()->rareNonInheritedData)
    , rareInheritedData(defaultStyle()->rareInheritedData)
    , inherited(defaultStyle()->inherited)
#if ENABLE(SVG)
    , m_svgStyle(defaultStyle()->m_svgStyle)
#endif
{
    setBitDefaults(); // Would it be faster to copy this from the default style?
    COMPILE_ASSERT((sizeof(InheritedFlags) <= 8), InheritedFlags_does_not_grow);
    COMPILE_ASSERT((sizeof(NonInheritedFlags) <= 8), NonInheritedFlags_does_not_grow);
}
// Default-style constructor (the unnamed bool is only a tag that selects this
// overload). Unlike RenderStyle(), every data member is init()'ed to its own
// fresh value instead of sharing another style's data.
ALWAYS_INLINE RenderStyle::RenderStyle(bool)
{
    setBitDefaults();

    m_box.init();
    visual.init();
    m_background.init();
    surround.init();
    rareNonInheritedData.init();
    // The rare non-inherited data owns several nested sub-structures that need
    // their own initialization.
    rareNonInheritedData.access()->m_deprecatedFlexibleBox.init();
    rareNonInheritedData.access()->m_flexibleBox.init();
    rareNonInheritedData.access()->m_marquee.init();
    rareNonInheritedData.access()->m_multiCol.init();
    rareNonInheritedData.access()->m_transform.init();
#if ENABLE(CSS_FILTERS)
    rareNonInheritedData.access()->m_filter.init();
#endif
    rareNonInheritedData.access()->m_grid.init();
    rareNonInheritedData.access()->m_gridItem.init();
    rareInheritedData.init();
    inherited.init();
#if ENABLE(SVG)
    m_svgStyle.init();
#endif
}
// Copy constructor: the ref-counted data members are shared with |o|
// (copy-on-write); the two flag bitfield structs are copied by value.
ALWAYS_INLINE RenderStyle::RenderStyle(const RenderStyle& o)
    : RefCounted<RenderStyle>()
    , m_box(o.m_box)
    , visual(o.visual)
    , m_background(o.m_background)
    , surround(o.surround)
    , rareNonInheritedData(o.rareNonInheritedData)
    , rareInheritedData(o.rareInheritedData)
    , inherited(o.inherited)
#if ENABLE(SVG)
    , m_svgStyle(o.m_svgStyle)
#endif
    , inherited_flags(o.inherited_flags)
    , noninherited_flags(o.noninherited_flags)
{
}
void RenderStyle::inheritFrom(const RenderStyle* inheritParent, IsAtShadowBoundary isAtShadowBoundary)
{
if (isAtShadowBoundary == AtShadowBoundary) {
// Even if surrounding content is user-editable, shadow DOM should act as a single unit, and not necessarily be editable
EUserModify currentUserModify = userModify();
rareInheritedData = inheritParent->rareInheritedData;
setUserModify(currentUserModify);
} else
rareInheritedData = inheritParent->rareInheritedData;
inherited = inheritParent->inherited;
inherited_flags = inheritParent->inherited_flags;
#if ENABLE(SVG)
if (m_svgStyle != inheritParent->m_svgStyle)
m_svgStyle.access()->inheritFrom(inheritParent->m_svgStyle.get());
#endif
}
// Copies all non-inherited style data from |other| into this style.
void RenderStyle::copyNonInheritedFrom(const RenderStyle* other)
{
    m_box = other->m_box;
    visual = other->visual;
    m_background = other->m_background;
    surround = other->surround;
    rareNonInheritedData = other->rareNonInheritedData;
    // The flags are copied one-by-one because noninherited_flags contains a bunch of stuff other than real style data.
    noninherited_flags._effectiveDisplay = other->noninherited_flags._effectiveDisplay;
    noninherited_flags._originalDisplay = other->noninherited_flags._originalDisplay;
    noninherited_flags._overflowX = other->noninherited_flags._overflowX;
    noninherited_flags._overflowY = other->noninherited_flags._overflowY;
    noninherited_flags._vertical_align = other->noninherited_flags._vertical_align;
    noninherited_flags._clear = other->noninherited_flags._clear;
    noninherited_flags._position = other->noninherited_flags._position;
    noninherited_flags._floating = other->noninherited_flags._floating;
    noninherited_flags._table_layout = other->noninherited_flags._table_layout;
    noninherited_flags._unicodeBidi = other->noninherited_flags._unicodeBidi;
    noninherited_flags._page_break_before = other->noninherited_flags._page_break_before;
    noninherited_flags._page_break_after = other->noninherited_flags._page_break_after;
    noninherited_flags._page_break_inside = other->noninherited_flags._page_break_inside;
#if ENABLE(SVG)
    if (m_svgStyle != other->m_svgStyle)
        m_svgStyle.access()->copyNonInheritedFrom(other->m_svgStyle.get());
#endif
    // Zoom is expected to be at its initial value here; callers must not rely
    // on this function to transfer an effective zoom.
    ASSERT(zoom() == initialZoom());
}
// Full equality over all style data. The cached pseudo-style list is
// deliberately left out of the comparison.
bool RenderStyle::operator==(const RenderStyle& o) const
{
    // compare everything except the pseudoStyle pointer
    return inherited_flags == o.inherited_flags
        && noninherited_flags == o.noninherited_flags
        && m_box == o.m_box
        && visual == o.visual
        && m_background == o.m_background
        && surround == o.surround
        && rareNonInheritedData == o.rareNonInheritedData
        && rareInheritedData == o.rareInheritedData
        && inherited == o.inherited
#if ENABLE(SVG)
        && m_svgStyle == o.m_svgStyle
#endif
        ;
}
// True unless this is the shared placeholder style the StyleResolver hands out
// while real style resolution is still pending.
bool RenderStyle::isStyleAvailable() const
{
    return this != StyleResolver::styleNotYetAvailable();
}
// Maps a PseudoId to its bit in noninherited_flags._pseudoBits. Callers assert
// pseudo > NOPSEUDO, so the subtraction never underflows.
static inline int pseudoBit(PseudoId pseudo)
{
    return 1 << (pseudo - 1);
}
// Fast check: true if a style bit is set for any web-exposed pseudo element.
bool RenderStyle::hasAnyPublicPseudoStyles() const
{
    return PUBLIC_PSEUDOID_MASK & noninherited_flags._pseudoBits;
}
// True if the bit for the given (public) pseudo element has been recorded via
// setHasPseudoStyle().
bool RenderStyle::hasPseudoStyle(PseudoId pseudo) const
{
    ASSERT(pseudo > NOPSEUDO);
    ASSERT(pseudo < FIRST_INTERNAL_PSEUDOID);
    return pseudoBit(pseudo) & noninherited_flags._pseudoBits;
}
// Records that a style exists for the given (public) pseudo element.
void RenderStyle::setHasPseudoStyle(PseudoId pseudo)
{
    ASSERT(pseudo > NOPSEUDO);
    ASSERT(pseudo < FIRST_INTERNAL_PSEUDOID);
    noninherited_flags._pseudoBits |= pseudoBit(pseudo);
}
// Looks up the cached style for the given pseudo element, or 0 if none is
// cached. Only non-pseudo styles carry a cache.
RenderStyle* RenderStyle::getCachedPseudoStyle(PseudoId pid) const
{
    if (styleType() != NOPSEUDO)
        return 0;

    if (!m_cachedPseudoStyles || !m_cachedPseudoStyles->size())
        return 0;

    size_t count = m_cachedPseudoStyles->size();
    for (size_t index = 0; index < count; ++index) {
        RenderStyle* candidate = m_cachedPseudoStyles->at(index).get();
        if (candidate->styleType() == pid)
            return candidate;
    }

    return 0;
}
// Adds |pseudo| to this style's pseudo-style cache (creating the cache on
// first use) and returns a raw pointer to the added style, or 0 for null input.
RenderStyle* RenderStyle::addCachedPseudoStyle(PassRefPtr<RenderStyle> pseudo)
{
    if (!pseudo)
        return 0;

    ASSERT(pseudo->styleType() > NOPSEUDO);

    // Grab the raw pointer before append() consumes the PassRefPtr.
    RenderStyle* addedStyle = pseudo.get();

    if (!m_cachedPseudoStyles)
        m_cachedPseudoStyles = adoptPtr(new PseudoStyleCache);

    m_cachedPseudoStyles->append(pseudo);

    return addedStyle;
}
// Removes the first cached style matching |pid| from the pseudo-style cache,
// if the cache exists and contains one.
void RenderStyle::removeCachedPseudoStyle(PseudoId pid)
{
    if (!m_cachedPseudoStyles)
        return;

    size_t count = m_cachedPseudoStyles->size();
    for (size_t index = 0; index < count; ++index) {
        if (m_cachedPseudoStyles->at(index)->styleType() == pid) {
            m_cachedPseudoStyles->remove(index);
            return;
        }
    }
}
// True when any inherited style data (flags, inherited struct, SVG inherited
// data, or rare inherited data) differs from |other|'s.
bool RenderStyle::inheritedNotEqual(const RenderStyle* other) const
{
    return inherited_flags != other->inherited_flags
        || inherited != other->inherited
#if ENABLE(SVG)
        || m_svgStyle->inheritedNotEqual(other->m_svgStyle.get())
#endif
        || rareInheritedData != other->rareInheritedData;
}
// Cheap sharing test: compares data pointers, not contents, so it can report
// false even when the two styles' inherited values are equal but not shared.
bool RenderStyle::inheritedDataShared(const RenderStyle* other) const
{
    // This is a fast check that only looks if the data structures are shared.
    return inherited_flags == other->inherited_flags
        && inherited.get() == other->inherited.get()
#if ENABLE(SVG)
        && m_svgStyle.get() == other->m_svgStyle.get()
#endif
        && rareInheritedData.get() == other->rareInheritedData.get();
}
// Returns true when the change from offsets |a| to offsets |b| can only have
// moved the positioned box, never resized it.
static bool positionedObjectMoved(const LengthBox& a, const LengthBox& b)
{
    // A pure movement requires every offset to keep its unit type; otherwise we
    // can't guarantee that this was just a movement.
    bool unitTypesMatch = a.left().type() == b.left().type()
        && a.right().type() == b.right().type()
        && a.top().type() == b.top().type()
        && a.bottom().type() == b.bottom().type();
    if (!unitTypesMatch)
        return false;

    // Only one offset may be non-auto per axis; with both ends pinned, changing
    // the values changes the size of the box rather than its position.
    bool horizontallyOverconstrained = !a.left().isIntrinsicOrAuto() && !a.right().isIntrinsicOrAuto();
    bool verticallyOverconstrained = !a.top().isIntrinsicOrAuto() && !a.bottom().isIntrinsicOrAuto();

    // One of the units is fixed or percent in both directions and stayed that
    // way in the new style. Therefore all we are doing is moving.
    return !horizontallyOverconstrained && !verticallyOverconstrained;
}
// Compares this style with |other| and returns the cheapest update the
// renderer needs: layout, positioned-movement-only layout, layer repaint,
// plain repaint, layer recomposite, or nothing. Properties that compositing
// may handle without a stronger update (transform/opacity/filter) are
// reported through |changedContextSensitiveProperties| instead of forcing the
// return value up.
StyleDifference RenderStyle::diff(const RenderStyle* other, unsigned& changedContextSensitiveProperties) const
{
    changedContextSensitiveProperties = ContextSensitivePropertyNone;

#if ENABLE(SVG)
    StyleDifference svgChange = StyleDifferenceEqual;
    if (m_svgStyle != other->m_svgStyle) {
        svgChange = m_svgStyle->diff(other->m_svgStyle.get());
        if (svgChange == StyleDifferenceLayout)
            return svgChange;
    }
#endif

    // Box geometry changes always need layout.
    if (m_box->width() != other->m_box->width()
        || m_box->minWidth() != other->m_box->minWidth()
        || m_box->maxWidth() != other->m_box->maxWidth()
        || m_box->height() != other->m_box->height()
        || m_box->minHeight() != other->m_box->minHeight()
        || m_box->maxHeight() != other->m_box->maxHeight())
        return StyleDifferenceLayout;

    if (m_box->verticalAlign() != other->m_box->verticalAlign() || noninherited_flags._vertical_align != other->noninherited_flags._vertical_align)
        return StyleDifferenceLayout;

    if (m_box->boxSizing() != other->m_box->boxSizing())
        return StyleDifferenceLayout;

    if (surround->margin != other->surround->margin)
        return StyleDifferenceLayout;

    if (surround->padding != other->surround->padding)
        return StyleDifferenceLayout;

    // Only dig into the rare data when the pointers differ.
    if (rareNonInheritedData.get() != other->rareNonInheritedData.get()) {
        if (rareNonInheritedData->m_appearance != other->rareNonInheritedData->m_appearance
            || rareNonInheritedData->marginBeforeCollapse != other->rareNonInheritedData->marginBeforeCollapse
            || rareNonInheritedData->marginAfterCollapse != other->rareNonInheritedData->marginAfterCollapse
            || rareNonInheritedData->lineClamp != other->rareNonInheritedData->lineClamp
            || rareNonInheritedData->textOverflow != other->rareNonInheritedData->textOverflow)
            return StyleDifferenceLayout;

        if (rareNonInheritedData->m_regionOverflow != other->rareNonInheritedData->m_regionOverflow)
            return StyleDifferenceLayout;

        if (rareNonInheritedData->m_wrapFlow != other->rareNonInheritedData->m_wrapFlow
            || rareNonInheritedData->m_wrapThrough != other->rareNonInheritedData->m_wrapThrough
            || rareNonInheritedData->m_wrapMargin != other->rareNonInheritedData->m_wrapMargin
            || rareNonInheritedData->m_wrapPadding != other->rareNonInheritedData->m_wrapPadding)
            return StyleDifferenceLayout;

        // For nested data, compare values only when the pointers already differ.
        if (rareNonInheritedData->m_deprecatedFlexibleBox.get() != other->rareNonInheritedData->m_deprecatedFlexibleBox.get()
            && *rareNonInheritedData->m_deprecatedFlexibleBox.get() != *other->rareNonInheritedData->m_deprecatedFlexibleBox.get())
            return StyleDifferenceLayout;

        if (rareNonInheritedData->m_flexibleBox.get() != other->rareNonInheritedData->m_flexibleBox.get()
            && *rareNonInheritedData->m_flexibleBox.get() != *other->rareNonInheritedData->m_flexibleBox.get())
            return StyleDifferenceLayout;

        if (rareNonInheritedData->m_order != other->rareNonInheritedData->m_order
            || rareNonInheritedData->m_alignContent != other->rareNonInheritedData->m_alignContent
            || rareNonInheritedData->m_alignItems != other->rareNonInheritedData->m_alignItems
            || rareNonInheritedData->m_alignSelf != other->rareNonInheritedData->m_alignSelf
            || rareNonInheritedData->m_justifyContent != other->rareNonInheritedData->m_justifyContent)
            return StyleDifferenceLayout;

        // FIXME: We should add an optimized form of layout that just recomputes visual overflow.
        if (!rareNonInheritedData->shadowDataEquivalent(*other->rareNonInheritedData.get()))
            return StyleDifferenceLayout;

        if (!rareNonInheritedData->reflectionDataEquivalent(*other->rareNonInheritedData.get()))
            return StyleDifferenceLayout;

        if (rareNonInheritedData->m_multiCol.get() != other->rareNonInheritedData->m_multiCol.get()
            && *rareNonInheritedData->m_multiCol.get() != *other->rareNonInheritedData->m_multiCol.get())
            return StyleDifferenceLayout;

        if (rareNonInheritedData->m_transform.get() != other->rareNonInheritedData->m_transform.get()
            && *rareNonInheritedData->m_transform.get() != *other->rareNonInheritedData->m_transform.get()) {
#if USE(ACCELERATED_COMPOSITING)
            changedContextSensitiveProperties |= ContextSensitivePropertyTransform;
            // Don't return; keep looking for another change
#else
            return StyleDifferenceLayout;
#endif
        }

        // Was: a single check that &&'ed the two *pointer* inequalities, which
        // missed layouts when only one of grid/gridItem changed and never
        // compared values. Use the same pointer-then-value pattern as the
        // flexible box data above.
        if (rareNonInheritedData->m_grid.get() != other->rareNonInheritedData->m_grid.get()
            && *rareNonInheritedData->m_grid.get() != *other->rareNonInheritedData->m_grid.get())
            return StyleDifferenceLayout;

        if (rareNonInheritedData->m_gridItem.get() != other->rareNonInheritedData->m_gridItem.get()
            && *rareNonInheritedData->m_gridItem.get() != *other->rareNonInheritedData->m_gridItem.get())
            return StyleDifferenceLayout;

#if !USE(ACCELERATED_COMPOSITING)
        if (rareNonInheritedData.get() != other->rareNonInheritedData.get()) {
            if (rareNonInheritedData->m_transformStyle3D != other->rareNonInheritedData->m_transformStyle3D
                || rareNonInheritedData->m_backfaceVisibility != other->rareNonInheritedData->m_backfaceVisibility
                || rareNonInheritedData->m_perspective != other->rareNonInheritedData->m_perspective
                || rareNonInheritedData->m_perspectiveOriginX != other->rareNonInheritedData->m_perspectiveOriginX
                || rareNonInheritedData->m_perspectiveOriginY != other->rareNonInheritedData->m_perspectiveOriginY)
                return StyleDifferenceLayout;
        }
#endif

#if ENABLE(DASHBOARD_SUPPORT)
        // If regions change, trigger a relayout to re-calc regions.
        if (rareNonInheritedData->m_dashboardRegions != other->rareNonInheritedData->m_dashboardRegions)
            return StyleDifferenceLayout;
#endif

#if ENABLE(CSS_EXCLUSIONS)
        if (rareNonInheritedData->m_shapeInside != other->rareNonInheritedData->m_shapeInside)
            return StyleDifferenceLayout;
#endif
    }

    if (rareInheritedData.get() != other->rareInheritedData.get()) {
        if (rareInheritedData->highlight != other->rareInheritedData->highlight
            || rareInheritedData->indent != other->rareInheritedData->indent
            || rareInheritedData->m_effectiveZoom != other->rareInheritedData->m_effectiveZoom
            || rareInheritedData->textSizeAdjust != other->rareInheritedData->textSizeAdjust
            || rareInheritedData->wordBreak != other->rareInheritedData->wordBreak
            || rareInheritedData->overflowWrap != other->rareInheritedData->overflowWrap
            || rareInheritedData->nbspMode != other->rareInheritedData->nbspMode
            || rareInheritedData->khtmlLineBreak != other->rareInheritedData->khtmlLineBreak
            || rareInheritedData->textSecurity != other->rareInheritedData->textSecurity
            || rareInheritedData->hyphens != other->rareInheritedData->hyphens
            || rareInheritedData->hyphenationLimitBefore != other->rareInheritedData->hyphenationLimitBefore
            || rareInheritedData->hyphenationLimitAfter != other->rareInheritedData->hyphenationLimitAfter
            || rareInheritedData->hyphenationString != other->rareInheritedData->hyphenationString
            || rareInheritedData->locale != other->rareInheritedData->locale
            || rareInheritedData->textEmphasisMark != other->rareInheritedData->textEmphasisMark
            || rareInheritedData->textEmphasisPosition != other->rareInheritedData->textEmphasisPosition
            || rareInheritedData->textEmphasisCustomMark != other->rareInheritedData->textEmphasisCustomMark
            || rareInheritedData->m_tabSize != other->rareInheritedData->m_tabSize
            || rareInheritedData->m_lineBoxContain != other->rareInheritedData->m_lineBoxContain
            || rareInheritedData->m_lineGrid != other->rareInheritedData->m_lineGrid
#if ENABLE(CSS_IMAGE_RESOLUTION)
            || rareInheritedData->m_imageResolutionSource != other->rareInheritedData->m_imageResolutionSource
            || rareInheritedData->m_imageResolutionSnap != other->rareInheritedData->m_imageResolutionSnap
            || rareInheritedData->m_imageResolution != other->rareInheritedData->m_imageResolution
#endif
            || rareInheritedData->m_lineSnap != other->rareInheritedData->m_lineSnap
            || rareInheritedData->m_lineAlign != other->rareInheritedData->m_lineAlign)
            return StyleDifferenceLayout;

        if (!rareInheritedData->shadowDataEquivalent(*other->rareInheritedData.get()))
            return StyleDifferenceLayout;

        if (textStrokeWidth() != other->textStrokeWidth())
            return StyleDifferenceLayout;
    }

#if ENABLE(TEXT_AUTOSIZING)
    if (visual->m_textAutosizingMultiplier != other->visual->m_textAutosizingMultiplier)
        return StyleDifferenceLayout;
#endif

    if (inherited->line_height != other->inherited->line_height
        || inherited->list_style_image != other->inherited->list_style_image
        || inherited->font != other->inherited->font
        || inherited->horizontal_border_spacing != other->inherited->horizontal_border_spacing
        || inherited->vertical_border_spacing != other->inherited->vertical_border_spacing
        || inherited_flags._box_direction != other->inherited_flags._box_direction
        || inherited_flags.m_rtlOrdering != other->inherited_flags.m_rtlOrdering
        || noninherited_flags._position != other->noninherited_flags._position
        || noninherited_flags._floating != other->noninherited_flags._floating
        || noninherited_flags._originalDisplay != other->noninherited_flags._originalDisplay)
        return StyleDifferenceLayout;

    // Table-related properties only matter for table display types.
    if (((int)noninherited_flags._effectiveDisplay) >= TABLE) {
        if (inherited_flags._border_collapse != other->inherited_flags._border_collapse
            || inherited_flags._empty_cells != other->inherited_flags._empty_cells
            || inherited_flags._caption_side != other->inherited_flags._caption_side
            || noninherited_flags._table_layout != other->noninherited_flags._table_layout)
            return StyleDifferenceLayout;

        // In the collapsing border model, 'hidden' suppresses other borders, while 'none'
        // does not, so these style differences can be width differences.
        if (inherited_flags._border_collapse
            && ((borderTopStyle() == BHIDDEN && other->borderTopStyle() == BNONE)
                || (borderTopStyle() == BNONE && other->borderTopStyle() == BHIDDEN)
                || (borderBottomStyle() == BHIDDEN && other->borderBottomStyle() == BNONE)
                || (borderBottomStyle() == BNONE && other->borderBottomStyle() == BHIDDEN)
                || (borderLeftStyle() == BHIDDEN && other->borderLeftStyle() == BNONE)
                || (borderLeftStyle() == BNONE && other->borderLeftStyle() == BHIDDEN)
                || (borderRightStyle() == BHIDDEN && other->borderRightStyle() == BNONE)
                || (borderRightStyle() == BNONE && other->borderRightStyle() == BHIDDEN)))
            return StyleDifferenceLayout;
    }

    if (noninherited_flags._effectiveDisplay == LIST_ITEM) {
        if (inherited_flags._list_style_type != other->inherited_flags._list_style_type
            || inherited_flags._list_style_position != other->inherited_flags._list_style_position)
            return StyleDifferenceLayout;
    }

    if (inherited_flags._text_align != other->inherited_flags._text_align
        || inherited_flags._text_transform != other->inherited_flags._text_transform
        || inherited_flags._direction != other->inherited_flags._direction
        || inherited_flags._white_space != other->inherited_flags._white_space
        || noninherited_flags._clear != other->noninherited_flags._clear
        || noninherited_flags._unicodeBidi != other->noninherited_flags._unicodeBidi)
        return StyleDifferenceLayout;

    // Check block flow direction.
    if (inherited_flags.m_writingMode != other->inherited_flags.m_writingMode)
        return StyleDifferenceLayout;

    // Check text combine mode.
    if (rareNonInheritedData->m_textCombine != other->rareNonInheritedData->m_textCombine)
        return StyleDifferenceLayout;

    // Overflow returns a layout hint.
    if (noninherited_flags._overflowX != other->noninherited_flags._overflowX
        || noninherited_flags._overflowY != other->noninherited_flags._overflowY)
        return StyleDifferenceLayout;

    // If our border widths change, then we need to layout. Other changes to borders
    // only necessitate a repaint.
    if (borderLeftWidth() != other->borderLeftWidth()
        || borderTopWidth() != other->borderTopWidth()
        || borderBottomWidth() != other->borderBottomWidth()
        || borderRightWidth() != other->borderRightWidth())
        return StyleDifferenceLayout;

    // If the counter directives change, trigger a relayout to re-calculate counter values and rebuild the counter node tree.
    const CounterDirectiveMap* mapA = rareNonInheritedData->m_counterDirectives.get();
    const CounterDirectiveMap* mapB = other->rareNonInheritedData->m_counterDirectives.get();
    if (!(mapA == mapB || (mapA && mapB && *mapA == *mapB)))
        return StyleDifferenceLayout;

    if ((visibility() == COLLAPSE) != (other->visibility() == COLLAPSE))
        return StyleDifferenceLayout;

    if ((rareNonInheritedData->opacity == 1 && other->rareNonInheritedData->opacity < 1)
        || (rareNonInheritedData->opacity < 1 && other->rareNonInheritedData->opacity == 1)) {
        // FIXME: We would like to use SimplifiedLayout here, but we can't quite do that yet.
        // We need to make sure SimplifiedLayout can operate correctly on RenderInlines (we will need
        // to add a selfNeedsSimplifiedLayout bit in order to not get confused and taint every line).
        // In addition we need to solve the floating object issue when layers come and go. Right now
        // a full layout is necessary to keep floating object lists sane.
        return StyleDifferenceLayout;
    }

    if (!QuotesData::equals(rareInheritedData->quotes.get(), other->rareInheritedData->quotes.get()))
        return StyleDifferenceLayout;

#if ENABLE(SVG)
    // SVGRenderStyle::diff() might have returned StyleDifferenceRepaint, eg. if fill changes.
    // If eg. the font-size changed at the same time, we're not allowed to return StyleDifferenceRepaint,
    // but have to return StyleDifferenceLayout, that's why this if branch comes after all branches
    // that are relevant for SVG and might return StyleDifferenceLayout.
    if (svgChange != StyleDifferenceEqual)
        return svgChange;
#endif

    // Make sure these left/top/right/bottom checks stay below all layout checks and above
    // all visible checks.
    if (position() != StaticPosition) {
        if (surround->offset != other->surround->offset) {
            // Optimize for the case where a positioned layer is moving but not changing size.
            if (position() == AbsolutePosition && positionedObjectMoved(surround->offset, other->surround->offset))
                return StyleDifferenceLayoutPositionedMovementOnly;

            // FIXME: We would like to use SimplifiedLayout for relative positioning, but we can't quite do that yet.
            // We need to make sure SimplifiedLayout can operate correctly on RenderInlines (we will need
            // to add a selfNeedsSimplifiedLayout bit in order to not get confused and taint every line).
            return StyleDifferenceLayout;
        } else if (m_box->zIndex() != other->m_box->zIndex() || m_box->hasAutoZIndex() != other->m_box->hasAutoZIndex()
                 || visual->clip != other->visual->clip || visual->hasClip != other->visual->hasClip)
            return StyleDifferenceRepaintLayer;
    }

#if ENABLE(CSS_COMPOSITING)
    if (rareNonInheritedData->m_effectiveBlendMode != other->rareNonInheritedData->m_effectiveBlendMode)
        return StyleDifferenceRepaintLayer;
#endif

    if (rareNonInheritedData->opacity != other->rareNonInheritedData->opacity) {
#if USE(ACCELERATED_COMPOSITING)
        changedContextSensitiveProperties |= ContextSensitivePropertyOpacity;
        // Don't return; keep looking for another change.
#else
        return StyleDifferenceRepaintLayer;
#endif
    }

#if ENABLE(CSS_FILTERS)
    if (rareNonInheritedData->m_filter.get() != other->rareNonInheritedData->m_filter.get()
        && *rareNonInheritedData->m_filter.get() != *other->rareNonInheritedData->m_filter.get()) {
#if USE(ACCELERATED_COMPOSITING)
        changedContextSensitiveProperties |= ContextSensitivePropertyFilter;
        // Don't return; keep looking for another change.
#else
        return StyleDifferenceRepaintLayer;
#endif
    }
#endif

    if (rareNonInheritedData->m_mask != other->rareNonInheritedData->m_mask
        || rareNonInheritedData->m_maskBoxImage != other->rareNonInheritedData->m_maskBoxImage)
        return StyleDifferenceRepaintLayer;

    // Purely visual differences only need a repaint.
    if (inherited->color != other->inherited->color
        || inherited_flags._visibility != other->inherited_flags._visibility
        || inherited_flags._text_decorations != other->inherited_flags._text_decorations
        || inherited_flags.m_printColorAdjust != other->inherited_flags.m_printColorAdjust
        || inherited_flags._insideLink != other->inherited_flags._insideLink
        || surround->border != other->surround->border
        || *m_background.get() != *other->m_background.get()
        || visual->textDecoration != other->visual->textDecoration
        || rareInheritedData->userModify != other->rareInheritedData->userModify
        || rareInheritedData->userSelect != other->rareInheritedData->userSelect
        || rareNonInheritedData->userDrag != other->rareNonInheritedData->userDrag
        || rareNonInheritedData->m_borderFit != other->rareNonInheritedData->m_borderFit
#if ENABLE(CSS3_TEXT_DECORATION)
        || rareNonInheritedData->m_textDecorationStyle != other->rareNonInheritedData->m_textDecorationStyle
#endif // CSS3_TEXT_DECORATION
        || rareInheritedData->textFillColor != other->rareInheritedData->textFillColor
        || rareInheritedData->textStrokeColor != other->rareInheritedData->textStrokeColor
        || rareInheritedData->textEmphasisColor != other->rareInheritedData->textEmphasisColor
        || rareInheritedData->textEmphasisFill != other->rareInheritedData->textEmphasisFill
        || rareInheritedData->m_imageRendering != other->rareInheritedData->m_imageRendering)
        return StyleDifferenceRepaint;

    // FIXME: The current spec is being reworked to remove dependencies between exclusions and affected
    // content. There's a proposal to use floats instead. In that case, wrap-shape should actually relayout
    // the parent container. For sure, I will have to revisit this code, but for now I've added this in order
    // to avoid having diff() == StyleDifferenceEqual where wrap-shapes actually differ.
    // Tracking bug: https://bugs.webkit.org/show_bug.cgi?id=62991
    if (rareNonInheritedData->m_shapeOutside != other->rareNonInheritedData->m_shapeOutside)
        return StyleDifferenceRepaint;

    if (rareNonInheritedData->m_clipPath != other->rareNonInheritedData->m_clipPath)
        return StyleDifferenceRepaint;

#if USE(ACCELERATED_COMPOSITING)
    if (rareNonInheritedData.get() != other->rareNonInheritedData.get()) {
        if (rareNonInheritedData->m_transformStyle3D != other->rareNonInheritedData->m_transformStyle3D
            || rareNonInheritedData->m_backfaceVisibility != other->rareNonInheritedData->m_backfaceVisibility
            || rareNonInheritedData->m_perspective != other->rareNonInheritedData->m_perspective
            || rareNonInheritedData->m_perspectiveOriginX != other->rareNonInheritedData->m_perspectiveOriginX
            || rareNonInheritedData->m_perspectiveOriginY != other->rareNonInheritedData->m_perspectiveOriginY)
            return StyleDifferenceRecompositeLayer;
    }
#endif

    // Cursors are not checked, since they will be set appropriately in response to mouse events,
    // so they don't need to cause any repaint or layout.

    // Animations don't need to be checked either. We always set the new style on the RenderObject, so we will get a chance to fire off
    // the resulting transition properly.
    return StyleDifferenceEqual;
}
// Sets all four edges of the CSS 'clip' rectangle at once.
void RenderStyle::setClip(Length top, Length right, Length bottom, Length left)
{
    // Detach the shared visual data once, then fill in every edge.
    StyleVisualData* visualData = visual.access();
    visualData->clip.m_top = top;
    visualData->clip.m_right = right;
    visualData->clip.m_bottom = bottom;
    visualData->clip.m_left = left;
}
// Appends a cursor image (with its hotspot) to this style's cursor list,
// creating the list on first use.
void RenderStyle::addCursor(PassRefPtr<StyleImage> image, const IntPoint& hotSpot)
{
    // Call access() once and reuse the pointer (matches setTextShadow/
    // setBoxShadow); each access() call re-runs the detach check.
    StyleRareInheritedData* rareData = rareInheritedData.access();
    if (!rareData->cursorData)
        rareData->cursorData = CursorList::create();
    rareData->cursorData->append(CursorData(image, hotSpot));
}
// Replaces the entire cursor list (a null value resets it).
void RenderStyle::setCursorList(PassRefPtr<CursorList> other)
{
    rareInheritedData.access()->cursorData = other;
}
// Updates the quotes data, skipping the write (and the copy-on-write detach it
// would cause) when the new value equals the current one.
void RenderStyle::setQuotes(PassRefPtr<QuotesData> q)
{
    if (QuotesData::equals(rareInheritedData->quotes.get(), q.get()))
        return;
    rareInheritedData.access()->quotes = q;
}
// Drops the cursor list; the null check avoids detaching shared data when
// there is nothing to clear.
void RenderStyle::clearCursorList()
{
    if (rareInheritedData->cursorData)
        rareInheritedData.access()->cursorData = 0;
}
// Drops the CSS 'content' chain; the null check avoids detaching shared data
// when there is nothing to clear.
void RenderStyle::clearContent()
{
    if (rareNonInheritedData->m_content)
        rareNonInheritedData.access()->m_content = nullptr;
}
// Appends |contentData| to the end of the singly-linked 'content' chain,
// starting the chain if it is currently empty.
void RenderStyle::appendContent(PassOwnPtr<ContentData> contentData)
{
    OwnPtr<ContentData>& content = rareNonInheritedData.access()->m_content;

    ContentData* tail = content.get();
    if (!tail) {
        content = contentData;
        return;
    }

    // Walk to the last node and link the new entry after it.
    while (tail->next())
        tail = tail->next();
    tail->setNext(contentData);
}
// Sets (or, when |add| is true, appends) an image as CSS 'content'.
// Null images are ignored.
void RenderStyle::setContent(PassRefPtr<StyleImage> image, bool add)
{
    if (!image)
        return;

    if (add)
        appendContent(ContentData::create(image));
    else
        rareNonInheritedData.access()->m_content = ContentData::create(image);
}
// Sets (or, when |add| is true, appends) a text fragment as CSS 'content'.
// Adjacent text fragments are coalesced into a single node.
void RenderStyle::setContent(const String& string, bool add)
{
    OwnPtr<ContentData>& content = rareNonInheritedData.access()->m_content;
    if (add) {
        ContentData* lastContent = content.get();
        while (lastContent && lastContent->next())
            lastContent = lastContent->next();
        if (lastContent) {
            // We attempt to merge with the last ContentData if possible.
            if (lastContent->isText()) {
                TextContentData* textContent = static_cast<TextContentData*>(lastContent);
                textContent->setText(textContent->text() + string);
            } else
                lastContent->setNext(ContentData::create(string));
            return;
        }
    }
    // Either we are replacing, or adding to an empty chain: start fresh.
    content = ContentData::create(string);
}
// Sets (or, when |add| is true, appends) a counter as CSS 'content'.
// Null counters are ignored.
void RenderStyle::setContent(PassOwnPtr<CounterContent> counter, bool add)
{
    if (!counter)
        return;

    if (add)
        appendContent(ContentData::create(counter));
    else
        rareNonInheritedData.access()->m_content = ContentData::create(counter);
}
// Sets (or, when |add| is true, appends) a quote mark as CSS 'content'.
void RenderStyle::setContent(QuoteType quote, bool add)
{
    if (add)
        appendContent(ContentData::create(quote));
    else
        rareNonInheritedData.access()->m_content = ContentData::create(quote);
}
// Decides whether applyTransform() needs to bracket the operations with
// transform-origin translations. Pure translations are origin-independent, so
// the bracketing can be skipped for them.
inline bool requireTransformOrigin(const Vector<RefPtr<TransformOperation> >& transformOperations, RenderStyle::ApplyTransformOrigin applyOrigin)
{
    if (applyOrigin != RenderStyle::IncludeTransformOrigin)
        return false;

    for (unsigned i = 0; i < transformOperations.size(); ++i) {
        switch (transformOperations[i]->getOperationType()) {
        case TransformOperation::TRANSLATE_X:
        case TransformOperation::TRANSLATE_Y:
        case TransformOperation::TRANSLATE:
        case TransformOperation::TRANSLATE_Z:
        case TransformOperation::TRANSLATE_3D:
            // Translations don't care about the origin; keep scanning.
            break;
        default:
            // Any other operation is origin-sensitive.
            return true;
        }
    }

    return false;
}
// Accumulates this style's transform operations into |transform|, resolving
// percentages against |borderBoxSize| and bracketing with the transform-origin
// translation when it matters (see requireTransformOrigin()).
void RenderStyle::applyTransform(TransformationMatrix& transform, const LayoutSize& borderBoxSize, ApplyTransformOrigin applyOrigin) const
{
    // FIXME: when subpixel layout is supported (bug 71143) the body of this function could be replaced by
    // applyTransform(transform, FloatRect(FloatPoint(), borderBoxSize), applyOrigin);
    const Vector<RefPtr<TransformOperation> >& transformOperations = rareNonInheritedData->m_transform->m_operations.operations();
    bool applyTransformOrigin = requireTransformOrigin(transformOperations, applyOrigin);
    // Move to the origin, apply the operations, then move back.
    if (applyTransformOrigin)
        transform.translate3d(floatValueForLength(transformOriginX(), borderBoxSize.width()), floatValueForLength(transformOriginY(), borderBoxSize.height()), transformOriginZ());

    unsigned size = transformOperations.size();
    for (unsigned i = 0; i < size; ++i)
        transformOperations[i]->apply(transform, borderBoxSize);

    if (applyTransformOrigin)
        transform.translate3d(-floatValueForLength(transformOriginX(), borderBoxSize.width()), -floatValueForLength(transformOriginY(), borderBoxSize.height()), -transformOriginZ());
}
// Same as the LayoutSize overload, but resolves the transform against a full
// bounding rect: percentage origins additionally account for the rect's x/y
// offset.
void RenderStyle::applyTransform(TransformationMatrix& transform, const FloatRect& boundingBox, ApplyTransformOrigin applyOrigin) const
{
    const Vector<RefPtr<TransformOperation> >& transformOperations = rareNonInheritedData->m_transform->m_operations.operations();
    bool applyTransformOrigin = requireTransformOrigin(transformOperations, applyOrigin);

    // Percentage origins are relative to the box's position, not just its size.
    float offsetX = transformOriginX().type() == Percent ? boundingBox.x() : 0;
    float offsetY = transformOriginY().type() == Percent ? boundingBox.y() : 0;

    if (applyTransformOrigin) {
        transform.translate3d(floatValueForLength(transformOriginX(), boundingBox.width()) + offsetX,
                              floatValueForLength(transformOriginY(), boundingBox.height()) + offsetY,
                              transformOriginZ());
    }

    unsigned size = transformOperations.size();
    for (unsigned i = 0; i < size; ++i)
        transformOperations[i]->apply(transform, boundingBox.size());

    if (applyTransformOrigin) {
        transform.translate3d(-floatValueForLength(transformOriginX(), boundingBox.width()) - offsetX,
                              -floatValueForLength(transformOriginY(), boundingBox.height()) - offsetY,
                              -transformOriginZ());
    }
}
void RenderStyle::setPageScaleTransform(float scale)
{
if (scale == 1)
return;
TransformOperations transform;
transform.operations().append(ScaleTransformOperation::create(scale, scale, ScaleTransformOperation::SCALE));
setTransform(transform);
setTransformOriginX(Length(0, Fixed));
setTransformOriginY(Length(0, Fixed));
}
// Replaces the text-shadow list, or prepends |shadowData| to it when |add| is
// true. text-shadow supports neither spread nor inset, hence the ASSERT.
void RenderStyle::setTextShadow(PassOwnPtr<ShadowData> shadowData, bool add)
{
    ASSERT(!shadowData || (!shadowData->spread() && shadowData->style() == Normal));

    StyleRareInheritedData* rareData = rareInheritedData.access();
    if (!add) {
        rareData->textShadow = shadowData;
        return;
    }

    // NOTE(review): a null shadowData with add == true would be dereferenced
    // here; callers presumably never pass that combination — confirm.
    shadowData->setNext(rareData->textShadow.release());
    rareData->textShadow = shadowData;
}
// Replaces the box-shadow list, or prepends |shadowData| to it when |add| is
// true.
void RenderStyle::setBoxShadow(PassOwnPtr<ShadowData> shadowData, bool add)
{
    StyleRareNonInheritedData* rareData = rareNonInheritedData.access();
    if (!add) {
        rareData->m_boxShadow = shadowData;
        return;
    }

    // NOTE(review): a null shadowData with add == true would be dereferenced
    // here; callers presumably never pass that combination — confirm.
    shadowData->setNext(rareData->m_boxShadow.release());
    rareData->m_boxShadow = shadowData;
}
// Resolves the four corner-radius Lengths of |border| against |size|,
// producing concrete pixel radii for a rounded rect of that size.
static RoundedRect::Radii calcRadiiFor(const BorderData& border, IntSize size, RenderView* renderView)
{
    int boxWidth = size.width();
    int boxHeight = size.height();
    IntSize topLeft(valueForLength(border.topLeft().width(), boxWidth, renderView),
        valueForLength(border.topLeft().height(), boxHeight, renderView));
    IntSize topRight(valueForLength(border.topRight().width(), boxWidth, renderView),
        valueForLength(border.topRight().height(), boxHeight, renderView));
    IntSize bottomLeft(valueForLength(border.bottomLeft().width(), boxWidth, renderView),
        valueForLength(border.bottomLeft().height(), boxHeight, renderView));
    IntSize bottomRight(valueForLength(border.bottomRight().width(), boxWidth, renderView),
        valueForLength(border.bottomRight().height(), boxHeight, renderView));
    return RoundedRect::Radii(topLeft, topRight, bottomLeft, bottomRight);
}
static float calcConstraintScaleFor(const IntRect& rect, const RoundedRect::Radii& radii)
{
// Constrain corner radii using CSS3 rules:
// http://www.w3.org/TR/css3-background/#the-border-radius
float factor = 1;
unsigned radiiSum;
// top
radiiSum = static_cast<unsigned>(radii.topLeft().width()) + static_cast<unsigned>(radii.topRight().width()); // Casts to avoid integer overflow.
if (radiiSum > static_cast<unsigned>(rect.width()))
factor = min(static_cast<float>(rect.width()) / radiiSum, factor);
// bottom
radiiSum = static_cast<unsigned>(radii.bottomLeft().width()) + static_cast<unsigned>(radii.bottomRight().width());
if (radiiSum > static_cast<unsigned>(rect.width()))
factor = min(static_cast<float>(rect.width()) / radiiSum, factor);
// left
radiiSum = static_cast<unsigned>(radii.topLeft().height()) + static_cast<unsigned>(radii.bottomLeft().height());
if (radiiSum > static_cast<unsigned>(rect.height()))
factor = min(static_cast<float>(rect.height()) / radiiSum, factor);
// right
radiiSum = static_cast<unsigned>(radii.topRight().height()) + static_cast<unsigned>(radii.bottomRight().height());
if (radiiSum > static_cast<unsigned>(rect.height()))
factor = min(static_cast<float>(rect.height()) / radiiSum, factor);
ASSERT(factor <= 1);
return factor;
}
// --- Accessors for the shared StyleInheritedData. The SET_VAR-based setters
// copy-on-write the shared data before mutating it.
StyleImage* RenderStyle::listStyleImage() const { return inherited->list_style_image.get(); }
void RenderStyle::setListStyleImage(PassRefPtr<StyleImage> v)
{
    // Only detach (copy-on-write) the inherited data when the image changes.
    if (inherited->list_style_image != v)
        inherited.access()->list_style_image = v;
}
Color RenderStyle::color() const { return inherited->color; }
Color RenderStyle::visitedLinkColor() const { return inherited->visitedLinkColor; }
// NOTE(review): stray trailing ';' after this definition — harmless but
// inconsistent with the neighbouring setters.
void RenderStyle::setColor(const Color& v) { SET_VAR(inherited, color, v) };
void RenderStyle::setVisitedLinkColor(const Color& v) { SET_VAR(inherited, visitedLinkColor, v) }
short RenderStyle::horizontalBorderSpacing() const { return inherited->horizontal_border_spacing; }
short RenderStyle::verticalBorderSpacing() const { return inherited->vertical_border_spacing; }
void RenderStyle::setHorizontalBorderSpacing(short v) { SET_VAR(inherited, horizontal_border_spacing, v) }
void RenderStyle::setVerticalBorderSpacing(short v) { SET_VAR(inherited, vertical_border_spacing, v) }
// Builds the rounded rect for the outer border edge of |borderRect|,
// resolving border-radius Lengths, scaling the radii down so adjacent
// corners never overlap, and dropping radii on excluded logical edges.
RoundedRect RenderStyle::getRoundedBorderFor(const LayoutRect& borderRect, RenderView* renderView, bool includeLogicalLeftEdge, bool includeLogicalRightEdge) const
{
    // Snap to device pixels first so radii are resolved against the rect
    // that will actually be painted.
    IntRect snappedBorderRect(pixelSnappedIntRect(borderRect));
    RoundedRect roundedRect(snappedBorderRect);
    if (hasBorderRadius()) {
        RoundedRect::Radii radii = calcRadiiFor(surround->border, snappedBorderRect.size(), renderView);
        radii.scale(calcConstraintScaleFor(snappedBorderRect, radii));
        roundedRect.includeLogicalEdges(radii, isHorizontalWritingMode(), includeLogicalLeftEdge, includeLogicalRightEdge);
    }
    return roundedRect;
}
// Convenience overload: derives the four physical border widths from this
// style (zeroing the widths of excluded logical edges) and forwards to the
// explicit-width overload below.
RoundedRect RenderStyle::getRoundedInnerBorderFor(const LayoutRect& borderRect, bool includeLogicalLeftEdge, bool includeLogicalRightEdge) const
{
    bool horizontal = isHorizontalWritingMode();

    LayoutUnit leftWidth = 0;
    LayoutUnit rightWidth = 0;
    LayoutUnit topWidth = 0;
    LayoutUnit bottomWidth = 0;
    if (!horizontal || includeLogicalLeftEdge)
        leftWidth = borderLeftWidth();
    if (!horizontal || includeLogicalRightEdge)
        rightWidth = borderRightWidth();
    if (horizontal || includeLogicalLeftEdge)
        topWidth = borderTopWidth();
    if (horizontal || includeLogicalRightEdge)
        bottomWidth = borderBottomWidth();

    return getRoundedInnerBorderFor(borderRect, topWidth, bottomWidth, leftWidth, rightWidth, includeLogicalLeftEdge, includeLogicalRightEdge);
}
// Builds the rounded rect for the inner (padding-box side) border edge:
// |borderRect| inset by the given widths, with the outer radii shrunk by
// the same amounts.
RoundedRect RenderStyle::getRoundedInnerBorderFor(const LayoutRect& borderRect,
    LayoutUnit topWidth, LayoutUnit bottomWidth, LayoutUnit leftWidth, LayoutUnit rightWidth, bool includeLogicalLeftEdge, bool includeLogicalRightEdge) const
{
    LayoutRect innerRect(borderRect.x() + leftWidth,
               borderRect.y() + topWidth,
               borderRect.width() - leftWidth - rightWidth,
               borderRect.height() - topWidth - bottomWidth);
    RoundedRect roundedRect(pixelSnappedIntRect(innerRect));
    if (hasBorderRadius()) {
        // Inner radii are the outer radii reduced by the border widths.
        RoundedRect::Radii radii = getRoundedBorderFor(borderRect).radii();
        radii.shrink(topWidth, bottomWidth, leftWidth, rightWidth);
        roundedRect.includeLogicalEdges(radii, isHorizontalWritingMode(), includeLogicalLeftEdge, includeLogicalRightEdge);
    }
    return roundedRect;
}
// Read-only view of the CSS counter directives; may be null when none set.
const CounterDirectiveMap* RenderStyle::counterDirectives() const
{
    return rareNonInheritedData->m_counterDirectives.get();
}
// Mutable access to the counter directives, allocating an empty map on
// first use (copy-on-write detaches the rare data).
CounterDirectiveMap& RenderStyle::accessCounterDirectives()
{
    OwnPtr<CounterDirectiveMap>& map = rareNonInheritedData.access()->m_counterDirectives;
    if (!map)
        map = adoptPtr(new CounterDirectiveMap);
    return *map;
}
// Looks up the directives for |identifier|; returns a default-constructed
// CounterDirectives when the map is absent or has no entry.
const CounterDirectives RenderStyle::getCounterDirectives(const AtomicString& identifier) const
{
    if (const CounterDirectiveMap* directives = counterDirectives())
        return directives->get(identifier);
    return CounterDirectives();
}
// Returns the string rendered at hyphenation points: an author-supplied
// -webkit-hyphenate-character if set, otherwise U+2010 HYPHEN when the
// primary font has a glyph for it, falling back to U+002D HYPHEN-MINUS.
const AtomicString& RenderStyle::hyphenString() const
{
    ASSERT(hyphens() != HyphensNone);

    const AtomicString& hyphenationString = rareInheritedData.get()->hyphenationString;
    if (!hyphenationString.isNull())
        return hyphenationString;

    // FIXME: This should depend on locale.
    DEFINE_STATIC_LOCAL(AtomicString, hyphenMinusString, (&hyphenMinus, 1));
    DEFINE_STATIC_LOCAL(AtomicString, hyphenString, (&hyphen, 1));
    return font().primaryFontHasGlyphForCharacter(hyphen) ? hyphenString : hyphenMinusString;
}
// Maps the computed text-emphasis-mark (and fill) to the Unicode character
// drawn for it. Each mark string is built once and cached in a static.
// 'auto' never reaches here: textEmphasisMark() resolves it first.
const AtomicString& RenderStyle::textEmphasisMarkString() const
{
    switch (textEmphasisMark()) {
    case TextEmphasisMarkNone:
        return nullAtom;
    case TextEmphasisMarkCustom:
        return textEmphasisCustomMark();
    case TextEmphasisMarkDot: {
        DEFINE_STATIC_LOCAL(AtomicString, filledDotString, (&bullet, 1));
        DEFINE_STATIC_LOCAL(AtomicString, openDotString, (&whiteBullet, 1));
        return textEmphasisFill() == TextEmphasisFillFilled ? filledDotString : openDotString;
    }
    case TextEmphasisMarkCircle: {
        DEFINE_STATIC_LOCAL(AtomicString, filledCircleString, (&blackCircle, 1));
        DEFINE_STATIC_LOCAL(AtomicString, openCircleString, (&whiteCircle, 1));
        return textEmphasisFill() == TextEmphasisFillFilled ? filledCircleString : openCircleString;
    }
    case TextEmphasisMarkDoubleCircle: {
        DEFINE_STATIC_LOCAL(AtomicString, filledDoubleCircleString, (&fisheye, 1));
        DEFINE_STATIC_LOCAL(AtomicString, openDoubleCircleString, (&bullseye, 1));
        return textEmphasisFill() == TextEmphasisFillFilled ? filledDoubleCircleString : openDoubleCircleString;
    }
    case TextEmphasisMarkTriangle: {
        DEFINE_STATIC_LOCAL(AtomicString, filledTriangleString, (&blackUpPointingTriangle, 1));
        DEFINE_STATIC_LOCAL(AtomicString, openTriangleString, (&whiteUpPointingTriangle, 1));
        return textEmphasisFill() == TextEmphasisFillFilled ? filledTriangleString : openTriangleString;
    }
    case TextEmphasisMarkSesame: {
        DEFINE_STATIC_LOCAL(AtomicString, filledSesameString, (&sesameDot, 1));
        DEFINE_STATIC_LOCAL(AtomicString, openSesameString, (&whiteSesameDot, 1));
        return textEmphasisFill() == TextEmphasisFillFilled ? filledSesameString : openSesameString;
    }
    case TextEmphasisMarkAuto:
        ASSERT_NOT_REACHED();
        return nullAtom;
    }

    ASSERT_NOT_REACHED();
    return nullAtom;
}
#if ENABLE(DASHBOARD_SUPPORT)
// Initial value for -webkit-dashboard-region: an empty region list.
const Vector<StyleDashboardRegion>& RenderStyle::initialDashboardRegions()
{
    DEFINE_STATIC_LOCAL(Vector<StyleDashboardRegion>, emptyList, ());
    return emptyList;
}
// Shared singleton list holding one explicit "none" region, built lazily on
// first call; distinct from the empty initial list above.
const Vector<StyleDashboardRegion>& RenderStyle::noneDashboardRegions()
{
    DEFINE_STATIC_LOCAL(Vector<StyleDashboardRegion>, noneList, ());
    static bool noneListInitialized = false;

    if (!noneListInitialized) {
        StyleDashboardRegion region;
        region.label = "";
        region.offset.m_top = Length();
        region.offset.m_right = Length();
        region.offset.m_bottom = Length();
        region.offset.m_left = Length();
        region.type = StyleDashboardRegion::None;
        noneList.append(region);
        noneListInitialized = true;
    }
    return noneList;
}
#endif
// Normalizes the animation list after style resolution: truncates at the
// first empty entry, clears the list entirely if nothing remains, and
// repeats shorter sub-lists across layers missing some properties.
void RenderStyle::adjustAnimations()
{
    AnimationList* animationList = rareNonInheritedData->m_animations.get();
    if (!animationList)
        return;

    // Get rid of empty animations and anything beyond them
    for (size_t i = 0; i < animationList->size(); ++i) {
        if (animationList->animation(i)->isEmpty()) {
            animationList->resize(i);
            break;
        }
    }

    if (animationList->isEmpty()) {
        clearAnimations();
        return;
    }

    // Repeat patterns into layers that don't have some properties set.
    animationList->fillUnsetProperties();
}
// Normalizes the transition list after style resolution: truncates at the
// first empty entry, clears the list if nothing remains, fills unset
// properties, then removes duplicate property entries.
void RenderStyle::adjustTransitions()
{
    AnimationList* transitionList = rareNonInheritedData->m_transitions.get();
    if (!transitionList)
        return;

    // Get rid of empty transitions and anything beyond them
    for (size_t i = 0; i < transitionList->size(); ++i) {
        if (transitionList->animation(i)->isEmpty()) {
            transitionList->resize(i);
            break;
        }
    }

    if (transitionList->isEmpty()) {
        clearTransitions();
        return;
    }

    // Repeat patterns into layers that don't have some properties set.
    transitionList->fillUnsetProperties();

    // Make sure there are no duplicate properties. This is an O(n^2) algorithm
    // but the lists tend to be very short, so it is probably ok
    for (size_t i = 0; i < transitionList->size(); ++i) {
        for (size_t j = i+1; j < transitionList->size(); ++j) {
            if (transitionList->animation(i)->property() == transitionList->animation(j)->property()) {
                // toss i
                transitionList->remove(i);
                // Removing slot i shifts the list left; resetting j to i makes
                // the inner loop restart at i+1 so the element that just moved
                // into slot i is compared against all later entries.
                j = i;
            }
        }
    }
}
// Mutable access to the animation list, allocating an empty list on first
// use so callers can populate it in place.
AnimationList* RenderStyle::accessAnimations()
{
    StyleRareNonInheritedData* rareData = rareNonInheritedData.access();
    if (!rareData->m_animations)
        rareData->m_animations = adoptPtr(new AnimationList());
    return rareData->m_animations.get();
}
// Mutable access to the transition list, allocating an empty list on first
// use so callers can populate it in place.
AnimationList* RenderStyle::accessTransitions()
{
    StyleRareNonInheritedData* rareData = rareNonInheritedData.access();
    if (!rareData->m_transitions)
        rareData->m_transitions = adoptPtr(new AnimationList());
    return rareData->m_transitions.get();
}
// Finds the transition governing |property|: the first list entry that
// either animates all properties or names |property| explicitly.
// Returns 0 when no transition applies.
const Animation* RenderStyle::transitionForProperty(CSSPropertyID property) const
{
    const AnimationList* transitionList = transitions();
    if (!transitionList)
        return 0;
    for (size_t i = 0; i < transitionList->size(); ++i) {
        const Animation* transition = transitionList->animation(i);
        if (transition->animationMode() == Animation::AnimateAll || transition->property() == property)
            return transition;
    }
    return 0;
}
// --- Font-related accessors, all delegating to the shared inherited data.
const Font& RenderStyle::font() const { return inherited->font; }
const FontMetrics& RenderStyle::fontMetrics() const { return inherited->font.fontMetrics(); }
const FontDescription& RenderStyle::fontDescription() const { return inherited->font.fontDescription(); }
// specifiedFontSize is the author-specified size; computedFontSize is the
// size after zoom/autosizing adjustments stored in the description.
float RenderStyle::specifiedFontSize() const { return fontDescription().specifiedSize(); }
float RenderStyle::computedFontSize() const { return fontDescription().computedSize(); }
int RenderStyle::fontSize() const { return inherited->font.pixelSize(); }
int RenderStyle::wordSpacing() const { return inherited->font.wordSpacing(); }
int RenderStyle::letterSpacing() const { return inherited->font.letterSpacing(); }
// Replaces the font description, preserving the current letter and word
// spacing. Returns true when a new Font was actually installed, false when
// |v| already matches the existing description.
bool RenderStyle::setFontDescription(const FontDescription& v)
{
    if (inherited->font.fontDescription() == v)
        return false;
    inherited.access()->font = Font(v, inherited->font.letterSpacing(), inherited->font.wordSpacing());
    return true;
}
// The author-specified line-height, unmodified by text autosizing.
Length RenderStyle::specifiedLineHeight() const { return inherited->line_height; }
// The effective line-height; with TEXT_AUTOSIZING this scales fixed values
// by the autosizing multiplier on demand.
Length RenderStyle::lineHeight() const
{
    const Length& lh = inherited->line_height;
#if ENABLE(TEXT_AUTOSIZING)
    // Unlike fontDescription().computedSize() and hence fontSize(), this is
    // recalculated on demand as we only store the specified line height.
    // FIXME: Should consider scaling the fixed part of any calc expressions
    // too, though this involves messily poking into CalcExpressionLength.
    float multiplier = textAutosizingMultiplier();
    if (multiplier > 1 && lh.isFixed())
        return Length(TextAutosizer::computeAutosizedFontSize(lh.value(), multiplier), Fixed);
#endif
    return lh;
}
void RenderStyle::setLineHeight(Length specifiedLineHeight) { SET_VAR(inherited, line_height, specifiedLineHeight); }
// Resolves the line-height Length to a pixel value: font's built-in spacing
// for the "normal" (negative) sentinel, percentages against the font size,
// viewport-percentage units against the RenderView.
int RenderStyle::computedLineHeight(RenderView* renderView) const
{
    const Length& lh = lineHeight();

    // Negative value means the line height is not set. Use the font's built-in spacing.
    if (lh.isNegative())
        return fontMetrics().lineSpacing();

    if (lh.isPercent())
        return minimumValueForLength(lh, fontSize());

    if (lh.isViewportPercentage())
        return valueForLength(lh, 0, renderView);

    // Fixed values: note the float is truncated to int here.
    return lh.value();
}
void RenderStyle::setWordSpacing(int v) { inherited.access()->font.setWordSpacing(v); }
void RenderStyle::setLetterSpacing(int v) { inherited.access()->font.setLetterSpacing(v); }
// Sets both the specified and computed font size, re-applying the text
// autosizing multiplier to the computed size, then rebuilds the Font with
// the font selector that was active before the change.
void RenderStyle::setFontSize(float size)
{
    // size must be specifiedSize if Text Autosizing is enabled, but computedSize if text
    // zoom is enabled (if neither is enabled it's irrelevant as they're probably the same).
    FontSelector* currentFontSelector = font().fontSelector();
    FontDescription desc(fontDescription());
    desc.setSpecifiedSize(size);
    desc.setComputedSize(size);

#if ENABLE(TEXT_AUTOSIZING)
    float multiplier = textAutosizingMultiplier();
    if (multiplier > 1) {
        desc.setComputedSize(TextAutosizer::computeAutosizedFontSize(size, multiplier));
    }
#endif

    setFontDescription(desc);
    font().update(currentFontSelector);
}
// Computes how far the (non-inset) shadows in |shadow|'s chain extend the
// box on each side. top/left are <= 0 (outward is negative on those sides);
// right/bottom are >= 0.
void RenderStyle::getShadowExtent(const ShadowData* shadow, LayoutUnit &top, LayoutUnit &right, LayoutUnit &bottom, LayoutUnit &left) const
{
    top = 0;
    right = 0;
    bottom = 0;
    left = 0;

    for ( ; shadow; shadow = shadow->next()) {
        // Inset shadows paint inside the box and never extend it.
        if (shadow->style() == Inset)
            continue;
        int blurAndSpread = shadow->blur() + shadow->spread();

        top = min<LayoutUnit>(top, shadow->y() - blurAndSpread);
        right = max<LayoutUnit>(right, shadow->x() + blurAndSpread);
        bottom = max<LayoutUnit>(bottom, shadow->y() + blurAndSpread);
        left = min<LayoutUnit>(left, shadow->x() - blurAndSpread);
    }
}
// Computes how far the inset shadows in |shadow|'s chain reach into the box
// on each side, as a LayoutBoxExtent. Only Inset entries contribute; the
// sign conventions mirror (and invert) getShadowExtent above.
LayoutBoxExtent RenderStyle::getShadowInsetExtent(const ShadowData* shadow) const
{
    LayoutUnit top = 0;
    LayoutUnit right = 0;
    LayoutUnit bottom = 0;
    LayoutUnit left = 0;

    for ( ; shadow; shadow = shadow->next()) {
        // Outset (Normal) shadows do not intrude into the box.
        if (shadow->style() == Normal)
            continue;
        int blurAndSpread = shadow->blur() + shadow->spread();
        top = max<LayoutUnit>(top, shadow->y() + blurAndSpread);
        right = min<LayoutUnit>(right, shadow->x() - blurAndSpread);
        bottom = min<LayoutUnit>(bottom, shadow->y() - blurAndSpread);
        left = max<LayoutUnit>(left, shadow->x() + blurAndSpread);
    }

    return LayoutBoxExtent(top, right, bottom, left);
}
void RenderStyle::getShadowHorizontalExtent(const ShadowData* shadow, LayoutUnit &left, LayoutUnit &right) const
{
left = 0;
right = 0;
for ( ; shadow; shadow = shadow->next()) {
if (shadow->style() == Inset)
continue;
int blurAndSpread = shadow->blur() + shadow->spread();
left = min<LayoutUnit>(left, shadow->x() - blurAndSpread);
right = max<LayoutUnit>(right, shadow->x() + blurAndSpread);
}
}
void RenderStyle::getShadowVerticalExtent(const ShadowData* shadow, LayoutUnit &top, LayoutUnit &bottom) const
{
top = 0;
bottom = 0;
for ( ; shadow; shadow = shadow->next()) {
if (shadow->style() == Inset)
continue;
int blurAndSpread = shadow->blur() + shadow->spread();
top = min<LayoutUnit>(top, shadow->y() - blurAndSpread);
bottom = max<LayoutUnit>(bottom, shadow->y() + blurAndSpread);
}
}
// Resolves the color for |colorProperty| (visited or unvisited variant),
// applying CSS fallback rules: an invalid (unset) color falls back to
// 'color', except that unset 3D border styles get a hard-coded light gray
// so inset/outset/ridge/groove borders remain visible.
Color RenderStyle::colorIncludingFallback(int colorProperty, bool visitedLink) const
{
    Color result;
    EBorderStyle borderStyle = BNONE;
    switch (colorProperty) {
    case CSSPropertyBackgroundColor:
        return visitedLink ? visitedLinkBackgroundColor() : backgroundColor(); // Background color doesn't fall back.
    case CSSPropertyBorderLeftColor:
        result = visitedLink ? visitedLinkBorderLeftColor() : borderLeftColor();
        borderStyle = borderLeftStyle();
        break;
    case CSSPropertyBorderRightColor:
        result = visitedLink ? visitedLinkBorderRightColor() : borderRightColor();
        borderStyle = borderRightStyle();
        break;
    case CSSPropertyBorderTopColor:
        result = visitedLink ? visitedLinkBorderTopColor() : borderTopColor();
        borderStyle = borderTopStyle();
        break;
    case CSSPropertyBorderBottomColor:
        result = visitedLink ? visitedLinkBorderBottomColor() : borderBottomColor();
        borderStyle = borderBottomStyle();
        break;
    case CSSPropertyColor:
        result = visitedLink ? visitedLinkColor() : color();
        break;
    case CSSPropertyOutlineColor:
        result = visitedLink ? visitedLinkOutlineColor() : outlineColor();
        break;
    case CSSPropertyWebkitColumnRuleColor:
        result = visitedLink ? visitedLinkColumnRuleColor() : columnRuleColor();
        break;
    case CSSPropertyWebkitTextEmphasisColor:
        result = visitedLink ? visitedLinkTextEmphasisColor() : textEmphasisColor();
        break;
    case CSSPropertyWebkitTextFillColor:
        result = visitedLink ? visitedLinkTextFillColor() : textFillColor();
        break;
    case CSSPropertyWebkitTextStrokeColor:
        result = visitedLink ? visitedLinkTextStrokeColor() : textStrokeColor();
        break;
    default:
        ASSERT_NOT_REACHED();
        break;
    }

    if (!result.isValid()) {
        // Unset 3D border styles get a fixed light gray; everything else
        // falls back to the (visited) text color.
        if (!visitedLink && (borderStyle == INSET || borderStyle == OUTSET || borderStyle == RIDGE || borderStyle == GROOVE))
            result.setRGB(238, 238, 238);
        else
            result = visitedLink ? visitedLinkColor() : color();
    }
    return result;
}
// Resolves |colorProperty| taking :visited into account: outside a visited
// link the unvisited color is used directly; inside one, the visited RGB is
// combined with the unvisited alpha to limit history sniffing.
Color RenderStyle::visitedDependentColor(int colorProperty) const
{
    Color unvisitedColor = colorIncludingFallback(colorProperty, false);
    if (insideLink() != InsideVisitedLink)
        return unvisitedColor;

    Color visitedColor = colorIncludingFallback(colorProperty, true);

    // FIXME: Technically someone could explicitly specify the color transparent, but for now we'll just
    // assume that if the background color is transparent that it wasn't set. Note that it's weird that
    // we're returning unvisited info for a visited link, but given our restriction that the alpha values
    // have to match, it makes more sense to return the unvisited background color if specified than it
    // does to return black. This behavior matches what Firefox 4 does as well.
    if (colorProperty == CSSPropertyBackgroundColor && visitedColor == Color::transparent)
        return unvisitedColor;

    // Take the alpha from the unvisited color, but get the RGB values from the visited color.
    return Color(visitedColor.red(), visitedColor.green(), visitedColor.blue(), unvisitedColor.alpha());
}
// --- Logical border accessors: map the flow-relative before/after/start/end
// edges to physical borders based on writing mode and direction.
const BorderValue& RenderStyle::borderBefore() const
{
    switch (writingMode()) {
    case TopToBottomWritingMode:
        return borderTop();
    case BottomToTopWritingMode:
        return borderBottom();
    case LeftToRightWritingMode:
        return borderLeft();
    case RightToLeftWritingMode:
        return borderRight();
    }
    ASSERT_NOT_REACHED();
    return borderTop();
}
const BorderValue& RenderStyle::borderAfter() const
{
    switch (writingMode()) {
    case TopToBottomWritingMode:
        return borderBottom();
    case BottomToTopWritingMode:
        return borderTop();
    case LeftToRightWritingMode:
        return borderRight();
    case RightToLeftWritingMode:
        return borderLeft();
    }
    ASSERT_NOT_REACHED();
    return borderBottom();
}
// start/end additionally depend on text direction within the writing mode.
const BorderValue& RenderStyle::borderStart() const
{
    if (isHorizontalWritingMode())
        return isLeftToRightDirection() ? borderLeft() : borderRight();
    return isLeftToRightDirection() ? borderTop() : borderBottom();
}
const BorderValue& RenderStyle::borderEnd() const
{
    if (isHorizontalWritingMode())
        return isLeftToRightDirection() ? borderRight() : borderLeft();
    return isLeftToRightDirection() ? borderBottom() : borderTop();
}
// --- Logical border widths: same flow-relative-to-physical mapping as the
// BorderValue accessors above, for the width component only.
unsigned short RenderStyle::borderBeforeWidth() const
{
    switch (writingMode()) {
    case TopToBottomWritingMode:
        return borderTopWidth();
    case BottomToTopWritingMode:
        return borderBottomWidth();
    case LeftToRightWritingMode:
        return borderLeftWidth();
    case RightToLeftWritingMode:
        return borderRightWidth();
    }
    ASSERT_NOT_REACHED();
    return borderTopWidth();
}
unsigned short RenderStyle::borderAfterWidth() const
{
    switch (writingMode()) {
    case TopToBottomWritingMode:
        return borderBottomWidth();
    case BottomToTopWritingMode:
        return borderTopWidth();
    case LeftToRightWritingMode:
        return borderRightWidth();
    case RightToLeftWritingMode:
        return borderLeftWidth();
    }
    ASSERT_NOT_REACHED();
    return borderBottomWidth();
}
unsigned short RenderStyle::borderStartWidth() const
{
    if (isHorizontalWritingMode())
        return isLeftToRightDirection() ? borderLeftWidth() : borderRightWidth();
    return isLeftToRightDirection() ? borderTopWidth() : borderBottomWidth();
}
unsigned short RenderStyle::borderEndWidth() const
{
    if (isHorizontalWritingMode())
        return isLeftToRightDirection() ? borderRightWidth() : borderLeftWidth();
    return isLeftToRightDirection() ? borderBottomWidth() : borderTopWidth();
}
// Sets the flow-relative "start" margin, mapping it to the matching
// physical margin for the current writing mode and direction.
void RenderStyle::setMarginStart(Length margin)
{
    bool ltr = isLeftToRightDirection();
    if (isHorizontalWritingMode())
        ltr ? setMarginLeft(margin) : setMarginRight(margin);
    else
        ltr ? setMarginTop(margin) : setMarginBottom(margin);
}
// Sets the flow-relative "end" margin, mapping it to the matching physical
// margin for the current writing mode and direction.
void RenderStyle::setMarginEnd(Length margin)
{
    bool ltr = isLeftToRightDirection();
    if (isHorizontalWritingMode())
        ltr ? setMarginRight(margin) : setMarginLeft(margin);
    else
        ltr ? setMarginBottom(margin) : setMarginTop(margin);
}
// Returns text-emphasis-mark with 'auto' resolved to the writing-mode
// default: dots for horizontal text, sesame marks for vertical text.
TextEmphasisMark RenderStyle::textEmphasisMark() const
{
    TextEmphasisMark mark = static_cast<TextEmphasisMark>(rareInheritedData->textEmphasisMark);
    if (mark == TextEmphasisMarkAuto)
        return isHorizontalWritingMode() ? TextEmphasisMarkDot : TextEmphasisMarkSesame;
    return mark;
}
#if ENABLE(TOUCH_EVENTS)
// Default -webkit-tap-highlight-color, supplied by the platform theme.
Color RenderStyle::initialTapHighlightColor()
{
    return RenderTheme::tapHighlightColor();
}
#endif

// Resolves a nine-piece image's outset values against the corresponding
// border widths, yielding how far the image extends beyond each edge.
LayoutBoxExtent RenderStyle::imageOutsets(const NinePieceImage& image) const
{
    return LayoutBoxExtent(NinePieceImage::computeOutset(image.outset().top(), borderTopWidth()),
                           NinePieceImage::computeOutset(image.outset().right(), borderRightWidth()),
                           NinePieceImage::computeOutset(image.outset().bottom(), borderBottomWidth()),
                           NinePieceImage::computeOutset(image.outset().left(), borderLeftWidth()));
}
// Reports this style's retained sub-objects to the memory instrumentation
// framework under the CSS memory category.
void RenderStyle::reportMemoryUsage(MemoryObjectInfo* memoryObjectInfo) const
{
    MemoryClassInfo info(memoryObjectInfo, this, WebCoreMemoryTypes::CSS);
    info.addMember(m_box);
    info.addMember(visual);
    // FIXME: m_background contains RefPtr<StyleImage> that might need to be instrumented.
    info.addMember(m_background);
    // FIXME: surrond contains some fields e.g. BorderData that might need to be instrumented.
    info.addMember(surround);
    info.addMember(rareNonInheritedData);
    info.addMember(rareInheritedData);
    // FIXME: inherited contains StyleImage and Font fields that might need to be instrumented.
    info.addMember(inherited);
    info.addMember(m_cachedPseudoStyles);
#if ENABLE(SVG)
    info.addMember(m_svgStyle);
#endif
}
} // namespace WebCore
| {
"content_hash": "14bd419cddba93dc2db6977a80c33ee2",
"timestamp": "",
"source": "github",
"line_count": 1583,
"max_line_length": 183,
"avg_line_length": 40.57296272899558,
"alnum_prop": 0.6941628909959986,
"repo_name": "yoavweiss/RespImg-WebCore",
"id": "387fd20cbc1778215fc17fa9e9aede1a89cc998c",
"size": "65228",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rendering/style/RenderStyle.cpp",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "1301"
},
{
"name": "C",
"bytes": "2369715"
},
{
"name": "C++",
"bytes": "39064862"
},
{
"name": "JavaScript",
"bytes": "3763760"
},
{
"name": "Objective-C",
"bytes": "2038598"
},
{
"name": "Perl",
"bytes": "768866"
},
{
"name": "Prolog",
"bytes": "519"
},
{
"name": "Python",
"bytes": "210630"
},
{
"name": "Ruby",
"bytes": "1927"
},
{
"name": "Shell",
"bytes": "8214"
}
],
"symlink_target": ""
} |
"""
State Class
Stores information from data read by the json
this is done instead of passing a deserialized dict to objects so that named values
can be used
"""
import os
import json
class State(object):
    """Snapshot of the light-display configuration driven by the JSON API.

    Values are deserialized from a JSON file into named attributes (rather
    than passing a raw dict around) and refreshed on demand via
    ``check_update()``, which compares the file's modification time with the
    time of the last read.
    """

    # unix mtime recorded when the file was last read via check_update()
    last_updated_time = 0
    # path of the JSON file backing this state
    file_path = "unset"
    # primary/secondary colors as RGB tuples with components 0-255
    color1 = (255, 0, 0)
    color2 = (0, 0, 0)
    # whether color1/color2 should be randomized
    random1 = False
    random2 = False
    # id of the pattern to display
    pattern = 0
    # pattern-dependent size, typically the pattern width in LEDs
    length = 5
    # milliseconds between visible pattern updates
    delay = 10

    def __init__(self, file_path):
        self.file_path = file_path
        # Eagerly load the initial state; note last_updated_time is left at
        # its default here, matching check_update()'s first-read behavior.
        self.__update_state()

    def check_update(self):
        """Re-read the state file if it changed since the last read.

        A missing (or otherwise unreadable) file is silently ignored so the
        display keeps running with the last known state.
        """
        try:
            mtime = os.path.getmtime(self.file_path)
            if mtime > self.last_updated_time:
                # Newer than our last read: reload everything.
                self.__update_state()
            self.last_updated_time = mtime
        except OSError:
            # File not found; keep current state untouched.
            pass

    def __update_state(self):
        """Load all fields from the JSON file.

        Performs no schema validation beyond coercing each value to its
        expected Python type.
        """
        with open(self.file_path) as handle:
            raw = json.load(handle)
        self.color1 = tuple(raw['color1'])
        self.color2 = tuple(raw['color2'])
        self.random1 = bool(raw['random1'])
        self.random2 = bool(raw['random2'])
        self.pattern = int(raw['pattern'])
        self.length = int(raw['length'])
        self.delay = int(raw['delay'])
| {
"content_hash": "111bd1aa5074114685253cac948d3c39",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 83,
"avg_line_length": 30.591549295774648,
"alnum_prop": 0.583793738489871,
"repo_name": "Chris-Johnston/Internet-Xmas-Tree",
"id": "b8094d4d630b621e2f7617401d66c39636d71c33",
"size": "2172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lights/state.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1317"
},
{
"name": "HTML",
"bytes": "5579"
},
{
"name": "JavaScript",
"bytes": "1531"
},
{
"name": "Python",
"bytes": "24983"
},
{
"name": "Shell",
"bytes": "217"
}
],
"symlink_target": ""
} |
package main
import (
"errors"
"fmt"
"github.com/bitly/go-simplejson"
"io/ioutil"
"log"
"math"
"github.com/bakins/net-http-recover"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
"github.com/gorilla/rpc"
"github.com/gorilla/rpc/json"
"github.com/justinas/alice"
"net/http"
"os"
"strconv"
"strings"
)
// StockAccounts is the JSON-RPC service: all portfolios keyed by trade id.
// NOTE(review): this map is mutated from concurrently-served RPC requests
// without any synchronization — confirm a mutex is needed.
type StockAccounts struct {
    stockPortfolio map[int](*Portfolio)
}
// Portfolio holds one trade's positions keyed by stock symbol, plus the
// budget remainder that could not buy a whole share.
type Portfolio struct {
    stocks           map[string](*Share)
    UninvestedAmount float32
}
// Share records a position in a single stock.
// NOTE(review): shareId actually stores the number of shares held, and
// boughtPrice the average purchase price — the names are misleading.
type Share struct {
    boughtPrice float32
    shareId     int
}
// BuyRequest is the Buy RPC payload: "SYM:pct%,SYM:pct%,..." plus a budget.
type BuyRequest struct {
    StockSymbolAndPercentage string
    Budget                   float32
}
// BuyResponse reports the assigned trade id, the positions bought
// ("SYM:count:$price"), and the leftover budget.
type BuyResponse struct {
    TradeId          int
    Stocks           []string
    UninvestedAmount float32
}
// CheckRequest asks for the current value of an existing trade id.
type CheckRequest struct {
    TradeId string
}
// CheckResponse lists current positions ("SYM:count:+/-$price"), the
// uninvested remainder, and the portfolio's current market value.
type CheckResponse struct {
    Stocks           []string
    UninvestedAmount float32
    TotalMarketValue float32
}
// Package-level service state; tradeId is the next id to hand out.
var st StockAccounts
var tradeId int
// main wires the JSON-RPC stock-trading service into an HTTP server on port
// 5062. The gorilla/rpc server is wrapped in a logging / compression /
// panic-recovery middleware chain and mounted on the router at /rpc.
func main() {
	st := new(StockAccounts)
	tradeId = 100

	router := mux.NewRouter()
	server := rpc.NewServer()
	server.RegisterCodec(json.NewCodec(), "application/json")
	server.RegisterService(st, "")

	chain := alice.New(
		func(h http.Handler) http.Handler {
			return handlers.CombinedLoggingHandler(os.Stdout, h)
		},
		handlers.CompressHandler,
		func(h http.Handler) http.Handler {
			return recovery.Handler(os.Stderr, h, true)
		})
	router.Handle("/rpc", chain.Then(server))

	// BUG FIX: ListenAndServe was previously handed the raw rpc server,
	// which bypassed the router and the entire middleware chain registered
	// above. Serving the router makes /rpc go through logging, compression
	// and panic recovery as intended.
	log.Fatal(http.ListenAndServe(":5062", router))
}
// Buy allocates request.Budget across the requested stocks ("SYM:pct%" list),
// buying whole shares at the current quoted price, records the positions
// under a freshly assigned trade id, and reports the leftover budget.
// NOTE(review): tradeId++ and st.stockPortfolio are shared mutable state
// touched without synchronization while net/http serves requests
// concurrently — confirm a mutex is added.
func (st *StockAccounts) Buy(httpRequest *http.Request, request *BuyRequest, response *BuyResponse) error {
    tradeId++
    response.TradeId = tradeId
    // Lazily create the top-level portfolio map on the first ever request.
    if st.stockPortfolio == nil {
        st.stockPortfolio = make(map[int](*Portfolio))
        st.stockPortfolio[tradeId] = new(Portfolio)
        st.stockPortfolio[tradeId].stocks = make(map[string]*Share)
    }
    symbolAndPercentages := strings.Split(request.StockSymbolAndPercentage, ",")
    newbudget := float32(request.Budget)
    var spent float32
    for _, stk := range symbolAndPercentages {
        // Each entry has the form "SYMBOL:NN%".
        split := strings.Split(stk, ":")
        stockQuote := split[0]
        percentage := split[1]
        strPercentage := strings.TrimSuffix(percentage, "%")
        floatPercentage64, _ := strconv.ParseFloat(strPercentage, 32)
        floatPercentage := float32(floatPercentage64 / 100.00)
        currentPrice := checkQuote(stockQuote)
        // Only whole shares are bought; the remainder stays uninvested.
        shares := int(math.Floor(float64(newbudget * floatPercentage / currentPrice)))
        sharesFloat := float32(shares)
        spent += sharesFloat * currentPrice
        if _, ok := st.stockPortfolio[tradeId]; !ok {
            newPortfolio := new(Portfolio)
            newPortfolio.stocks = make(map[string]*Share)
            st.stockPortfolio[tradeId] = newPortfolio
        }
        if _, ok := st.stockPortfolio[tradeId].stocks[stockQuote]; !ok {
            newShare := new(Share)
            newShare.boughtPrice = currentPrice
            newShare.shareId = shares
            st.stockPortfolio[tradeId].stocks[stockQuote] = newShare
        } else {
            // Same symbol listed twice: merge into a weighted-average price.
            total := float32(sharesFloat*currentPrice) + float32(st.stockPortfolio[tradeId].stocks[stockQuote].shareId)*st.stockPortfolio[tradeId].stocks[stockQuote].boughtPrice
            st.stockPortfolio[tradeId].stocks[stockQuote].boughtPrice = total / float32(shares+st.stockPortfolio[tradeId].stocks[stockQuote].shareId)
            st.stockPortfolio[tradeId].stocks[stockQuote].shareId += shares
        }
        stockBought := stockQuote + ":" + strconv.Itoa(shares) + ":$" + strconv.FormatFloat(float64(currentPrice), 'f', 2, 32)
        response.Stocks = append(response.Stocks, stockBought)
    }
    leftOver := newbudget - spent
    response.UninvestedAmount = leftOver
    st.stockPortfolio[tradeId].UninvestedAmount += leftOver
    return nil
}
// Check reports the current state of an existing trade: each position as
// "SYM:count:[+|-]$currentPrice" (sign indicates gain/loss versus the
// average purchase price), the uninvested remainder, and the portfolio's
// total current market value. Errors on unknown or malformed trade ids.
func (st *StockAccounts) Check(httpRequest *http.Request, checkRq *CheckRequest, checkResp *CheckResponse) error {
    if st.stockPortfolio == nil {
        return errors.New("No account set up yet.")
    }
    tradeNum64, err := strconv.ParseInt(checkRq.TradeId, 10, 64)
    if err != nil {
        return errors.New("Illegal Trade ID. ")
    }
    tradeId := int(tradeNum64)
    if pocket, ok := st.stockPortfolio[tradeId]; ok {
        var currentMarketVal float32
        for stockquote, sh := range pocket.stocks {
            currentPrice := checkQuote(stockquote)
            var str string
            // Prefix encodes performance relative to the purchase price.
            if sh.boughtPrice < currentPrice {
                str = "+$" + strconv.FormatFloat(float64(currentPrice), 'f', 2, 32)
            } else if sh.boughtPrice > currentPrice {
                str = "-$" + strconv.FormatFloat(float64(currentPrice), 'f', 2, 32)
            } else {
                str = "$" + strconv.FormatFloat(float64(currentPrice), 'f', 2, 32)
            }
            entry := stockquote + ":" + strconv.Itoa(sh.shareId) + ":" + str
            checkResp.Stocks = append(checkResp.Stocks, entry)
            currentMarketVal += float32(sh.shareId) * currentPrice
        }
        checkResp.UninvestedAmount = pocket.UninvestedAmount
        checkResp.TotalMarketValue = currentMarketVal
    } else {
        return errors.New("No such trade ID. ")
    }
    return nil
}
// checkError prints a non-nil error to stdout; nil errors are ignored.
func checkError(err error) {
	if err == nil {
		return
	}
	fmt.Println(err)
}
// checkQuote fetches the last traded price for stockName from the Yahoo
// Finance YQL web service and returns it as a float32.
//
// NOTE(review): the query.yahooapis.com YQL endpoint has since been
// retired by Yahoo, so this call will fail at runtime; kept as-is pending
// a replacement data source. Network and HTTP failures terminate the
// program via log.Fatal, matching the original behavior.
func checkQuote(stockName string) float32 {
	leftBaseUrl := "https://query.yahooapis.com/v1/public/yql?q=select%20LastTradePriceOnly%20from%20yahoo.finance%0A.quotes%20where%20symbol%20%3D%20%22"
	rightBaseUrl := "%22%0A%09%09&format=json&env=http%3A%2F%2Fdatatables.org%2Falltables.env"
	resp, err := http.Get(leftBaseUrl + stockName + rightBaseUrl)
	if err != nil {
		log.Fatal(err)
	}
	body, err := ioutil.ReadAll(resp.Body)
	resp.Body.Close()
	if err != nil {
		log.Fatal(err)
	}
	if resp.StatusCode != 200 {
		log.Fatal("Query failure, possibly no network connection or illegal stock quote ")
	}
	newjson, err := simplejson.NewJson(body)
	if err != nil {
		fmt.Println(err)
	}
	// Surface (rather than silently drop) JSON-extraction and parse errors
	// so a malformed response does not masquerade as a $0.00 price.
	price, err := newjson.Get("query").Get("results").Get("quote").Get("LastTradePriceOnly").String()
	if err != nil {
		fmt.Println(err)
	}
	floatPrice, err := strconv.ParseFloat(price, 32)
	if err != nil {
		fmt.Println(err)
	}
	return float32(floatPrice)
}
| {
"content_hash": "53f017dc6f3d5d3f2937796bdb650062",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 168,
"avg_line_length": 25.38938053097345,
"alnum_prop": 0.7098292087835483,
"repo_name": "mohdzeeshanshaikh/cmpe273-assignment1",
"id": "8784bc6c09764cefe8b7795ece03ed2882f2ece3",
"size": "5738",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server.go",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "8715"
}
],
"symlink_target": ""
} |
/* Afrikaans initialisation for the jQuery UI date picker plugin. */
/* Written by Renier Pretorius. */
jQuery(function ($) {
	// Register the Afrikaans ('af') locale and make it the active default.
	var afrikaans = {
		closeText: 'Selekteer',
		prevText: 'Vorige',
		nextText: 'Volgende',
		currentText: 'Vandag',
		monthNames: ['Januarie', 'Februarie', 'Maart', 'April', 'Mei', 'Junie',
			'Julie', 'Augustus', 'September', 'Oktober', 'November', 'Desember'],
		monthNamesShort: ['Jan', 'Feb', 'Mrt', 'Apr', 'Mei', 'Jun',
			'Jul', 'Aug', 'Sep', 'Okt', 'Nov', 'Des'],
		dayNames: ['Sondag', 'Maandag', 'Dinsdag', 'Woensdag', 'Donderdag', 'Vrydag', 'Saterdag'],
		dayNamesShort: ['Son', 'Maa', 'Din', 'Woe', 'Don', 'Vry', 'Sat'],
		dayNamesMin: ['So', 'Ma', 'Di', 'Wo', 'Do', 'Vr', 'Sa'],
		weekHeader: 'Wk',
		dateFormat: 'dd/mm/yy',
		firstDay: 1,
		isRTL: false,
		showMonthAfterYear: false,
		yearSuffix: ''
	};
	$.datepicker.regional['af'] = afrikaans;
	$.datepicker.setDefaults(afrikaans);
});
| {
"content_hash": "70ef06555329d7bcdcd9b0da9efbb1a2",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 98,
"avg_line_length": 43.125,
"alnum_prop": 0.5371980676328503,
"repo_name": "XristosMallios/cache",
"id": "2d5412f974dfffdae81c2856e2d166ef29febc01",
"size": "1035",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "exareme-master/src/main/static/libs/jquery-ui-1.8.18.custom/development-bundle/ui/i18n/jquery.ui.datepicker-af.js",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "66130"
},
{
"name": "HTML",
"bytes": "1330883"
},
{
"name": "Java",
"bytes": "4022490"
},
{
"name": "JavaScript",
"bytes": "6100034"
},
{
"name": "PHP",
"bytes": "124335"
},
{
"name": "Python",
"bytes": "2478415"
},
{
"name": "R",
"bytes": "671"
},
{
"name": "Shell",
"bytes": "15240"
}
],
"symlink_target": ""
} |
package com.auth.managers.admin;
import com.auth.daos.admin.DaoUsuariosPermisos;
import com.auth.entities.admin.UsuariosPermisos;
import com.auth.entities.admin.UsuariosPermisosPK;
import com.auth.managers.commons.ManagerSQL;
/**
 * Manager for the {@link UsuariosPermisos} entity (user/permission
 * assignments keyed by the composite {@link UsuariosPermisosPK}).
 * All CRUD behaviour is inherited from the generic {@link ManagerSQL};
 * this class only wires in the matching DAO.
 *
 * @author Alonso --- alonso@kriblet.com
 */
public class ManagerUsuariosPermisos extends ManagerSQL<UsuariosPermisos, UsuariosPermisosPK> {

    /** Builds the manager backed by a fresh {@link DaoUsuariosPermisos}. */
    public ManagerUsuariosPermisos() {
        super(new DaoUsuariosPermisos());
    }
}
| {
"content_hash": "4d6872614cef514efefae0f8c22a7c17",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 95,
"avg_line_length": 24.63157894736842,
"alnum_prop": 0.7713675213675214,
"repo_name": "BeltranGomezUlises/machineAdminAPI",
"id": "b02c92365973e2a105370973809cdb3551a40ebf",
"size": "1175",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main/java/com/auth/managers/admin/ManagerUsuariosPermisos.java",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "237"
},
{
"name": "Java",
"bytes": "310417"
}
],
"symlink_target": ""
} |
using System;
using System.Collections.Generic;
using System.Windows;
namespace ReportsOrganizer.UI.Controls
{
    /// <summary>Mirroring options that can be applied to a pack icon.</summary>
    public enum PackIconFlipOrientation
    {
        Normal, Horizontal, Vertical, Both
    }

    /// <summary>
    /// Generic icon control extending <see cref="PackIconBase{TKind}"/> with
    /// flip and rotation dependency properties.
    /// </summary>
    public class PackIcon<TKind> : PackIconBase<TKind>
    {
        /// <summary>Identifies the <see cref="Flip"/> dependency property.</summary>
        public static readonly DependencyProperty FlipProperty
            = DependencyProperty.Register("Flip", typeof(PackIconFlipOrientation), typeof(PackIcon<TKind>),
                new PropertyMetadata(PackIconFlipOrientation.Normal));

        /// <summary>Identifies the <see cref="Rotation"/> dependency property.</summary>
        public static readonly DependencyProperty RotationProperty
            = DependencyProperty.Register("Rotation", typeof(double), typeof(PackIcon<TKind>),
                new PropertyMetadata(0d, null, RotationPropertyCoerceValueCallback));

        // Coerces the rotation angle into the [0, 360] degree range; values
        // already inside the range are passed through unchanged.
        private static object RotationPropertyCoerceValueCallback(DependencyObject dependencyObject, object value)
        {
            var angle = (double)value;
            if (angle < 0)
            {
                return 0d;
            }
            if (angle > 360)
            {
                return 360d;
            }
            return value;
        }

        /// <summary>Gets or sets how the icon is mirrored.</summary>
        public PackIconFlipOrientation Flip
        {
            get { return (PackIconFlipOrientation)GetValue(FlipProperty); }
            set { SetValue(FlipProperty, value); }
        }

        /// <summary>Gets or sets the icon rotation in degrees (clamped to 0-360).</summary>
        public double Rotation
        {
            get { return (double)GetValue(RotationProperty); }
            set { SetValue(RotationProperty, value); }
        }

        /// <summary>
        /// Initializes the icon with the factory that supplies path data per kind.
        /// </summary>
        public PackIcon(Func<IDictionary<TKind, string>> dataIndexFactory) : base(dataIndexFactory)
        {
        }
    }
}
| {
"content_hash": "c96439aea448ffe1ce70d1e5dc4dd7da",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 114,
"avg_line_length": 33.56818181818182,
"alnum_prop": 0.6492890995260664,
"repo_name": "SkySoftTechDotNet/ReportsOrganizer.Windows",
"id": "e9c9a0b1228aca391d836e29c4bf601ecc4ba1eb",
"size": "1479",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ReportsOrganizer/ReportsOrganizer.UI/Controls/PackIcon.cs",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "216092"
}
],
"symlink_target": ""
} |
https://developers.google.com/google-bsd-license
Copyright 2012, Google Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| {
"content_hash": "8c38f4d8b48127d5a3b521c1c809483b",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 80,
"avg_line_length": 51.36666666666667,
"alnum_prop": 0.8020765736534717,
"repo_name": "google/ios-webkit-debug-proxy",
"id": "14401137514ca7900b56b0b39cc2d699d58939ef",
"size": "1541",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "LICENSE.md",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "249557"
},
{
"name": "M4",
"bytes": "2884"
},
{
"name": "Makefile",
"bytes": "2144"
},
{
"name": "Shell",
"bytes": "418"
}
],
"symlink_target": ""
} |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc (1.8.0_151) on Sat Mar 16 04:11:50 MST 2019 -->
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<title>PolicySupplier (BOM: * : All 2.3.1.Final-SNAPSHOT API)</title>
<meta name="date" content="2019-03-16">
<link rel="stylesheet" type="text/css" href="../../../../../stylesheet.css" title="Style">
<script type="text/javascript" src="../../../../../script.js"></script>
</head>
<body>
<script type="text/javascript"><!--
try {
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="PolicySupplier (BOM: * : All 2.3.1.Final-SNAPSHOT API)";
}
}
catch(err) {
}
//-->
var methods = {"i0":6};
var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],4:["t3","Abstract Methods"]};
var altColor = "altColor";
var rowColor = "rowColor";
var tableTab = "tableTab";
var activeTableTab = "activeTableTab";
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar.top">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.top" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.top.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../overview-summary.html">Overview</a></li>
<li><a href="package-summary.html">Package</a></li>
<li class="navBarCell1Rev">Class</li>
<li><a href="class-use/PolicySupplier.html">Use</a></li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../help-doc.html">Help</a></li>
</ul>
<div class="aboutLanguage">Thorntail API, 2.3.1.Final-SNAPSHOT</div>
</div>
<div class="subNav">
<ul class="navList">
<li><a href="../../../../../org/wildfly/swarm/config/elytron/PolicyConsumer.html" title="interface in org.wildfly.swarm.config.elytron"><span class="typeNameLink">Prev Class</span></a></li>
<li><a href="../../../../../org/wildfly/swarm/config/elytron/PropertiesRealm.html" title="class in org.wildfly.swarm.config.elytron"><span class="typeNameLink">Next Class</span></a></li>
</ul>
<ul class="navList">
<li><a href="../../../../../index.html?org/wildfly/swarm/config/elytron/PolicySupplier.html" target="_top">Frames</a></li>
<li><a href="PolicySupplier.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<div>
<ul class="subNavList">
<li>Summary: </li>
<li>Nested | </li>
<li>Field | </li>
<li>Constr | </li>
<li><a href="#method.summary">Method</a></li>
</ul>
<ul class="subNavList">
<li>Detail: </li>
<li>Field | </li>
<li>Constr | </li>
<li><a href="#method.detail">Method</a></li>
</ul>
</div>
<a name="skip.navbar.top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<!-- ======== START OF CLASS DATA ======== -->
<div class="header">
<div class="subTitle">org.wildfly.swarm.config.elytron</div>
<h2 title="Interface PolicySupplier" class="title">Interface PolicySupplier<T extends <a href="../../../../../org/wildfly/swarm/config/elytron/Policy.html" title="class in org.wildfly.swarm.config.elytron">Policy</a>></h2>
</div>
<div class="contentContainer">
<div class="description">
<ul class="blockList">
<li class="blockList">
<dl>
<dt>Functional Interface:</dt>
<dd>This is a functional interface and can therefore be used as the assignment target for a lambda expression or method reference.</dd>
</dl>
<hr>
<br>
<pre><a href="https://docs.oracle.com/javase/8/docs/api/java/lang/FunctionalInterface.html?is-external=true" title="class or interface in java.lang">@FunctionalInterface</a>
public interface <span class="typeNameLabel">PolicySupplier<T extends <a href="../../../../../org/wildfly/swarm/config/elytron/Policy.html" title="class in org.wildfly.swarm.config.elytron">Policy</a>></span></pre>
</li>
</ul>
</div>
<div class="summary">
<ul class="blockList">
<li class="blockList">
<!-- ========== METHOD SUMMARY =========== -->
<ul class="blockList">
<li class="blockList"><a name="method.summary">
<!-- -->
</a>
<h3>Method Summary</h3>
<table class="memberSummary" border="0" cellpadding="3" cellspacing="0" summary="Method Summary table, listing methods, and an explanation">
<caption><span id="t0" class="activeTableTab"><span>All Methods</span><span class="tabEnd"> </span></span><span id="t2" class="tableTab"><span><a href="javascript:show(2);">Instance Methods</a></span><span class="tabEnd"> </span></span><span id="t3" class="tableTab"><span><a href="javascript:show(4);">Abstract Methods</a></span><span class="tabEnd"> </span></span></caption>
<tr>
<th class="colFirst" scope="col">Modifier and Type</th>
<th class="colLast" scope="col">Method and Description</th>
</tr>
<tr id="i0" class="altColor">
<td class="colFirst"><code><a href="../../../../../org/wildfly/swarm/config/elytron/Policy.html" title="class in org.wildfly.swarm.config.elytron">Policy</a></code></td>
<td class="colLast"><code><span class="memberNameLink"><a href="../../../../../org/wildfly/swarm/config/elytron/PolicySupplier.html#get--">get</a></span>()</code>
<div class="block">Constructed instance of Policy resource</div>
</td>
</tr>
</table>
</li>
</ul>
</li>
</ul>
</div>
<div class="details">
<ul class="blockList">
<li class="blockList">
<!-- ============ METHOD DETAIL ========== -->
<ul class="blockList">
<li class="blockList"><a name="method.detail">
<!-- -->
</a>
<h3>Method Detail</h3>
<a name="get--">
<!-- -->
</a>
<ul class="blockListLast">
<li class="blockList">
<h4>get</h4>
<pre><a href="../../../../../org/wildfly/swarm/config/elytron/Policy.html" title="class in org.wildfly.swarm.config.elytron">Policy</a> get()</pre>
<div class="block">Constructed instance of Policy resource</div>
<dl>
<dt><span class="returnLabel">Returns:</span></dt>
<dd>The instance</dd>
</dl>
</li>
</ul>
</li>
</ul>
</li>
</ul>
</div>
</div>
<!-- ========= END OF CLASS DATA ========= -->
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar.bottom">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.bottom" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.bottom.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../overview-summary.html">Overview</a></li>
<li><a href="package-summary.html">Package</a></li>
<li class="navBarCell1Rev">Class</li>
<li><a href="class-use/PolicySupplier.html">Use</a></li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../help-doc.html">Help</a></li>
</ul>
<div class="aboutLanguage">Thorntail API, 2.3.1.Final-SNAPSHOT</div>
</div>
<div class="subNav">
<ul class="navList">
<li><a href="../../../../../org/wildfly/swarm/config/elytron/PolicyConsumer.html" title="interface in org.wildfly.swarm.config.elytron"><span class="typeNameLink">Prev Class</span></a></li>
<li><a href="../../../../../org/wildfly/swarm/config/elytron/PropertiesRealm.html" title="class in org.wildfly.swarm.config.elytron"><span class="typeNameLink">Next Class</span></a></li>
</ul>
<ul class="navList">
<li><a href="../../../../../index.html?org/wildfly/swarm/config/elytron/PolicySupplier.html" target="_top">Frames</a></li>
<li><a href="PolicySupplier.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<div>
<ul class="subNavList">
<li>Summary: </li>
<li>Nested | </li>
<li>Field | </li>
<li>Constr | </li>
<li><a href="#method.summary">Method</a></li>
</ul>
<ul class="subNavList">
<li>Detail: </li>
<li>Field | </li>
<li>Constr | </li>
<li><a href="#method.detail">Method</a></li>
</ul>
</div>
<a name="skip.navbar.bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<p class="legalCopy"><small>Copyright © 2019 <a href="http://www.jboss.org">JBoss by Red Hat</a>. All rights reserved.</small></p>
</body>
</html>
| {
"content_hash": "b32d2adeb0bc7b5f3c5d45fa672ccbce",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 391,
"avg_line_length": 38.561181434599156,
"alnum_prop": 0.6371594266331109,
"repo_name": "wildfly-swarm/wildfly-swarm-javadocs",
"id": "c947dda98c8b51bcd0ffd561c5fc1ef28b45f947",
"size": "9139",
"binary": false,
"copies": "1",
"ref": "refs/heads/gh-pages",
"path": "2.3.1.Final-SNAPSHOT/apidocs/org/wildfly/swarm/config/elytron/PolicySupplier.html",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>coquelicot: Not compatible 👼</title>
<link rel="shortcut icon" type="image/png" href="../../../../../favicon.png" />
<link href="../../../../../bootstrap.min.css" rel="stylesheet">
<link href="../../../../../bootstrap-custom.css" rel="stylesheet">
<link href="//maxcdn.bootstrapcdn.com/font-awesome/4.2.0/css/font-awesome.min.css" rel="stylesheet">
<script src="../../../../../moment.min.js"></script>
<!-- HTML5 Shim and Respond.js IE8 support of HTML5 elements and media queries -->
<!-- WARNING: Respond.js doesn't work if you view the page via file:// -->
<!--[if lt IE 9]>
<script src="https://oss.maxcdn.com/html5shiv/3.7.2/html5shiv.min.js"></script>
<script src="https://oss.maxcdn.com/respond/1.4.2/respond.min.js"></script>
<![endif]-->
</head>
<body>
<div class="container">
<div class="navbar navbar-default" role="navigation">
<div class="container-fluid">
<div class="navbar-header">
<a class="navbar-brand" href="../../../../.."><i class="fa fa-lg fa-flag-checkered"></i> Coq bench</a>
</div>
<div id="navbar" class="collapse navbar-collapse">
<ul class="nav navbar-nav">
<li><a href="../..">clean / released</a></li>
<li class="active"><a href="">8.8.2 / coquelicot - 2.1.1</a></li>
</ul>
</div>
</div>
</div>
<div class="article">
<div class="row">
<div class="col-md-12">
<a href="../..">« Up</a>
<h1>
coquelicot
<small>
2.1.1
<span class="label label-info">Not compatible 👼</span>
</small>
</h1>
<p>📅 <em><script>document.write(moment("2022-06-06 01:32:17 +0000", "YYYY-MM-DD HH:mm:ss Z").fromNow());</script> (2022-06-06 01:32:17 UTC)</em><p>
<h2>Context</h2>
<pre># Packages matching: installed
# Name # Installed # Synopsis
base-bigarray base
base-threads base
base-unix base
camlp5 7.14 Preprocessor-pretty-printer of OCaml
conf-findutils 1 Virtual package relying on findutils
conf-perl 2 Virtual package relying on perl
coq 8.8.2 Formal proof management system
num 1.4 The legacy Num library for arbitrary-precision integer and rational arithmetic
ocaml 4.09.1 The OCaml compiler (virtual package)
ocaml-base-compiler 4.09.1 Official release 4.09.1
ocaml-config 1 OCaml Switch Configuration
ocamlfind 1.9.3 A library manager for OCaml
# opam file:
opam-version: "2.0"
maintainer: "guillaume.melquiond@inria.fr"
homepage: "http://coquelicot.saclay.inria.fr/"
dev-repo: "git+https://gitlab.inria.fr/coquelicot/coquelicot.git"
bug-reports: "https://gitlab.inria.fr/coquelicot/coquelicot/issues"
license: "LGPL-3.0-or-later"
build: [
["./configure"]
["./remake" "-j%{jobs}%"]
]
install: ["./remake" "install"]
depends: [
"coq" {>= "8.4pl4" & < "8.6~"}
"coq-mathcomp-ssreflect" {>= "1.6"}
]
tags: [ "keyword:real analysis" "keyword:topology" "keyword:filters" "keyword:metric spaces" "category:Mathematics/Real Calculus and Topology" ]
authors: [ "Sylvie Boldo <sylvie.boldo@inria.fr>" "Catherine Lelay <catherine.lelay@inria.fr>" "Guillaume Melquiond <guillaume.melquiond@inria.fr>" ]
synopsis: "A Coq formalization of real analysis compatible with the standard library"
url {
src: "https://coquelicot.gitlabpages.inria.fr/releases/coquelicot-2.1.1.tar.gz"
checksum: "md5=bd648a43a06f422ee6ba886f93d0a534"
}
</pre>
<h2>Lint</h2>
<dl class="dl-horizontal">
<dt>Command</dt>
<dd><code>true</code></dd>
<dt>Return code</dt>
<dd>0</dd>
</dl>
<h2>Dry install 🏜️</h2>
<p>Dry install with the current Coq version:</p>
<dl class="dl-horizontal">
<dt>Command</dt>
<dd><code>opam install -y --show-action coq-coquelicot.2.1.1 coq.8.8.2</code></dd>
<dt>Return code</dt>
<dd>5120</dd>
<dt>Output</dt>
<dd><pre>[NOTE] Package coq is already installed (current version is 8.8.2).
The following dependencies couldn't be met:
- coq-coquelicot -> coq < 8.6~ -> ocaml < 4.06.0
base of this switch (use `--unlock-base' to force)
No solution found, exiting
</pre></dd>
</dl>
<p>Dry install without Coq/switch base, to test if the problem was incompatibility with the current Coq/OCaml version:</p>
<dl class="dl-horizontal">
<dt>Command</dt>
<dd><code>opam remove -y coq; opam install -y --show-action --unlock-base coq-coquelicot.2.1.1</code></dd>
<dt>Return code</dt>
<dd>0</dd>
</dl>
<h2>Install dependencies</h2>
<dl class="dl-horizontal">
<dt>Command</dt>
<dd><code>true</code></dd>
<dt>Return code</dt>
<dd>0</dd>
<dt>Duration</dt>
<dd>0 s</dd>
</dl>
<h2>Install 🚀</h2>
<dl class="dl-horizontal">
<dt>Command</dt>
<dd><code>true</code></dd>
<dt>Return code</dt>
<dd>0</dd>
<dt>Duration</dt>
<dd>0 s</dd>
</dl>
<h2>Installation size</h2>
<p>No files were installed.</p>
<h2>Uninstall 🧹</h2>
<dl class="dl-horizontal">
<dt>Command</dt>
<dd><code>true</code></dd>
<dt>Return code</dt>
<dd>0</dd>
<dt>Missing removes</dt>
<dd>
none
</dd>
<dt>Wrong removes</dt>
<dd>
none
</dd>
</dl>
</div>
</div>
</div>
<hr/>
<div class="footer">
<p class="text-center">
Sources are on <a href="https://github.com/coq-bench">GitHub</a> © Guillaume Claret 🐣
</p>
</div>
</div>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js"></script>
<script src="../../../../../bootstrap.min.js"></script>
</body>
</html>
| {
"content_hash": "cb3d7a4e50562886f67c951aa72df24f",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 197,
"avg_line_length": 42.84567901234568,
"alnum_prop": 0.5424290448062239,
"repo_name": "coq-bench/coq-bench.github.io",
"id": "ac66fd979acd4ef88fc84e4507b98ea1ac7e26d9",
"size": "6966",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "clean/Linux-x86_64-4.09.1-2.0.6/released/8.8.2/coquelicot/2.1.1.html",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
//
// Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003
//
// Arjuna Technologies Ltd.,
// Newcastle upon Tyne,
// Tyne and Wear,
// UK.
//
package org.jboss.jbossts.qa.RawResources01Clients2;
/*
* Copyright (C) 1999-2001 by HP Bluestone Software, Inc. All rights Reserved.
*
* HP Arjuna Labs,
* Newcastle upon Tyne,
* Tyne and Wear,
* UK.
*
* $Id: Client136.java,v 1.3 2003/07/07 13:43:18 jcoleman Exp $
*/
/*
* Try to get around the differences between Ansi CPP and
* K&R cpp with concatenation.
*/
/*
* Copyright (C) 1999-2001 by HP Bluestone Software, Inc. All rights Reserved.
*
* HP Arjuna Labs,
* Newcastle upon Tyne,
* Tyne and Wear,
* UK.
*
* $Id: Client136.java,v 1.3 2003/07/07 13:43:18 jcoleman Exp $
*/
import org.jboss.jbossts.qa.RawResources01.*;
import org.jboss.jbossts.qa.Utils.OAInterface;
import org.jboss.jbossts.qa.Utils.ORBInterface;
import org.jboss.jbossts.qa.Utils.OTS;
import org.jboss.jbossts.qa.Utils.ServerIORStore;
import org.omg.CosTransactions.HeuristicHazard;
/**
 * QA client exercising a heuristic-outcome scenario for the raw-resources
 * test suite. The resource registered with the first service raises a
 * heuristic hazard during prepare, while the resource registered with the
 * second votes read-only on prepare and would raise a heuristic hazard on
 * rollback. Committing the transaction is therefore expected to fail with
 * {@link HeuristicHazard}; afterwards the resource traces reported by both
 * services are validated. The verdict ("Passed"/"Failed") is written to
 * stdout as required by the QA harness.
 */
public class Client136
{
	public static void main(String[] args)
	{
		try
		{
			ORBInterface.initORB(args, null);
			OAInterface.initOA();

			// The last two command-line arguments name the stored IORs of
			// the two services taking part in the transaction.
			Service service1 = lookupService(args[args.length - 2]);
			Service service2 = lookupService(args[args.length - 1]);

			ResourceBehavior[] resourceBehaviors1 = singleBehavior(
					PrepareBehavior.PrepareBehaviorRaiseHeuristicHazard,
					RollbackBehavior.RollbackBehaviorReturn);
			ResourceBehavior[] resourceBehaviors2 = singleBehavior(
					PrepareBehavior.PrepareBehaviorReturnVoteReadOnly,
					RollbackBehavior.RollbackBehaviorRaiseHeuristicHazard);

			boolean correct = true;

			OTS.current().begin();

			service1.oper(resourceBehaviors1);
			service2.oper(resourceBehaviors2);

			try
			{
				OTS.current().commit(true);
				System.err.println("Commit succeeded when it shouldn't");
				correct = false;
			}
			catch (HeuristicHazard expected)
			{
				// Success path: the hazard raised during prepare must
				// propagate to the committer.
			}

			correct = correct && service1.is_correct() && service2.is_correct();
			if (!correct)
			{
				System.err.println("service1.is_correct() or service2.is_correct() returned false");
			}

			ResourceTrace resourceTrace1 = service1.get_resource_trace(0);
			ResourceTrace resourceTrace2 = service2.get_resource_trace(0);

			// Resource 1 must have been prepared and then forgotten; resource 2
			// must have been rolled back, with or without a prepare/forget.
			correct = correct && (resourceTrace1 == ResourceTrace.ResourceTracePrepareForget);
			correct = correct && ((resourceTrace2 == ResourceTrace.ResourceTracePrepareRollbackForget) || (resourceTrace2 == ResourceTrace.ResourceTraceRollback));

			System.out.println(correct ? "Passed" : "Failed");
		}
		catch (Exception exception)
		{
			System.err.println("Client136.main: " + exception);
			exception.printStackTrace(System.err);
			System.out.println("Failed");
		}

		try
		{
			OAInterface.shutdownOA();
			ORBInterface.shutdownORB();
		}
		catch (Exception exception)
		{
			System.err.println("Client136.main: " + exception);
			exception.printStackTrace(System.err);
		}
	}

	/** Resolves a {@link Service} from the IOR previously stored under iorName. */
	private static Service lookupService(String iorName) throws Exception
	{
		String ior = ServerIORStore.loadIOR(iorName);
		return ServiceHelper.narrow(ORBInterface.orb().string_to_object(ior));
	}

	/**
	 * Builds a one-element behavior array with the given prepare and rollback
	 * behaviors; the commit and one-phase-commit behaviors simply return.
	 */
	private static ResourceBehavior[] singleBehavior(PrepareBehavior prepare, RollbackBehavior rollback)
	{
		ResourceBehavior behavior = new ResourceBehavior();
		behavior.prepare_behavior = prepare;
		behavior.rollback_behavior = rollback;
		behavior.commit_behavior = CommitBehavior.CommitBehaviorReturn;
		behavior.commitonephase_behavior = CommitOnePhaseBehavior.CommitOnePhaseBehaviorReturn;
		return new ResourceBehavior[]{behavior};
	}
}
| {
"content_hash": "2b9665a36f80e1bc4e0a9139dfe8e1a6",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 154,
"avg_line_length": 28.828358208955223,
"alnum_prop": 0.7331089826559669,
"repo_name": "nmcl/wfswarm-example-arjuna-old",
"id": "7a35fc0f39e24639153f3cc7223344cf15381650",
"size": "4841",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "graalvm/transactions/fork/narayana/qa/tests/src/org/jboss/jbossts/qa/RawResources01Clients2/Client136.java",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "6903"
}
],
"symlink_target": ""
} |
module VagrantPlugins
  module CapistranoPush
    # Gem version string for the vagrant-capistrano-push plugin.
    VERSION = "0.1.0"
  end
end
| {
"content_hash": "02d2dd69c82e5a1b85a0fecbb1465561",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 23,
"avg_line_length": 15.6,
"alnum_prop": 0.717948717948718,
"repo_name": "mfenner/vagrant-capistrano-push",
"id": "969de1027375de1cf417a9865f15d928e04f161e",
"size": "78",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/vagrant-capistrano-push/version.rb",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Ruby",
"bytes": "9337"
}
],
"symlink_target": ""
} |
layout: page
comments: true
show_meta: false
title: "Getting Started"
subheadline: "A Step-by-Step Guide"
teaser: "This is a step-by-step guide to preparing your computer for the IoT Labs."
header:
image_fullwidth: "header_homepage_13.jpg"
permalink: "/getting-started/"
---
# Preparing for the IoT Labs
The labs in this series build on each other to enable you to prototype your own Internet of Things (IoT) devices.
In this lab you will use Node.js and an open source framework for interacting with hardware called Johnny-Five,
which works as a baseline control kit for Arduino-based projects. This enables you to write applications in JavaScript
that can run either on your computer connected to an Arduino board or directly on the board itself (if the board has
a Linux distribution, like the Arduino Yún and Linino ONE).
We chose the Arduino Yún for this workshop because it has both a Linux distribution and on-board Wi-Fi, although several
of the labs can be completed using any Arduino board because the Node.js code will run on your laptop and use the Arduino
over USB. If you want to deploy the applications you build to the Arduino, you will need a board that has a Linux distro
(such as the Yún or the Linino ONE).
## Bill of Materials
To prepare your development environment for this lab series you don't need anything other than a computer. Each lab in the
series will have a bill of materials indicating what is required for that lab.
If you want to prepare yourself further before the labs you can acquire the following:
0. [Arduino Yún](http://www.arduino.cc/en/Main/ArduinoBoardYun)
1. USB to micro-USB cable
2. An Arduino compatible starter kit w/o the board (1)
1. An example would be the [SparkFun Inventor's Kit (for Arduino Uno) - V3.2](http://www.sparkfun.com/products/13154) (it comes with an Arduino Uno R3, which you can use only in some of the early lessons)
## Install the Arduino IDE
While you won't use the Arduino IDE very much in the set of labs, it is necessary for a couple of things. For one thing, installing the Arduino IDE also installs the USB drivers for the Arduino board.
Go to http://www.arduino.cc and follow the links to download the latest version of the Arduino IDE. Make sure that the checkbox for the USB driver is selected during install (it typically is by default).
## Install a Code Editor
If you don't already have one installed, pick a text/code editor. Feel free to use anything you like, provided it won't inject any extra text into your files.
Some Options:
* [Visual Studio Code](http://code.visualstudio.com/) (this is our preferred tool)
* [Visual Studio](http://www.visualstudio.com/)
* [Sublime Text](http://www.sublimetext.com/)
* [Eclipse](http://www.eclipse.org/downloads/)
* [Notepad++](http://notepad-plus-plus.org/)
## Install Git
Some of the tools you will be using in this workshop require Git. The download link can be found [here](http://git-scm.com/).
### Windows Only
During the Git install, check the option to _Use Git from the Windows command prompt_.
If you didn't choose this option during installation, you need to make Git available from the command line. To do that, add the path to Git to the PATH environment variable.
1. Open _Control Panel_ > _System and Security_ > _System_ then click on _Advanced Settings_.
2. Click on the _Environment Variables_ button toward the bottom of the dialog.
3. Locate the User variable named __PATH__ and double-click it.
4. Append the following to the Variable value textbox (if you installed Git to a different location you will need to modify this value accordingly):
<pre>
;C:\Program Files (x86)\Git\bin;C:\Program Files (x86)\Git\cmd
</pre>
5. Click _OK_ to close the Edit User Variable dialog.
6. Click _OK_ to close the System Properties dialog.
7. Close any remaining dialogs/windows (i.e. Control Panel).
## Install Node.js
In the labs you will write small programs that will run on your computer, connected to your Arduino (these can also be deployed to run solely on your Arduino Yún). These programs will be written in JavaScript and will be built on Node.js. If you are not familiar or experienced with Node.js, don't worry. You will learn everything you need to know for these labs in these labs.
Follow the [instructions here to install Node.js](http://nodejs.org/) on your computer.
## Install Bower
Bower is a package manager similar to the Node Package Manager (NPM). For these labs we will use both NPM and Bower. You install Bower using NPM.
On Windows, open the Node.js command prompt and type the following:
<pre>
npm install -g bower
</pre>
On Mac OS X open Terminal and type the following:
<pre>
sudo npm install -g bower
</pre>
## Install Apache Cordova and Cordova Icon
### Apache Cordova
Apache Cordova is an open-source mobile development framework. It allows you to use standard web technologies such as HTML5, CSS3, and JavaScript for cross-platform development, avoiding each mobile platforms' native development language. Applications execute within wrappers targeted to each platform, and rely on standards-compliant API bindings to access each device's sensors, data, and network status.
On Windows, open the Node.js command prompt and type the following:
<pre>
npm install -g cordova
</pre>
On Mac OS X open Terminal and type the following:
<pre>
sudo npm install -g cordova
</pre>
### Cordova Icon
Cordova Icon is a tool that provides automatic icon resizing for Cordova apps.
On Windows, open the Node.js command prompt and type the following:
<pre>
npm install -g cordova-icon
</pre>
On Mac OS X open Terminal and type the following:
<pre>
sudo npm install -g cordova-icon
</pre>
## Install Johnny-Five
Johnny-Five is an open source JavaScript framework that provides a simple object model for interacting with an Arduino-based board and the sensors and devices you connect to it.
Once you have Node.js installed, install [Johnny-Five](http://www.npmjs.com/package/johnny-five) using NPM.
On Windows, open the Node.js command prompt and type the following:
<pre>
npm install johnny-five
</pre>
On Mac OS X open Terminal and type the following:
<pre>
sudo npm install johnny-five
</pre>
## Install Nitrogen
Nitrogen is a messaging service that will act as a gateway for your Thing connecting to Azure. Nitrogen supports connecting devices via Message Queue Telemetry Transport (MQTT) or using the Nitrogen Node.js client library (you can learn more about MQTT here). You install Nitrogen using NPM.
On Windows, open the Node.js command prompt and type the following:
<pre>
npm install -g nitrogen-cli
</pre>
On Mac OS X open Terminal and type the following:
<pre>
sudo npm install -g nitrogen-cli
</pre>
## Set Up a Development Directory
The last thing to do is prepare a place to save all of your work in the labs. I recommend an easy to navigate to directory with a relatively short path. Create a new folder/directory for the workshop - I recommend:
Windows
<pre>
C:\Development\IoTLabs
</pre>
Mac OS X
<pre>
~/Development/IoTLabs
</pre>
That's it for now. You are ready to start the [first set of labs](/iotlabs/lab001/).
| {
"content_hash": "f2dfaf4b4b1a99b6dc03f5c9118675dc",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 406,
"avg_line_length": 44.54320987654321,
"alnum_prop": 0.7645509977827051,
"repo_name": "felixrieseberg/ThingLabsIo.github.io",
"id": "2127c6b60527d645d868676f0c509c929901ac6b",
"size": "7220",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pages/getting-started.md",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "362575"
},
{
"name": "HTML",
"bytes": "148862"
},
{
"name": "JavaScript",
"bytes": "250532"
},
{
"name": "Ruby",
"bytes": "3089"
}
],
"symlink_target": ""
} |
<!-- Home view: image carousel followed by app description and author panels. -->
<section class="home" data-ng-controller="HomeController" data-ng-init="init()">
    <div>
        <uib-carousel interval="5000">
            <uib-slide ng-repeat="slide in slides" active="slide.active" index="slide.id">
                <img ng-src="{{slide.image}}" style="margin:auto;">
                <div class="carousel-caption">
                    <!-- Bug fix: attribute was misspelled "date-ng-show"; Angular only
                         normalizes the "data-" prefix, so the binding was inert and the
                         heading rendered even for slides without caption text. -->
                    <h3 data-ng-show="slide.text">{{slide.text}}</h3>
                </div>
            </uib-slide>
        </uib-carousel>
    </div>
    <div class="panel text-center">
        <h3 translate>APP_DESCRIPTION</h3>
        <hr/>
        <p translate>
            APP_INFO
        </p>
        <!-- Download link is hidden until the offline app is available. -->
        <p class="hidden">
            <a class="btn btn-primary btn-lg" href="#">
                <i class="fa fa-download"></i>
                <span translate>OFFLINE_APP_DOWNLOAD</span>
            </a>
        </p>
    </div>
    <div class="panel text-center">
        <h3 translate>APP_AUTHOR</h3>
        <hr/>
        <p translate>
            APP_AUTHOR_INFO
        </p>
    </div>
</section>
| {
"content_hash": "afad7fff86dc334d56416c0d591294f1",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 84,
"avg_line_length": 23.77777777777778,
"alnum_prop": 0.5922897196261683,
"repo_name": "nejcsilc/lpa",
"id": "c860b1422417a897d5b9bab8182a16f94993d82e",
"size": "856",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "public/modules/core/views/home.client.view.html",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8410"
},
{
"name": "HTML",
"bytes": "43762"
},
{
"name": "JavaScript",
"bytes": "164201"
},
{
"name": "Shell",
"bytes": "693"
}
],
"symlink_target": ""
} |
<?php
/**
 * Main site layout ("ic_main") for the Theme Chat login pages (Yii layout view).
 * Emits the shared <head> assets and a static top navbar, then injects the
 * page-specific markup Yii supplies in $content. Scripts are loaded after the
 * body so the markup renders before jQuery/Bootstrap are fetched.
 */
?>
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=UTF-8">
<title>Theme Chat Login</title>
<meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1">
<link href="<?php echo Yii::app()->request->baseUrl; ?>/app/css/lib/bootstrap.css" type="text/css" rel="stylesheet"/>
<link href="<?php echo Yii::app()->request->baseUrl; ?>/app/css/lib/font-awesome.css" type="text/css" rel="stylesheet"/>
<link href="<?php echo Yii::app()->request->baseUrl; ?>/app/css/app.css" type="text/css" rel="stylesheet"/>
<!--[if lt IE 9]>
<script src="//html5shim.googlecode.com/svn/trunk/html5.js"></script>
<![endif]-->
</head>
<body>
<div id="ic-navbar" class="navbar navbar-static-top">
<div class="container ic-padding-none">
<div class="row">
<div class="text-center navbar-header col-lg-12 col-md-12 col-sm-12 col-xs-12">
<a class="ic-logo">
<strong>THEME</strong>CHAT<small>BETA</small>
</a>
<div class="pull-right">
<a href="../app"
class="btn btn-primary">
<h5><span class=""> Home</span></h5>
</a>
</div>
</div>
</div>
</div>
</div>
<?php echo $content; ?>
</body>
<script type="text/javascript" src="<?php echo Yii::app()->request->baseUrl; ?>/app/js/lib/jquery.js"></script>
<script type="text/javascript" src="<?php echo Yii::app()->request->baseUrl; ?>/app/js/lib/bootstrap.js"></script>
</html>
"content_hash": "7946a9ae9649325c34e115f3f3b827cb",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 122,
"avg_line_length": 40.270270270270274,
"alnum_prop": 0.6140939597315436,
"repo_name": "tmushayahama/ichatstyle",
"id": "410ff49edc0cdcac91acb644dae7535964b00ce2",
"size": "1490",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "protected/views/layouts/ic_main.php",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "229"
},
{
"name": "Batchfile",
"bytes": "380"
},
{
"name": "CSS",
"bytes": "92289"
},
{
"name": "Groff",
"bytes": "5246662"
},
{
"name": "HTML",
"bytes": "19181"
},
{
"name": "JavaScript",
"bytes": "357969"
},
{
"name": "PHP",
"bytes": "866124"
}
],
"symlink_target": ""
} |
-- Table of device IMEI numbers recorded per group.
-- A given (gid, imei) pair can appear at most once (enforced by UNIQUE KEY `gid`).
CREATE TABLE IF NOT EXISTS `list_imei` (
`id` int(10) unsigned NOT NULL AUTO_INCREMENT, -- surrogate row id
`time` int(10) unsigned NOT NULL, -- entry time (presumably a unix epoch timestamp; confirm with the writer code)
`gid` smallint(6) NOT NULL, -- group id
`imei` bigint(20) NOT NULL, -- device IMEI (stored as a number)
PRIMARY KEY (`id`),
UNIQUE KEY `gid` (`gid`,`imei`), -- one record per (group, imei) pair
KEY `time` (`time`) -- supports time-range lookups
) ENGINE=InnoDB DEFAULT CHARSET=utf8 AUTO_INCREMENT=1;
| {
"content_hash": "4f7ac9fa6c7956c24f1826e880a6dfb6",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 54,
"avg_line_length": 35.55555555555556,
"alnum_prop": 0.665625,
"repo_name": "qysf12345/swRech",
"id": "635e7e5e581e46b5eeda5644f49d933b84ba4bcd",
"size": "320",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sql/list_imei.sql",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Lua",
"bytes": "3426"
},
{
"name": "PHP",
"bytes": "123087"
},
{
"name": "SQLPL",
"bytes": "1018"
},
{
"name": "Shell",
"bytes": "1053"
}
],
"symlink_target": ""
} |
<?xml version='1.0' encoding='UTF-8'?>
<!DOCTYPE beans PUBLIC '-//SPRING//DTD BEAN//EN' 'http://www.springframework.org/dtd/spring-beans.dtd'>
<!--
This shows the common minimum configuration overrides.
By default, the content and indexes are located at a relative location, which should only
be used when doing a quick preview of the Alfresco server.
-->
<beans>
<!-- overriding to point to custom properties -->
<!-- Later entries in the locations list win, so custom-repository.properties
     overrides values from the stock property files listed before it. -->
<bean id="repository-properties" class="org.springframework.beans.factory.config.PropertyPlaceholderConfigurer">
<!-- Tolerate placeholders resolved by other configurers. -->
<property name="ignoreUnresolvablePlaceholders">
<value>true</value>
</property>
<property name="locations">
<list>
<value>classpath:alfresco/repository.properties</value>
<value>classpath:alfresco/version.properties</value>
<value>classpath:alfresco/domain/transaction.properties</value>
<!-- Override basic repository properties -->
<value>classpath:alfresco/extension/custom-repository.properties</value>
</list>
</property>
</bean>
<!-- Hibernate settings; the custom dialect file overrides the defaults. -->
<bean id="hibernateConfigProperties" class="org.springframework.beans.factory.config.PropertiesFactoryBean">
<property name="locations">
<list>
<value>classpath:alfresco/domain/hibernate-cfg.properties</value>
<!-- Override hibernate dialect -->
<value>classpath:alfresco/extension/custom-hibernate-dialect.properties</value>
</list>
</property>
</bean>
</beans>
"content_hash": "7404aea6e2abfb608d280269e0bda87a",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 116,
"avg_line_length": 42.38461538461539,
"alnum_prop": 0.6249243799153055,
"repo_name": "sourcesense/Alfresco-OpenSSO-integration",
"id": "60077ad65354f6ffa6a62af42cca91dce32fd3a1",
"size": "1653",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webclient/src/test/resources/alfresco/extension/custom-repository-context.xml",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "66058"
}
],
"symlink_target": ""
} |
/**
 * Angular module "store-products": presentation directives for a product
 * page - a title partial and a tabbed panel with its selection state.
 */
(function() {
    var app = angular.module('store-products', []);

    // <product-title>: renders the product-title.html partial.
    app.directive('productTitle', buildProductTitle);

    // <product-panels>: tabbed panel; state is exposed to the template as "panel".
    app.directive('productPanels', buildProductPanels);

    function buildProductTitle() {
        return {
            restrict: 'E',
            templateUrl: 'product-title.html'
        };
    }

    function buildProductPanels() {
        return {
            restrict: 'E',
            templateUrl: 'product-panels.html',
            controller: PanelController,
            controllerAs: 'panel'
        };
    }

    // Tracks which tab is currently selected; tab 1 is shown by default.
    function PanelController() {
        this.tab = 1;
    }

    PanelController.prototype.setTab = function(selected) {
        this.tab = selected;
    };

    PanelController.prototype.isTabSelected = function(selected) {
        return this.tab === selected;
    };
})();
"content_hash": "1a00bf344faa6bef1e56337aa14bcd2f",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 48,
"avg_line_length": 15.575,
"alnum_prop": 0.4943820224719101,
"repo_name": "altintzis/ITE220",
"id": "c8e0430b0f8f8fb5635c338062af11203afd5660",
"size": "623",
"binary": false,
"copies": "1",
"ref": "refs/heads/gh-pages",
"path": "angular/js/products.js",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "216886"
},
{
"name": "HTML",
"bytes": "63897"
},
{
"name": "JavaScript",
"bytes": "501864"
},
{
"name": "PHP",
"bytes": "20442"
}
],
"symlink_target": ""
} |
package gov.hhs.onc.crigtt.utils;
import com.github.sebhoss.warnings.CompilerWarnings;
import java.util.function.Consumer;
import java.util.function.Supplier;
import javax.annotation.Nullable;
/**
 * Static utility methods that combine {@link Supplier} value retrieval with
 * {@link Consumer} post-processing ("get a value, then notify observers").
 */
public final class CrigttFunctionUtils {
    private CrigttFunctionUtils() {
        // Utility class; not instantiable.
    }

    /**
     * Gets a value from {@code supplier} and passes it to each of the
     * {@code consumers} in order.
     *
     * @param supplier source of the value; its result may be null
     * @param consumers callbacks applied to the value, invoked in order
     * @param <T> value type
     * @return the supplied value (may be null)
     */
    @SafeVarargs
    public static <T> T consume(Supplier<T> supplier, Consumer<T>... consumers) {
        return consume(supplier, null, consumers);
    }

    /**
     * Gets a value from {@code supplier}, falling back to {@code nullSupplier}
     * when the first result is null, and passes the final value to each of the
     * {@code consumers} in order.
     *
     * <p>Rewritten from a self-assigning ternary initializer and a manual
     * {@code Consumer.andThen} chaining loop to an equivalent, plainly
     * readable form: the consumers are simply invoked one after another, so a
     * consumer that throws still prevents later consumers from running,
     * exactly as with the chained form.
     *
     * @param supplier primary source of the value; its result may be null
     * @param nullSupplier optional fallback used when {@code supplier} yields
     *        null; may itself be null, in which case null is passed through
     * @param consumers callbacks applied to the final value, invoked in order
     * @param <T> value type
     * @return the resolved value (may be null)
     */
    @SafeVarargs
    public static <T> T consume(Supplier<T> supplier, Supplier<T> nullSupplier, Consumer<T>... consumers) {
        T obj = supplier.get();

        if ((obj == null) && (nullSupplier != null)) {
            obj = nullSupplier.get();
        }

        for (Consumer<T> consumer : consumers) {
            consumer.accept(obj);
        }

        return obj;
    }
}
| {
"content_hash": "6f7021a7492e8000dcd288ff41dbd0ee",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 118,
"avg_line_length": 30.82857142857143,
"alnum_prop": 0.6348470806302131,
"repo_name": "mkotelba/crigtt",
"id": "61fff83ccb99188352dd85ae9ded3fb4ac789708",
"size": "1079",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "crigtt-core/src/main/java/gov/hhs/onc/crigtt/utils/CrigttFunctionUtils.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5711"
},
{
"name": "Groovy",
"bytes": "26002"
},
{
"name": "HTML",
"bytes": "5134"
},
{
"name": "Java",
"bytes": "341067"
},
{
"name": "JavaScript",
"bytes": "23158"
},
{
"name": "XSLT",
"bytes": "251815"
}
],
"symlink_target": ""
} |
package org.apache.hadoop.hdfs.server.blockmanagement;
import static org.apache.hadoop.hdfs.protocol.BlockType.CONTIGUOUS;
import static org.apache.hadoop.hdfs.protocol.BlockType.STRIPED;
import static org.apache.hadoop.util.ExitUtil.terminate;
import static org.apache.hadoop.util.Time.now;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.BitSet;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.Set;
import java.util.TreeSet;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.FutureTask;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import javax.management.ObjectName;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.AddBlockFlag;
import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
import org.apache.hadoop.hdfs.protocol.BlockType;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier.AccessMode;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped.StorageAndBlockIndex;
import org.apache.hadoop.hdfs.server.blockmanagement.CorruptReplicasMap.Reason;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo.AddBlockResult;
import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas.StoredReplicaState;
import org.apache.hadoop.hdfs.server.blockmanagement.PendingDataNodeMessages.ReportedBlockInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.Namesystem;
import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfyManager;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.StripedBlockWithLocations;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
import org.apache.hadoop.hdfs.util.FoldedTreeSet;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.server.namenode.CacheManager;
import static org.apache.hadoop.hdfs.util.StripedBlockUtil.getInternalBlockLength;
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.LightWeightGSet;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.VersionInfo;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Keeps information related to the blocks stored in the Hadoop cluster.
* For block state management, it tries to maintain the safety
* property of "# of live replicas == # of expected redundancy" under
* any events such as decommission, namenode failover, datanode failure.
*
* The motivation of maintenance mode is to allow admins quickly repair nodes
* without paying the cost of decommission. Thus with maintenance mode,
* # of live replicas doesn't have to be equal to # of expected redundancy.
* If any of the replica is in maintenance mode, the safety property
* is extended as follows. These property still apply for the case of zero
* maintenance replicas, thus we can use these safe property for all scenarios.
* a. # of live replicas >= # of min replication for maintenance.
* b. # of live replicas <= # of expected redundancy.
* c. # of live replicas and maintenance replicas >= # of expected redundancy.
*
* For regular replication, # of min live replicas for maintenance is determined
* by DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_KEY. This number has to <=
* DFS_NAMENODE_REPLICATION_MIN_KEY.
* For erasure encoding, # of min live replicas for maintenance is
* BlockInfoStriped#getRealDataBlockNum.
*
* Another safety property is to satisfy the block placement policy. While the
* policy is configurable, the replicas the policy is applied to are the live
* replicas + maintenance replicas.
*/
@InterfaceAudience.Private
public class BlockManager implements BlockStatsMXBean {
public static final Logger LOG = LoggerFactory.getLogger(BlockManager.class);
public static final Logger blockLog = NameNode.blockStateChangeLog;
private static final String QUEUE_REASON_CORRUPT_STATE =
"it has the wrong state or generation stamp";
private static final String QUEUE_REASON_FUTURE_GENSTAMP =
"generation stamp is in the future";
private static final long BLOCK_RECOVERY_TIMEOUT_MULTIPLIER = 30;
private final Namesystem namesystem;
private final BlockManagerSafeMode bmSafeMode;
private final DatanodeManager datanodeManager;
private final HeartbeatManager heartbeatManager;
private final BlockTokenSecretManager blockTokenSecretManager;
// Block pool ID used by this namenode
private String blockPoolId;
private final PendingDataNodeMessages pendingDNMessages =
new PendingDataNodeMessages();
private volatile long pendingReconstructionBlocksCount = 0L;
private volatile long corruptReplicaBlocksCount = 0L;
private volatile long lowRedundancyBlocksCount = 0L;
private volatile long scheduledReplicationBlocksCount = 0L;
/** flag indicating whether replication queues have been initialized */
private boolean initializedReplQueues;
private final long startupDelayBlockDeletionInMs;
private final BlockReportLeaseManager blockReportLeaseManager;
private ObjectName mxBeanName;
  /** @return the cached count of blocks awaiting reconstruction; used by metrics. */
  public long getPendingReconstructionBlocksCount() {
    return pendingReconstructionBlocksCount;
  }
  /** @return the cached count of blocks with too few replicas; used by metrics. */
  public long getLowRedundancyBlocksCount() {
    return lowRedundancyBlocksCount;
  }
  /** @return the cached count of blocks with corrupt replicas; used by metrics. */
  public long getCorruptReplicaBlocksCount() {
    return corruptReplicaBlocksCount;
  }
  /** @return the cached count of blocks scheduled for replication; used by metrics. */
  public long getScheduledReplicationBlocksCount() {
    return scheduledReplicationBlocksCount;
  }
  /** @return number of blocks currently queued for deletion; used by metrics. */
  public long getPendingDeletionBlocksCount() {
    return invalidateBlocks.numBlocks();
  }
  /** @return configured startup delay in ms before block deletions begin; used by metrics. */
  public long getStartupDelayBlockDeletionInMs() {
    return startupDelayBlockDeletionInMs;
  }
  /** @return number of "excess" (over-replicated) replicas tracked for removal; used by metrics. */
  public long getExcessBlocksCount() {
    return excessRedundancyMap.size();
  }
  /** @return number of misreplicated blocks whose handling is postponed until post-failover block reports; used by metrics. */
  public long getPostponedMisreplicatedBlocksCount() {
    return postponedMisreplicatedBlocks.size();
  }
  /** @return number of datanode messages queued for later processing; used by metrics. */
  public int getPendingDataNodeMessageCount() {
    return pendingDNMessages.count();
  }
  /** @return number of pending reconstructions that have timed out; used by metrics. */
  public long getNumTimedOutPendingReconstructions() {
    return pendingReconstruction.getNumTimedOuts();
  }
  /** @return low-redundancy block count as tracked by the reconstruction queue; used by metrics. */
  public long getLowRedundancyBlocks() {
    return neededReconstruction.getLowRedundancyBlocks();
  }
  /** @return corrupt block count as tracked by the corrupt-replicas map; used by metrics. */
  public long getCorruptBlocks() {
    return corruptReplicas.getCorruptBlocks();
  }
  /** @return count of missing (corrupt in the reconstruction queue) blocks; used by metrics. */
  public long getMissingBlocks() {
    return neededReconstruction.getCorruptBlocks();
  }
  /** @return count of missing blocks whose replication factor is one; used by metrics. */
  public long getMissingReplicationOneBlocks() {
    return neededReconstruction.getCorruptReplicationOneBlocks();
  }
  /** @return replicated (contiguous) blocks awaiting deletion; used by metrics. */
  public long getPendingDeletionReplicatedBlocks() {
    return invalidateBlocks.getBlocks();
  }
  /** @return total replicated (contiguous) blocks in the blocks map; used by metrics. */
  public long getTotalReplicatedBlocks() {
    return blocksMap.getReplicatedBlocks();
  }
  /** @return low-redundancy erasure-coded block groups; used by metrics. */
  public long getLowRedundancyECBlockGroups() {
    return neededReconstruction.getLowRedundancyECBlockGroups();
  }
  /** @return corrupt erasure-coded block groups; used by metrics. */
  public long getCorruptECBlockGroups() {
    return corruptReplicas.getCorruptECBlockGroups();
  }
  /** @return missing erasure-coded block groups; used by metrics. */
  public long getMissingECBlockGroups() {
    return neededReconstruction.getCorruptECBlockGroups();
  }
  /** @return erasure-coded blocks awaiting deletion; used by metrics. */
  public long getPendingDeletionECBlocks() {
    return invalidateBlocks.getECBlocks();
  }
  /** @return total erasure-coded block groups in the blocks map; used by metrics. */
  public long getTotalECBlockGroups() {
    return blocksMap.getECBlockGroups();
  }
/**
* redundancyRecheckInterval is how often namenode checks for new
* reconstruction work.
*/
private final long redundancyRecheckIntervalMs;
/** How often to check and the limit for the storageinfo efficiency. */
private final long storageInfoDefragmentInterval;
private final long storageInfoDefragmentTimeout;
private final double storageInfoDefragmentRatio;
/**
* Mapping: Block -> { BlockCollection, datanodes, self ref }
* Updated only in response to client-sent information.
*/
final BlocksMap blocksMap;
/** Redundancy thread. */
private final Daemon redundancyThread = new Daemon(new RedundancyMonitor());
/** StorageInfoDefragmenter thread. */
private final Daemon storageInfoDefragmenterThread =
new Daemon(new StorageInfoDefragmenter());
/** Block report thread for handling async reports. */
private final BlockReportProcessingThread blockReportThread =
new BlockReportProcessingThread();
/** Store blocks -> datanodedescriptor(s) map of corrupt replicas */
final CorruptReplicasMap corruptReplicas = new CorruptReplicasMap();
/**
* Blocks to be invalidated.
* For a striped block to invalidate, we should track its individual internal
* blocks.
*/
private final InvalidateBlocks invalidateBlocks;
/**
* After a failover, over-replicated blocks may not be handled
* until all of the replicas have done a block report to the
* new active. This is to make sure that this NameNode has been
* notified of all block deletions that might have been pending
* when the failover happened.
*/
private final Set<Block> postponedMisreplicatedBlocks =
new LinkedHashSet<Block>();
private final int blocksPerPostpondedRescan;
// Scratch list reused when rescanning postponed mis-replicated blocks;
// sized in the constructor from blocksPerPostpondedRescan.
private final ArrayList<Block> rescannedMisreplicatedBlocks;
/**
 * Maps a StorageID to the set of blocks that are "extra" for this
 * DataNode. We'll eventually remove these extras.
 */
private final ExcessRedundancyMap excessRedundancyMap =
    new ExcessRedundancyMap();
/**
 * Store set of Blocks that need to be replicated 1 or more times.
 * We also store pending reconstruction-orders.
 */
public final LowRedundancyBlocks neededReconstruction =
    new LowRedundancyBlocks();
@VisibleForTesting
final PendingReconstructionBlocks pendingReconstruction;
/** Stores information about block recovery attempts. */
private final PendingRecoveryBlocks pendingRecoveryBlocks;
/** The maximum number of replicas allowed for a block. */
public final short maxReplication;
/**
 * The maximum number of outgoing replication streams a given node should have
 * at one time considering all but the highest priority replications needed.
 */
int maxReplicationStreams;
/**
 * The maximum number of outgoing replication streams a given node should have
 * at one time.
 */
int replicationStreamsHardLimit;
/** Minimum copies needed or else write is disallowed. */
public final short minReplication;
/** Default number of replicas. */
public final int defaultReplication;
/** value returned by MAX_CORRUPT_FILES_RETURNED */
final int maxCorruptFilesReturned;
// Per-iteration invalidation work fraction; read in the constructor via
// DFSUtil.getInvalidateWorkPctPerIteration(conf).
final float blocksInvalidateWorkPct;
// Per-iteration replication work multiplier; read via
// DFSUtil.getReplWorkMultiplier(conf).
final int blocksReplWorkMultiplier;
// whether or not to issue block encryption keys.
final boolean encryptDataTransfer;
// Max number of blocks to log info about during a block report.
private final long maxNumBlocksToLog;
/**
 * When running inside a Standby node, the node may receive block reports
 * from datanodes before receiving the corresponding namespace edits from
 * the active NameNode. Thus, it will postpone them for later processing,
 * instead of marking the blocks as corrupt.
 */
private boolean shouldPostponeBlocksFromFuture = false;
/**
 * Process reconstruction queues asynchronously to allow namenode safemode
 * exit and failover to be faster. HDFS-5496.
 */
private Daemon reconstructionQueuesInitializer = null;
/**
 * Number of blocks to process asynchronously for reconstruction queues
 * initialization once acquired the namesystem lock. Remaining blocks will be
 * processed again after acquiring lock again.
 */
private int numBlocksPerIteration;
/**
 * Progress of the Reconstruction queues initialisation.
 */
private double reconstructionQueuesInitProgress = 0.0;
/** for block replicas placement */
private BlockPlacementPolicies placementPolicies;
private final BlockStoragePolicySuite storagePolicySuite;
/** Check whether name system is running before terminating */
private boolean checkNSRunning = true;
/** Check whether there are any non-EC blocks using StripedID */
private boolean hasNonEcBlockUsingStripedID = false;
// Block-ID bookkeeping; constructed in the BlockManager constructor and
// shared with InvalidateBlocks (see BlockIdManager for details).
private final BlockIdManager blockIdManager;
/**
 * For satisfying block storage policies. Instantiates if sps is enabled
 * internally or externally.
 */
private StoragePolicySatisfyManager spsManager;
/** Minimum live replicas needed for the datanode to be transitioned
 * from ENTERING_MAINTENANCE to IN_MAINTENANCE.
 */
private final short minReplicationToBeInMaintenance;
/** Storages accessible from multiple DNs. */
private final ProvidedStorageMap providedStorageMap;
/**
 * Construct the BlockManager: wires up the datanode manager, blocks map,
 * reconstruction/invalidation/recovery queues and safe-mode tracking, and
 * reads all replication-related settings from the configuration, validating
 * that min <= default <= max replication constraints hold.
 *
 * @param namesystem the owning namesystem
 * @param haEnabled whether NameNode HA is enabled (forwarded to safe mode)
 * @param conf configuration to read settings from
 * @throws IOException if replication-related settings are inconsistent
 */
public BlockManager(final Namesystem namesystem, boolean haEnabled,
    final Configuration conf) throws IOException {
    this.namesystem = namesystem;
    datanodeManager = new DatanodeManager(this, namesystem, conf);
    heartbeatManager = datanodeManager.getHeartbeatManager();
    this.blockIdManager = new BlockIdManager(this);
    blocksPerPostpondedRescan = (int)Math.min(Integer.MAX_VALUE,
        datanodeManager.getBlocksPerPostponedMisreplicatedBlocksRescan());
    rescannedMisreplicatedBlocks =
        new ArrayList<Block>(blocksPerPostpondedRescan);
    startupDelayBlockDeletionInMs = conf.getLong(
        DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY,
        DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_DEFAULT) * 1000L;
    invalidateBlocks = new InvalidateBlocks(
        datanodeManager.getBlockInvalidateLimit(),
        startupDelayBlockDeletionInMs,
        blockIdManager);
    // Compute the map capacity by allocating 2% of total memory
    blocksMap = new BlocksMap(
        LightWeightGSet.computeCapacity(2.0, "BlocksMap"));
    placementPolicies = new BlockPlacementPolicies(
      conf, datanodeManager.getFSClusterStats(),
      datanodeManager.getNetworkTopology(),
      datanodeManager.getHost2DatanodeMap());
    storagePolicySuite = BlockStoragePolicySuite.createDefaultSuite();
    pendingReconstruction = new PendingReconstructionBlocks(conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY,
      DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_DEFAULT)
      * 1000L);
    createSPSManager(conf);
    blockTokenSecretManager = createBlockTokenSecretManager(conf);
    providedStorageMap = new ProvidedStorageMap(namesystem, this, conf);
    this.maxCorruptFilesReturned = conf.getInt(
      DFSConfigKeys.DFS_DEFAULT_MAX_CORRUPT_FILES_RETURNED_KEY,
      DFSConfigKeys.DFS_DEFAULT_MAX_CORRUPT_FILES_RETURNED);
    this.defaultReplication = conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
        DFSConfigKeys.DFS_REPLICATION_DEFAULT);
    // Validate the replication bounds: 0 < minR <= maxR <= Short.MAX_VALUE.
    final int maxR = conf.getInt(DFSConfigKeys.DFS_REPLICATION_MAX_KEY,
        DFSConfigKeys.DFS_REPLICATION_MAX_DEFAULT);
    final int minR = conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY,
        DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_DEFAULT);
    if (minR <= 0)
      throw new IOException("Unexpected configuration parameters: "
          + DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY
          + " = " + minR + " <= 0");
    if (maxR > Short.MAX_VALUE)
      throw new IOException("Unexpected configuration parameters: "
          + DFSConfigKeys.DFS_REPLICATION_MAX_KEY
          + " = " + maxR + " > " + Short.MAX_VALUE);
    if (minR > maxR)
      throw new IOException("Unexpected configuration parameters: "
          + DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY
          + " = " + minR + " > "
          + DFSConfigKeys.DFS_REPLICATION_MAX_KEY
          + " = " + maxR);
    this.minReplication = (short)minR;
    this.maxReplication = (short)maxR;
    this.maxReplicationStreams =
        conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY,
            DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_DEFAULT);
    this.replicationStreamsHardLimit =
        conf.getInt(
            DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY,
            DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_DEFAULT);
    this.blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
    this.blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);
    // Interval is configured in seconds but kept internally in milliseconds.
    this.redundancyRecheckIntervalMs = conf.getTimeDuration(
        DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
        DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_DEFAULT,
        TimeUnit.SECONDS) * 1000;
    this.storageInfoDefragmentInterval =
      conf.getLong(
          DFSConfigKeys.DFS_NAMENODE_STORAGEINFO_DEFRAGMENT_INTERVAL_MS_KEY,
          DFSConfigKeys.DFS_NAMENODE_STORAGEINFO_DEFRAGMENT_INTERVAL_MS_DEFAULT);
    this.storageInfoDefragmentTimeout =
      conf.getLong(
          DFSConfigKeys.DFS_NAMENODE_STORAGEINFO_DEFRAGMENT_TIMEOUT_MS_KEY,
          DFSConfigKeys.DFS_NAMENODE_STORAGEINFO_DEFRAGMENT_TIMEOUT_MS_DEFAULT);
    this.storageInfoDefragmentRatio =
      conf.getDouble(
          DFSConfigKeys.DFS_NAMENODE_STORAGEINFO_DEFRAGMENT_RATIO_KEY,
          DFSConfigKeys.DFS_NAMENODE_STORAGEINFO_DEFRAGMENT_RATIO_DEFAULT);
    this.encryptDataTransfer =
        conf.getBoolean(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY,
            DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_DEFAULT);
    this.maxNumBlocksToLog =
        conf.getLong(DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_KEY,
            DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_DEFAULT);
    this.numBlocksPerIteration = conf.getInt(
        DFSConfigKeys.DFS_BLOCK_MISREPLICATION_PROCESSING_LIMIT,
        DFSConfigKeys.DFS_BLOCK_MISREPLICATION_PROCESSING_LIMIT_DEFAULT);
    // Maintenance minimum must satisfy 0 <= minMaintenanceR <= defaultReplication.
    final int minMaintenanceR = conf.getInt(
        DFSConfigKeys.DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_KEY,
        DFSConfigKeys.DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_DEFAULT);
    if (minMaintenanceR < 0) {
      throw new IOException("Unexpected configuration parameters: "
          + DFSConfigKeys.DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_KEY
          + " = " + minMaintenanceR + " < 0");
    }
    if (minMaintenanceR > defaultReplication) {
      throw new IOException("Unexpected configuration parameters: "
          + DFSConfigKeys.DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_KEY
          + " = " + minMaintenanceR + " > "
          + DFSConfigKeys.DFS_REPLICATION_KEY
          + " = " + defaultReplication);
    }
    this.minReplicationToBeInMaintenance = (short)minMaintenanceR;
    long heartbeatIntervalSecs = conf.getTimeDuration(
        DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
        DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT, TimeUnit.SECONDS);
    long blockRecoveryTimeout = getBlockRecoveryTimeout(heartbeatIntervalSecs);
    pendingRecoveryBlocks = new PendingRecoveryBlocks(blockRecoveryTimeout);
    this.blockReportLeaseManager = new BlockReportLeaseManager(conf);
    bmSafeMode = new BlockManagerSafeMode(this, namesystem, haEnabled, conf);
    // Log the effective replication settings for operator visibility.
    LOG.info("defaultReplication = {}", defaultReplication);
    LOG.info("maxReplication = {}", maxReplication);
    LOG.info("minReplication = {}", minReplication);
    LOG.info("maxReplicationStreams = {}", maxReplicationStreams);
    LOG.info("redundancyRecheckInterval = {}ms", redundancyRecheckIntervalMs);
    LOG.info("encryptDataTransfer = {}", encryptDataTransfer);
    LOG.info("maxNumBlocksToLog = {}", maxNumBlocksToLog);
}
/**
 * Build the block token secret manager from configuration, or return null
 * when block access tokens are disabled.
 *
 * <p>When HA is enabled, this NameNode's position among the configured
 * NameNode IDs is used as the serial-number index so each NameNode mints
 * tokens from a distinct key range.
 *
 * @param conf configuration to read token settings from
 * @return the secret manager, or null if tokens are disabled
 * @throws IOException if security is on but block tokens are disabled
 */
private static BlockTokenSecretManager createBlockTokenSecretManager(
    final Configuration conf) throws IOException {
    final boolean tokensEnabled = conf.getBoolean(
        DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY,
        DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT);
    LOG.info("{} = {}", DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY,
        tokensEnabled);
    if (!tokensEnabled) {
      // Refuse to start a secure cluster without block tokens: DataNode
      // connections would fail later in confusing ways.
      if (UserGroupInformation.isSecurityEnabled()) {
        String errMessage = "Security is enabled but block access tokens " +
            "(via " + DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY + ") " +
            "aren't enabled. This may cause issues " +
            "when clients attempt to connect to a DataNode. Aborting NameNode";
        throw new IOException(errMessage);
      }
      return null;
    }
    final long updateMin = conf.getLong(
        DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_KEY,
        DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_DEFAULT);
    final long lifetimeMin = conf.getLong(
        DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_KEY,
        DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_DEFAULT);
    final String encryptionAlgorithm = conf.get(
        DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY);
    LOG.info("{}={} min(s), {}={} min(s), {}={}",
        DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_KEY, updateMin,
        DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_KEY, lifetimeMin,
        DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY, encryptionAlgorithm);
    final String nsId = DFSUtil.getNamenodeNameServiceId(conf);
    final boolean isHaEnabled = HAUtil.isHAEnabled(conf, nsId);
    final boolean shouldWriteProtobufToken = conf.getBoolean(
        DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_PROTOBUF_ENABLE,
        DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_PROTOBUF_ENABLE_DEFAULT);
    // Non-HA: single NameNode at index 0 out of 1.
    int nnIndex = 0;
    int nnCount = 1;
    if (isHaEnabled) {
      // Figure out which index we are among the configured NameNodes.
      Collection<String> nnIds = DFSUtilClient.getNameNodeIds(conf, nsId);
      String nnId = HAUtil.getNameNodeId(conf, nsId);
      for (String id : nnIds) {
        if (id.equals(nnId)) {
          break;
        }
        nnIndex++;
      }
      nnCount = nnIds.size();
    }
    return new BlockTokenSecretManager(updateMin * 60 * 1000L,
        lifetimeMin * 60 * 1000L, nnIndex, nnCount, null,
        encryptionAlgorithm, shouldWriteProtobufToken);
}
/** @return the block storage policy registered under {@code policyName}. */
public BlockStoragePolicy getStoragePolicy(final String policyName) {
    return this.storagePolicySuite.getPolicy(policyName);
}
/** @return the block storage policy registered under {@code policyId}. */
public BlockStoragePolicy getStoragePolicy(final byte policyId) {
    return this.storagePolicySuite.getPolicy(policyId);
}
/** @return every block storage policy known to the policy suite. */
public BlockStoragePolicy[] getStoragePolicies() {
    return this.storagePolicySuite.getAllPolicies();
}
/**
 * Record the block pool id for this namespace and propagate it to the
 * block token secret manager when block tokens are enabled.
 */
public void setBlockPoolId(String blockPoolId) {
    this.blockPoolId = blockPoolId;
    // Same check as isBlockTokenEnabled(): a null manager means disabled.
    if (blockTokenSecretManager != null) {
      blockTokenSecretManager.setBlockPoolId(blockPoolId);
    }
}
/** @return the block pool id previously set via {@link #setBlockPoolId}. */
public String getBlockPoolId() {
    return this.blockPoolId;
}
/** @return the storage policy suite used by this block manager. */
public BlockStoragePolicySuite getStoragePolicySuite() {
    return this.storagePolicySuite;
}
/** @return the BlockTokenSecretManager, or null when block tokens are disabled. */
@VisibleForTesting
public BlockTokenSecretManager getBlockTokenSecretManager() {
    return this.blockTokenSecretManager;
}
/**
 * Allow silent termination of the redundancy monitor for testing by
 * clearing the namesystem-running check flag.
 */
@VisibleForTesting
void enableRMTerminationForTesting() {
    this.checkNSRunning = false;
}
/** @return true when block access tokens are in use (manager was created). */
private boolean isBlockTokenEnabled() {
    return this.blockTokenSecretManager != null;
}
/**
 * Should the access keys be updated?
 *
 * @param updateTime time used by the secret manager to decide on rotation
 * @return false when tokens are disabled; otherwise the manager's decision
 */
boolean shouldUpdateBlockKey(final long updateTime) throws IOException {
    if (!isBlockTokenEnabled()) {
      return false;
    }
    return blockTokenSecretManager.updateKeys(updateTime);
}
/**
 * Start the block manager's background services: pending-reconstruction
 * timer, datanode manager, redundancy monitor, storage-info defragmenter,
 * block report processor, the BlockStats MBean, and safe mode.
 * Statements run in this order deliberately; threads are named before start.
 *
 * @param conf configuration forwarded to the datanode manager
 * @param blockTotal total complete blocks, used to seed safe mode
 */
public void activate(Configuration conf, long blockTotal) {
    pendingReconstruction.start();
    datanodeManager.activate(conf);
    this.redundancyThread.setName("RedundancyMonitor");
    this.redundancyThread.start();
    storageInfoDefragmenterThread.setName("StorageInfoMonitor");
    storageInfoDefragmenterThread.start();
    this.blockReportThread.start();
    mxBeanName = MBeans.register("NameNode", "BlockStats", this);
    bmSafeMode.activate(blockTotal);
}
/**
 * Shut down the block manager: stop the SPS manager and safe mode, wind
 * down the worker threads (giving each up to 3s to exit), then close the
 * datanode manager, pending reconstruction tracking and the blocks map.
 */
public void close() {
    if (getSPSManager() != null) {
      getSPSManager().stop();
    }
    bmSafeMode.close();
    try {
      redundancyThread.interrupt();
      storageInfoDefragmenterThread.interrupt();
      blockReportThread.interrupt();
      redundancyThread.join(3000);
      storageInfoDefragmenterThread.join(3000);
      blockReportThread.join(3000);
    } catch (InterruptedException ie) {
      // Fix: do not swallow the interrupt — restore the thread's
      // interrupt status so callers can observe it and finish shutdown.
      Thread.currentThread().interrupt();
    }
    datanodeManager.close();
    pendingReconstruction.stop();
    blocksMap.close();
}
/** @return the datanodeManager owned by this block manager. */
public DatanodeManager getDatanodeManager() {
    return this.datanodeManager;
}
/** @return the placement policy used for contiguous (replicated) blocks. */
@VisibleForTesting
public BlockPlacementPolicy getBlockPlacementPolicy() {
    return this.placementPolicies.getPolicy(CONTIGUOUS);
}
/**
 * Dump block manager metadata to {@code out}: live/dead datanode counts,
 * reconstruction queues, postponed mis-replicated blocks, pending
 * reconstruction and deletion queues, corrupt replicas, and a datanode dump.
 * Caller must hold the namesystem write lock.
 */
public void metaSave(PrintWriter out) {
    assert namesystem.hasWriteLock(); // TODO: block manager read lock and NS write lock
    final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
    final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
    datanodeManager.fetchDatanodes(live, dead, false);
    out.println("Live Datanodes: " + live.size());
    out.println("Dead Datanodes: " + dead.size());
    //
    // Need to iterate over all queues from neededReconstruction
    // except for the QUEUE_WITH_CORRUPT_BLOCKS (printed separately below)
    //
    synchronized (neededReconstruction) {
      out.println("Metasave: Blocks waiting for reconstruction: "
          + neededReconstruction.getLowRedundancyBlockCount());
      for (int i = 0; i < neededReconstruction.LEVEL; i++) {
        if (i != neededReconstruction.QUEUE_WITH_CORRUPT_BLOCKS) {
          for (Iterator<BlockInfo> it = neededReconstruction.iterator(i);
               it.hasNext();) {
            Block block = it.next();
            dumpBlockMeta(block, out);
          }
        }
      }
      //
      // Now prints corrupt blocks separately
      //
      out.println("Metasave: Blocks currently missing: " +
          neededReconstruction.getCorruptBlockSize());
      for (Iterator<BlockInfo> it = neededReconstruction.
          iterator(neededReconstruction.QUEUE_WITH_CORRUPT_BLOCKS);
           it.hasNext();) {
        Block block = it.next();
        dumpBlockMeta(block, out);
      }
    }
    // Dump any postponed over-replicated blocks
    out.println("Mis-replicated blocks that have been postponed:");
    for (Block block : postponedMisreplicatedBlocks) {
      dumpBlockMeta(block, out);
    }
    // Dump blocks from pendingReconstruction
    pendingReconstruction.metaSave(out);
    // Dump blocks that are waiting to be deleted
    invalidateBlocks.dump(out);
    //Dump corrupt blocks and their storageIDs
    Set<Block> corruptBlocks = corruptReplicas.getCorruptBlocksSet();
    out.println("Corrupt Blocks:");
    for(Block block : corruptBlocks) {
      Collection<DatanodeDescriptor> corruptNodes =
          corruptReplicas.getNodes(block);
      if (corruptNodes == null) {
        // Inconsistency between the corrupt-replicas map and blocksMap.
        LOG.warn("{} is corrupt but has no associated node.",
                 block.getBlockId());
        continue;
      }
      // Walk the block's storages and report each one that holds a corrupt
      // replica; stop once every corrupt node has been accounted for.
      int numNodesToFind = corruptNodes.size();
      for (DatanodeStorageInfo storage : blocksMap.getStorages(block)) {
        DatanodeDescriptor node = storage.getDatanodeDescriptor();
        if (corruptNodes.contains(node)) {
          String storageId = storage.getStorageID();
          DatanodeStorageInfo storageInfo = node.getStorageInfo(storageId);
          State state = (storageInfo == null) ? null : storageInfo.getState();
          out.println("Block=" + block.toString()
              + "\tSize=" + block.getNumBytes()
              + "\tNode=" + node.getName() + "\tStorageID=" + storageId
              + "\tStorageState=" + state
              + "\tTotalReplicas=" + blocksMap.numNodes(block)
              + "\tReason=" + corruptReplicas.getCorruptReason(block, node));
          numNodesToFind--;
          if (numNodesToFind == 0) {
            break;
          }
        }
      }
      if (numNodesToFind > 0) {
        // Some corrupt nodes were not among the block's known locations.
        String[] corruptNodesList = new String[corruptNodes.size()];
        int i = 0;
        for (DatanodeDescriptor d : corruptNodes) {
          corruptNodesList[i] = d.getHostName();
          i++;
        }
        out.println(block.getBlockId() + " corrupt on " +
            StringUtils.join(",", corruptNodesList) + " but not all nodes are" +
            "found in its block locations");
      }
    }
    // Dump all datanodes
    getDatanodeManager().datanodeDump(out);
}
/**
 * Dump the metadata for the given block in a human-readable form:
 * owning file (when resolvable), replica-count summary, and the state of
 * each storage holding a replica.
 */
private void dumpBlockMeta(Block block, PrintWriter out) {
    List<DatanodeDescriptor> containingNodes =
        new ArrayList<DatanodeDescriptor>();
    List<DatanodeStorageInfo> containingLiveReplicasNodes =
        new ArrayList<DatanodeStorageInfo>();
    NumberReplicas numReplicas = new NumberReplicas();
    // source node returned is not used
    chooseSourceDatanodes(getStoredBlock(block), containingNodes,
        containingLiveReplicasNodes, numReplicas,
        new LinkedList<Byte>(), LowRedundancyBlocks.LEVEL);
    // containingLiveReplicasNodes can include READ_ONLY_SHARED replicas which are
    // not included in the numReplicas.liveReplicas() count
    assert containingLiveReplicasNodes.size() >= numReplicas.liveReplicas();
    int usableReplicas = numReplicas.liveReplicas() +
        numReplicas.decommissionedAndDecommissioning();
    if (block instanceof BlockInfo) {
      BlockCollection bc = getBlockCollection((BlockInfo)block);
      String fileName = (bc == null) ? "[orphaned]" : bc.getName();
      out.print(fileName + ": ");
    }
    // Print the block plus a per-category replica summary; a block with no
    // usable replicas is flagged MISSING.
    out.print(block + ((usableReplicas > 0)? "" : " MISSING") +
        " (replicas:" +
        " live: " + numReplicas.liveReplicas() +
        " decommissioning and decommissioned: " +
        numReplicas.decommissionedAndDecommissioning() +
        " corrupt: " + numReplicas.corruptReplicas() +
        " in excess: " + numReplicas.excessReplicas() +
        " maintenance mode: " + numReplicas.maintenanceReplicas() + ") ");
    Collection<DatanodeDescriptor> corruptNodes =
        corruptReplicas.getNodes(block);
    // One entry per storage: node, replica state, and staleness of its
    // block-deletion information.
    for (DatanodeStorageInfo storage : blocksMap.getStorages(block)) {
      final DatanodeDescriptor node = storage.getDatanodeDescriptor();
      String state = "";
      if (corruptNodes != null && corruptNodes.contains(node)) {
        state = "(corrupt)";
      } else if (node.isDecommissioned() ||
          node.isDecommissionInProgress()) {
        state = "(decommissioned)";
      } else if (node.isMaintenance() || node.isInMaintenance()){
        state = "(maintenance)";
      }
      if (storage.areBlockContentsStale()) {
        state += " (block deletions maybe out of date)";
      }
      out.print(" " + node + state + " : ");
    }
    out.println("");
}
/** @return the cap on outgoing replication streams per node (maxReplicationStreams). */
public int getMaxReplicationStreams() {
    return this.maxReplicationStreams;
}
/**
 * Default number of storages for the block: the configured default
 * replication for contiguous blocks, or the striped group's real total
 * block number for EC blocks.
 *
 * @throws IllegalArgumentException for an unrecognized block type
 */
public int getDefaultStorageNum(BlockInfo block) {
    switch (block.getBlockType()) {
    case CONTIGUOUS:
      return defaultReplication;
    case STRIPED:
      return ((BlockInfoStriped) block).getRealTotalBlockNum();
    default:
      throw new IllegalArgumentException(
          "getDefaultStorageNum called with unknown BlockType: "
          + block.getBlockType());
    }
}
/** @return the minimum replica count required before a write is allowed. */
public short getMinReplication() {
    return this.minReplication;
}
/**
 * Minimum number of storages needed for the block: {@code minReplication}
 * for contiguous blocks, or the real data-block count for striped blocks.
 *
 * @throws IllegalArgumentException for an unrecognized block type
 */
public short getMinStorageNum(BlockInfo block) {
    switch(block.getBlockType()) {
    case CONTIGUOUS:
      return minReplication;
    case STRIPED:
      return ((BlockInfoStriped) block).getRealDataBlockNum();
    default:
      throw new IllegalArgumentException(
          "getMinStorageNum called with unknown BlockType: "
          + block.getBlockType());
    }
}
/** @return live replicas required before ENTERING_MAINTENANCE becomes IN_MAINTENANCE. */
public short getMinReplicationToBeInMaintenance() {
    return this.minReplicationToBeInMaintenance;
}
/**
 * Minimum storages a block must keep while its nodes are in maintenance:
 * all real data blocks for striped blocks, otherwise the smaller of the
 * maintenance minimum and the block's own replication factor.
 */
private short getMinMaintenanceStorageNum(BlockInfo block) {
    return block.isStriped()
        ? ((BlockInfoStriped) block).getRealDataBlockNum()
        : (short) Math.min(minReplicationToBeInMaintenance,
            block.getReplication());
}
/** @return whether the block's current live replicas meet its minimum. */
public boolean hasMinStorage(BlockInfo block) {
    // Delegate to the overload after counting live replicas.
    return hasMinStorage(block, countNodes(block).liveReplicas());
}
/** @return whether {@code liveNum} replicas satisfy the block's minimum. */
public boolean hasMinStorage(BlockInfo block, int liveNum) {
    return getMinStorageNum(block) <= liveNum;
}
/**
 * Commit a block of a file.
 *
 * @param block block to be committed
 * @param commitBlock - contains client reported block length and generation
 * @return true if the block is changed to committed state.
 * @throws IOException if the client-reported generation stamp does not
 * match the one stored on the NameNode.
 */
private boolean commitBlock(final BlockInfo block,
    final Block commitBlock) throws IOException {
    // Idempotent: committing an already-COMMITTED block is a no-op.
    if (block.getBlockUCState() == BlockUCState.COMMITTED)
      return false;
    // The client-reported length must never be shorter than what we store.
    assert block.getNumBytes() <= commitBlock.getNumBytes() :
      "commitBlock length is less than the stored one "
      + commitBlock.getNumBytes() + " vs. " + block.getNumBytes();
    if(block.getGenerationStamp() != commitBlock.getGenerationStamp()) {
      throw new IOException("Commit block with mismatching GS. NN has " +
        block + ", client submits " + commitBlock);
    }
    // Commit pins the final length/GS; replicas with a stale GS are removed.
    List<ReplicaUnderConstruction> staleReplicas =
        block.commitBlock(commitBlock);
    removeStaleReplicas(staleReplicas, block);
    return true;
}
/**
 * Commit the last block of the file and mark it as complete if it
 * meets the minimum redundancy requirement.
 *
 * @param bc block collection
 * @param commitBlock - contains client reported block length and generation
 * @param iip - INodes in path to bc
 * @return true if the last block is changed to committed state.
 * @throws IOException if the block does not have at least a minimal number
 * of replicas reported from data-nodes.
 */
public boolean commitOrCompleteLastBlock(BlockCollection bc,
    Block commitBlock, INodesInPath iip) throws IOException {
    if(commitBlock == null)
      return false; // not committing, this is a block allocation retry
    BlockInfo lastBlock = bc.getLastBlock();
    if(lastBlock == null)
      return false; // no blocks in file yet
    if(lastBlock.isComplete())
      return false; // already completed (e.g. by syncBlock)
    if(lastBlock.isUnderRecovery()) {
      throw new IOException("Commit or complete block " + commitBlock +
          ", whereas it is under recovery.");
    }
    final boolean committed = commitBlock(lastBlock, commitBlock);
    if (committed && lastBlock.isStriped()) {
      // update scheduled size for DatanodeStorages that do not store any
      // internal blocks
      lastBlock.getUnderConstructionFeature()
          .updateStorageScheduledSize((BlockInfoStriped) lastBlock);
    }
    // Count replicas on decommissioning nodes, as these will not be
    // decommissioned unless recovery/completing last block has finished
    NumberReplicas numReplicas = countNodes(lastBlock);
    int numUsableReplicas = numReplicas.liveReplicas() +
        numReplicas.decommissioning() +
        numReplicas.liveEnteringMaintenanceReplicas();
    if (hasMinStorage(lastBlock, numUsableReplicas)) {
      // Enough usable replicas: complete normally (and track expected
      // replicas that have not yet sent an IBR).
      if (committed) {
        addExpectedReplicasToPending(lastBlock);
      }
      completeBlock(lastBlock, iip, false);
    } else if (pendingRecoveryBlocks.isUnderRecovery(lastBlock)) {
      // We've just finished recovery for this block, complete
      // the block forcibly disregarding number of replicas.
      // This is to ignore minReplication, the block will be closed
      // and then replicated out.
      completeBlock(lastBlock, iip, true);
      updateNeededReconstructions(lastBlock, 1, 0);
    }
    return committed;
}
/**
 * If IBR is not sent from expected locations yet, add the datanodes to
 * pendingReconstruction in order to keep RedundancyMonitor from scheduling
 * the block. Only applies to contiguous (non-striped) blocks.
 */
public void addExpectedReplicasToPending(BlockInfo blk) {
    if (blk.isStriped()) {
      return;
    }
    final DatanodeStorageInfo[] expectedStorages =
        blk.getUnderConstructionFeature().getExpectedStorageLocations();
    if (expectedStorages.length <= blk.numNodes()) {
      // Every expected location has already reported the block.
      return;
    }
    // Collect the expected datanodes from which no replica is recorded yet.
    final List<DatanodeDescriptor> pendingNodes = new ArrayList<>();
    for (DatanodeStorageInfo storage : expectedStorages) {
      final DatanodeDescriptor dn = storage.getDatanodeDescriptor();
      if (blk.findStorageInfo(dn) == null) {
        pendingNodes.add(dn);
      }
    }
    pendingReconstruction.increment(blk,
        pendingNodes.toArray(new DatanodeDescriptor[pendingNodes.size()]));
}
/**
 * Convert a specified block of the file to a complete block.
 * @param curBlock - block to be completed
 * @param iip - INodes in path to file containing curBlock; if null,
 *              this will be resolved internally
 * @param force - force completion of the block
 * @throws IOException if the block does not have at least a minimal number
 * of replicas reported from data-nodes, or (unless forced) has not been
 * COMMITTED by the client.
 */
private void completeBlock(BlockInfo curBlock, INodesInPath iip,
    boolean force) throws IOException {
    // Idempotent: nothing to do for an already-complete block.
    if (curBlock.isComplete()) {
      return;
    }
    int numNodes = curBlock.numNodes();
    if (!force && !hasMinStorage(curBlock, numNodes)) {
      throw new IOException("Cannot complete block: "
          + "block does not satisfy minimal replication requirement.");
    }
    if (!force && curBlock.getBlockUCState() != BlockUCState.COMMITTED) {
      throw new IOException(
          "Cannot complete block: block has not been COMMITTED by the client");
    }
    convertToCompleteBlock(curBlock, iip);
    // Since safe-mode only counts complete blocks, and we now have
    // one more complete block, we need to adjust the total up, and
    // also count it as safe, if we have at least the minimum replica
    // count. (We may not have the minimum replica count yet if this is
    // a "forced" completion when a file is getting closed by an
    // OP_CLOSE edit on the standby).
    bmSafeMode.adjustBlockTotals(0, 1);
    final int minStorage = curBlock.isStriped() ?
        ((BlockInfoStriped) curBlock).getRealDataBlockNum() : minReplication;
    bmSafeMode.incrementSafeBlockCount(Math.min(numNodes, minStorage),
        curBlock);
}
/**
 * Convert a specified block of the file to a complete block.
 * Skips validity checking and safe mode block total updates; use
 * {@link BlockManager#completeBlock} to include these.
 * @param curBlock - block to be completed
 * @param iip - INodes in path to file containing curBlock; if null,
 *              this will be resolved internally
 * @throws IOException propagated from the directory space-accounting update
 */
private void convertToCompleteBlock(BlockInfo curBlock, INodesInPath iip)
    throws IOException {
    curBlock.convertToCompleteBlock();
    // Update quota/space accounting now that the block's size is final.
    namesystem.getFSDirectory().updateSpaceForCompleteBlock(curBlock, iip);
}
/**
 * Force the given block in the given file to be marked as complete,
 * regardless of whether enough replicas are present. This is necessary
 * when tailing edit logs as a Standby.
 */
public void forceCompleteBlock(final BlockInfo block) throws IOException {
    // Committing the block against itself pins its current length/GS and
    // yields any replicas whose generation stamp is now stale.
    final List<ReplicaUnderConstruction> staleReplicas =
        block.commitBlock(block);
    removeStaleReplicas(staleReplicas, block);
    completeBlock(block, null, true);
}
/**
 * Convert the last block of the file to an under construction block.<p>
 * The block is converted only if the file has blocks and the last one
 * is a partial block (its size is less than the preferred block size).
 * The converted block is returned to the client.
 * The client uses the returned block locations to form the data pipeline
 * for this block.<br>
 * The methods returns null if there is no partial block at the end.
 * The client is supposed to allocate a new block with the next call.
 *
 * @param bc file
 * @param bytesToRemove num of bytes to remove from block
 * @return the last block locations if the block is partial or null otherwise
 */
public LocatedBlock convertLastBlockToUnderConstruction(
    BlockCollection bc, long bytesToRemove) throws IOException {
    BlockInfo lastBlock = bc.getLastBlock();
    // Null when there is no block, or the truncated block would be exactly
    // a full block (no partial block to reopen).
    if (lastBlock == null ||
        bc.getPreferredBlockSize() == lastBlock.getNumBytes() - bytesToRemove) {
      return null;
    }
    assert lastBlock == getStoredBlock(lastBlock) :
      "last block of the file is not in blocksMap";
    DatanodeStorageInfo[] targets = getStorages(lastBlock);
    // convert the last block to under construction. note no block replacement
    // is happening
    bc.convertLastBlockToUC(lastBlock, targets);
    // Remove block from reconstruction queue.
    NumberReplicas replicas = countNodes(lastBlock);
    neededReconstruction.remove(lastBlock, replicas.liveReplicas(),
        replicas.readOnlyReplicas(),
        replicas.outOfServiceReplicas(), getExpectedRedundancyNum(lastBlock));
    pendingReconstruction.remove(lastBlock);
    // remove this block from the list of pending blocks to be deleted.
    for (DatanodeStorageInfo storage : targets) {
      final Block b = getBlockOnStorage(lastBlock, storage);
      if (b != null) {
        invalidateBlocks.remove(storage.getDatanodeDescriptor(), b);
      }
    }
    // Adjust safe-mode totals, since under-construction blocks don't
    // count in safe-mode.
    bmSafeMode.adjustBlockTotals(
        // decrement safe if we had enough
        hasMinStorage(lastBlock, targets.length) ? -1 : 0,
        // always decrement total blocks
        -1);
    final long fileLength = bc.computeContentSummary(
        getStoragePolicySuite()).getLength();
    // Offset of the reopened block within the file.
    final long pos = fileLength - lastBlock.getNumBytes();
    return createLocatedBlock(null, lastBlock, pos,
        BlockTokenIdentifier.AccessMode.WRITE);
}
/**
 * Get all valid locations of the block: storages holding a replica that is
 * not queued for deletion on that datanode.
 */
private List<DatanodeStorageInfo> getValidLocations(BlockInfo block) {
    final List<DatanodeStorageInfo> validStorages =
        new ArrayList<>(blocksMap.numNodes(block));
    for (DatanodeStorageInfo storage : blocksMap.getStorages(block)) {
      // Skip replicas that are pending invalidation on this node.
      final Block replica = getBlockOnStorage(block, storage);
      if (replica == null) {
        continue;
      }
      if (!invalidateBlocks.contains(storage.getDatanodeDescriptor(), replica)) {
        validStorages.add(storage);
      }
    }
    return validStorages;
}
/**
 * Populate {@code locatedBlocks} with located blocks covering the byte
 * range {@code [offset, offset + length)} of the file's block array.
 *
 * @param locatedBlocks builder that accumulates the located blocks
 * @param blocks the file's blocks; a single zero-length first block means
 *               the file has no readable content yet
 * @param offset starting byte offset within the file
 * @param length number of bytes requested
 * @param mode access mode for block tokens, or null for no tokens
 */
private void createLocatedBlockList(
    LocatedBlockBuilder locatedBlocks,
    final BlockInfo[] blocks,
    final long offset, final long length,
    final AccessMode mode) throws IOException {
    int curBlk;
    long curPos = 0, blkSize = 0;
    int nrBlocks = (blocks[0].getNumBytes() == 0) ? 0 : blocks.length;
    // Skip whole blocks that end at or before the requested offset.
    for (curBlk = 0; curBlk < nrBlocks; curBlk++) {
      blkSize = blocks[curBlk].getNumBytes();
      assert blkSize > 0 : "Block of size 0";
      if (curPos + blkSize > offset) {
        break;
      }
      curPos += blkSize;
    }
    if (nrBlocks > 0 && curBlk == nrBlocks) { // offset >= end of file
      return;
    }
    long endOff = offset + length;
    // Add blocks until the range, the block array, or the builder's
    // capacity is exhausted. (Also cleans up an unbraced if and a
    // redundant trailing return statement.)
    do {
      locatedBlocks.addBlock(
          createLocatedBlock(locatedBlocks, blocks[curBlk], curPos, mode));
      curPos += blocks[curBlk].getNumBytes();
      curBlk++;
    } while (curPos < endOff
        && curBlk < blocks.length
        && !locatedBlocks.isBlockMax());
}
/**
 * Create a located block for the block containing (or nearest to) the file
 * position {@code endPos}: scans blocks until one ends at or past endPos.
 */
private LocatedBlock createLocatedBlock(LocatedBlockBuilder locatedBlocks,
    final BlockInfo[] blocks,
    final long endPos, final AccessMode mode) throws IOException {
    // A single zero-length first block means there is nothing to scan past.
    final int blockCount = (blocks[0].getNumBytes() == 0) ? 0 : blocks.length;
    long pos = 0;
    int idx = 0;
    while (idx < blockCount) {
      final long size = blocks[idx].getNumBytes();
      if (pos + size >= endPos) {
        break;
      }
      pos += size;
      idx++;
    }
    return createLocatedBlock(locatedBlocks, blocks[idx], pos, mode);
}
/**
 * Create a located block for {@code blk} at file position {@code pos},
 * attaching a block access token when {@code mode} is non-null.
 */
private LocatedBlock createLocatedBlock(LocatedBlockBuilder locatedBlocks,
    final BlockInfo blk, final long pos, final AccessMode mode)
    throws IOException {
    final LocatedBlock locatedBlock =
        createLocatedBlock(locatedBlocks, blk, pos);
    if (mode == null) {
      return locatedBlock;
    }
    setBlockToken(locatedBlock, mode);
    return locatedBlock;
}
/**
 * @return a LocatedBlock for the given block: for an under-construction
 * block, the expected storage locations; for a complete block, the current
 * non-failed, non-maintenance storages, excluding corrupt replicas unless
 * every replica is corrupt (in which case all are returned and the block
 * is flagged corrupt).
 */
private LocatedBlock createLocatedBlock(LocatedBlockBuilder locatedBlocks,
    final BlockInfo blk, final long pos) throws IOException {
    if (!blk.isComplete()) {
      // Under construction: report the expected locations rather than the
      // replicas reported so far.
      final BlockUnderConstructionFeature uc = blk.getUnderConstructionFeature();
      if (blk.isStriped()) {
        final DatanodeStorageInfo[] storages = uc.getExpectedStorageLocations();
        final ExtendedBlock eb = new ExtendedBlock(getBlockPoolId(),
            blk);
        return newLocatedStripedBlock(eb, storages, uc.getBlockIndices(), pos,
            false);
      } else {
        final DatanodeStorageInfo[] storages = uc.getExpectedStorageLocations();
        final ExtendedBlock eb = new ExtendedBlock(getBlockPoolId(),
            blk);
        return null == locatedBlocks
            ? newLocatedBlock(eb, storages, pos, false)
            : locatedBlocks.newLocatedBlock(eb, storages, pos, false);
      }
    }
    // get block locations
    NumberReplicas numReplicas = countNodes(blk);
    final int numCorruptNodes = numReplicas.corruptReplicas();
    final int numCorruptReplicas = corruptReplicas.numCorruptReplicas(blk);
    if (numCorruptNodes != numCorruptReplicas) {
      LOG.warn("Inconsistent number of corrupt replicas for {}"
          + " blockMap has {} but corrupt replicas map has {}",
          blk, numCorruptNodes, numCorruptReplicas);
    }
    final int numNodes = blocksMap.numNodes(blk);
    final boolean isCorrupt;
    if (blk.isStriped()) {
      // A striped block is corrupt when too few live internal blocks remain
      // to reconstruct the data.
      BlockInfoStriped sblk = (BlockInfoStriped) blk;
      isCorrupt = numCorruptReplicas != 0 &&
          numReplicas.liveReplicas() < sblk.getRealDataBlockNum();
    } else {
      // A replicated block is corrupt only when every replica is corrupt.
      isCorrupt = numCorruptReplicas != 0 && numCorruptReplicas == numNodes;
    }
    // If the block is fully corrupt return all locations; otherwise skip
    // the corrupt ones.
    int numMachines = isCorrupt ? numNodes: numNodes - numCorruptReplicas;
    numMachines -= numReplicas.maintenanceNotForReadReplicas();
    DatanodeStorageInfo[] machines = new DatanodeStorageInfo[numMachines];
    final byte[] blockIndices = blk.isStriped() ? new byte[numMachines] : null;
    int j = 0, i = 0;
    if (numMachines > 0) {
      final boolean noCorrupt = (numCorruptReplicas == 0);
      for(DatanodeStorageInfo storage : blocksMap.getStorages(blk)) {
        if (storage.getState() != State.FAILED) {
          final DatanodeDescriptor d = storage.getDatanodeDescriptor();
          // Don't pick IN_MAINTENANCE or dead ENTERING_MAINTENANCE states.
          if (d.isInMaintenance()
              || (d.isEnteringMaintenance() && !d.isAlive())) {
            continue;
          }
          if (noCorrupt) {
            machines[j++] = storage;
            i = setBlockIndices(blk, blockIndices, i, storage);
          } else {
            final boolean replicaCorrupt = isReplicaCorrupt(blk, d);
            if (isCorrupt || !replicaCorrupt) {
              machines[j++] = storage;
              i = setBlockIndices(blk, blockIndices, i, storage);
            }
          }
        }
      }
    }
    // Trim the array if maintenance/failed filtering removed some slots.
    if(j < machines.length) {
      machines = Arrays.copyOf(machines, j);
    }
    assert j == machines.length :
      "isCorrupt: " + isCorrupt +
      " numMachines: " + numMachines +
      " numNodes: " + numNodes +
      " numCorrupt: " + numCorruptNodes +
      " numCorruptRepls: " + numCorruptReplicas;
    final ExtendedBlock eb = new ExtendedBlock(getBlockPoolId(), blk);
    return blockIndices == null
        ? null == locatedBlocks ? newLocatedBlock(eb, machines, pos, isCorrupt)
            : locatedBlocks.newLocatedBlock(eb, machines, pos, isCorrupt)
        : newLocatedStripedBlock(eb, machines, blockIndices, pos, isCorrupt);
}
/**
 * Create a LocatedBlocks covering {@code [offset, offset + length)}.
 * Caller must hold the namesystem read lock.
 *
 * @param blocks the file's blocks; null yields null, empty yields an
 *               empty LocatedBlocks
 * @param fileSizeExcludeBlocksUnderConstruction file length excluding UC blocks
 * @param isFileUnderConstruction whether the file is still being written
 * @param offset starting byte offset
 * @param length number of bytes requested
 * @param needBlockToken whether READ block tokens should be attached
 * @param inSnapshot whether the file is read through a snapshot
 * @param feInfo file encryption info to attach, may be null
 * @param ecPolicy erasure coding policy to attach, may be null
 */
public LocatedBlocks createLocatedBlocks(final BlockInfo[] blocks,
    final long fileSizeExcludeBlocksUnderConstruction,
    final boolean isFileUnderConstruction, final long offset,
    final long length, final boolean needBlockToken,
    final boolean inSnapshot, FileEncryptionInfo feInfo,
    ErasureCodingPolicy ecPolicy)
    throws IOException {
    assert namesystem.hasReadLock();
    if (blocks == null) {
      return null;
    } else if (blocks.length == 0) {
      return new LocatedBlocks(0, isFileUnderConstruction,
          Collections.<LocatedBlock> emptyList(), null, false, feInfo, ecPolicy);
    } else {
      if (LOG.isDebugEnabled()) {
        LOG.debug("blocks = {}", java.util.Arrays.asList(blocks));
      }
      final AccessMode mode = needBlockToken? BlockTokenIdentifier.AccessMode.READ: null;
      LocatedBlockBuilder locatedBlocks = providedStorageMap
          .newLocatedBlocks(Integer.MAX_VALUE)
          .fileLength(fileSizeExcludeBlocksUnderConstruction)
          .lastUC(isFileUnderConstruction)
          .encryption(feInfo)
          .erasureCoding(ecPolicy);
      createLocatedBlockList(locatedBlocks, blocks, offset, length, mode);
      if (!inSnapshot) {
        // Live file: the last block is reported separately, positioned at
        // the end of the completed data.
        final BlockInfo last = blocks[blocks.length - 1];
        final long lastPos = last.isComplete()?
            fileSizeExcludeBlocksUnderConstruction - last.getNumBytes()
            : fileSizeExcludeBlocksUnderConstruction;
        locatedBlocks
            .lastBlock(createLocatedBlock(locatedBlocks, last, lastPos, mode))
            .lastComplete(last.isComplete());
      } else {
        // Snapshot read: the last block is whichever block covers the
        // snapshot's file length, and is always treated as complete.
        locatedBlocks
            .lastBlock(createLocatedBlock(locatedBlocks, blocks,
                fileSizeExcludeBlocksUnderConstruction, mode))
            .lastComplete(true);
      }
      LocatedBlocks locations = locatedBlocks.build();
      // Set caching information for the located blocks.
      CacheManager cm = namesystem.getCacheManager();
      if (cm != null) {
        cm.setCachedLocations(locations);
      }
      return locations;
    }
}
/** @return current access keys. */
public ExportedBlockKeys getBlockKeys() {
return isBlockTokenEnabled()? blockTokenSecretManager.exportKeys()
: ExportedBlockKeys.DUMMY_KEYS;
}
  /**
   * Generate a block token for the located block. No-op when block tokens
   * are disabled.
   *
   * @param b located block to receive the token(s)
   * @param mode the access mode the token(s) will grant
   */
  public void setBlockToken(final LocatedBlock b,
      final AccessMode mode) throws IOException {
    if (isBlockTokenEnabled()) {
      // Use cached UGI if serving RPC calls.
      if (b.isStriped()) {
        Preconditions.checkState(b instanceof LocatedStripedBlock);
        LocatedStripedBlock sb = (LocatedStripedBlock) b;
        byte[] indices = sb.getBlockIndices();
        Token<BlockTokenIdentifier>[] blockTokens = new Token[indices.length];
        // Each internal block of the striped group gets its own token; the
        // internal block id is the group id plus the block index.
        ExtendedBlock internalBlock = new ExtendedBlock(b.getBlock());
        for (int i = 0; i < indices.length; i++) {
          internalBlock.setBlockId(b.getBlock().getBlockId() + indices[i]);
          blockTokens[i] = blockTokenSecretManager.generateToken(
              NameNode.getRemoteUser().getShortUserName(),
              internalBlock, EnumSet.of(mode), b.getStorageTypes(),
              b.getStorageIDs());
        }
        sb.setBlockTokens(blockTokens);
      }
      // Also attach a token for the block (or whole block group) itself.
      b.setBlockToken(blockTokenSecretManager.generateToken(
          NameNode.getRemoteUser().getShortUserName(),
          b.getBlock(), EnumSet.of(mode), b.getStorageTypes(),
          b.getStorageIDs()));
    }
  }
void addKeyUpdateCommand(final List<DatanodeCommand> cmds,
final DatanodeDescriptor nodeinfo) {
// check access key update
if (isBlockTokenEnabled() && nodeinfo.needKeyUpdate()) {
cmds.add(new KeyUpdateCommand(blockTokenSecretManager.exportKeys()));
nodeinfo.setNeedKeyUpdate(false);
}
}
public DataEncryptionKey generateDataEncryptionKey() {
if (isBlockTokenEnabled() && encryptDataTransfer) {
return blockTokenSecretManager.generateDataEncryptionKey();
} else {
return null;
}
}
/**
* Clamp the specified replication between the minimum and the maximum
* replication levels.
*/
public short adjustReplication(short replication) {
return replication < minReplication? minReplication
: replication > maxReplication? maxReplication: replication;
}
/**
* Check whether the replication parameter is within the range
* determined by system configuration and throw an exception if it's not.
*
* @param src the path to the target file
* @param replication the requested replication factor
* @param clientName the name of the client node making the request
* @throws java.io.IOException thrown if the requested replication factor
* is out of bounds
*/
public void verifyReplication(String src,
short replication,
String clientName) throws IOException {
String err = null;
if (replication > maxReplication) {
err = " exceeds maximum of " + maxReplication;
} else if (replication < minReplication) {
err = " is less than the required minimum of " + minReplication;
}
if (err != null) {
throw new IOException("Requested replication factor of " + replication
+ err + " for " + src
+ (clientName == null? "": ", clientName=" + clientName));
}
}
/**
* Check if a block is replicated to at least the minimum replication.
*/
public boolean isSufficientlyReplicated(BlockInfo b) {
// Compare against the lesser of the minReplication and number of live DNs.
final int replication =
Math.min(minReplication, getDatanodeManager().getNumLiveDataNodes());
return countNodes(b).liveReplicas() >= replication;
}
/** Get all blocks with location information from a datanode. */
public BlocksWithLocations getBlocksWithLocations(final DatanodeID datanode,
final long size, final long minBlockSize) throws
UnregisteredNodeException {
final DatanodeDescriptor node = getDatanodeManager().getDatanode(datanode);
if (node == null) {
blockLog.warn("BLOCK* getBlocks: Asking for blocks from an" +
" unrecorded node {}", datanode);
throw new HadoopIllegalArgumentException(
"Datanode " + datanode + " not found.");
}
int numBlocks = node.numBlocks();
if(numBlocks == 0) {
return new BlocksWithLocations(new BlockWithLocations[0]);
}
// starting from a random block
int startBlock = ThreadLocalRandom.current().nextInt(numBlocks);
Iterator<BlockInfo> iter = node.getBlockIterator(startBlock);
List<BlockWithLocations> results = new ArrayList<BlockWithLocations>();
long totalSize = 0;
BlockInfo curBlock;
while(totalSize<size && iter.hasNext()) {
curBlock = iter.next();
if(!curBlock.isComplete()) continue;
if (curBlock.getNumBytes() < minBlockSize) {
continue;
}
totalSize += addBlock(curBlock, results);
}
if(totalSize<size) {
iter = node.getBlockIterator(); // start from the beginning
for(int i=0; i<startBlock&&totalSize<size; i++) {
curBlock = iter.next();
if(!curBlock.isComplete()) continue;
if (curBlock.getNumBytes() < minBlockSize) {
continue;
}
totalSize += addBlock(curBlock, results);
}
}
return new BlocksWithLocations(
results.toArray(new BlockWithLocations[results.size()]));
}
/** Remove the blocks associated to the given datanode. */
void removeBlocksAssociatedTo(final DatanodeDescriptor node) {
providedStorageMap.removeDatanode(node);
for (DatanodeStorageInfo storage : node.getStorageInfos()) {
final Iterator<BlockInfo> it = storage.getBlockIterator();
//add the BlockInfos to a new collection as the
//returned iterator is not modifiable.
Collection<BlockInfo> toRemove = new ArrayList<>();
while (it.hasNext()) {
toRemove.add(it.next());
}
for (BlockInfo b : toRemove) {
removeStoredBlock(b, node);
}
}
// Remove all pending DN messages referencing this DN.
pendingDNMessages.removeAllMessagesForDatanode(node);
node.resetBlocks();
invalidateBlocks.remove(node);
}
/** Remove the blocks associated to the given DatanodeStorageInfo. */
void removeBlocksAssociatedTo(final DatanodeStorageInfo storageInfo) {
assert namesystem.hasWriteLock();
final Iterator<BlockInfo> it = storageInfo.getBlockIterator();
DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
Collection<BlockInfo> toRemove = new ArrayList<>();
while (it.hasNext()) {
toRemove.add(it.next());
}
for (BlockInfo block : toRemove) {
removeStoredBlock(block, node);
final Block b = getBlockOnStorage(block, storageInfo);
if (b != null) {
invalidateBlocks.remove(node, b);
}
}
checkSafeMode();
LOG.info("Removed blocks associated with storage {} from DataNode {}",
storageInfo, node);
}
/**
* Adds block to list of blocks which will be invalidated on specified
* datanode and log the operation
*/
void addToInvalidates(final Block block, final DatanodeInfo datanode) {
if (!isPopulatingReplQueues()) {
return;
}
invalidateBlocks.add(block, datanode, true);
}
/**
* Adds block to list of blocks which will be invalidated on all its
* datanodes.
*/
private void addToInvalidates(BlockInfo storedBlock) {
if (!isPopulatingReplQueues()) {
return;
}
StringBuilder datanodes = blockLog.isDebugEnabled()
? new StringBuilder() : null;
for (DatanodeStorageInfo storage : blocksMap.getStorages(storedBlock)) {
if (storage.getState() != State.NORMAL) {
continue;
}
final DatanodeDescriptor node = storage.getDatanodeDescriptor();
final Block b = getBlockOnStorage(storedBlock, storage);
if (b != null) {
invalidateBlocks.add(b, node, false);
if (datanodes != null) {
datanodes.append(node).append(" ");
}
}
}
if (datanodes != null && datanodes.length() != 0) {
blockLog.debug("BLOCK* addToInvalidates: {} {}", storedBlock, datanodes);
}
}
private Block getBlockOnStorage(BlockInfo storedBlock,
DatanodeStorageInfo storage) {
return storedBlock.isStriped() ?
((BlockInfoStriped) storedBlock).getBlockOnStorage(storage) : storedBlock;
}
  /**
   * Mark the block belonging to datanode as corrupt.
   * Must be called with the namesystem write lock held.
   * @param blk Block to be marked as corrupt
   * @param dn Datanode which holds the corrupt replica
   * @param storageID if known, null otherwise.
   * @param reason a textual reason why the block should be marked corrupt,
   * for logging purposes
   * @throws IOException if the datanode is not registered
   */
  public void findAndMarkBlockAsCorrupt(final ExtendedBlock blk,
      final DatanodeInfo dn, String storageID, String reason) throws IOException {
    assert namesystem.hasWriteLock();
    final Block reportedBlock = blk.getLocalBlock();
    final BlockInfo storedBlock = getStoredBlock(reportedBlock);
    if (storedBlock == null) {
      // Check if the replica is in the blockMap, if not
      // ignore the request for now. This could happen when BlockScanner
      // thread of Datanode reports bad block before Block reports are sent
      // by the Datanode on startup
      blockLog.debug("BLOCK* findAndMarkBlockAsCorrupt: {} not found", blk);
      return;
    }
    DatanodeDescriptor node = getDatanodeManager().getDatanode(dn);
    if (node == null) {
      throw new IOException("Cannot mark " + blk
          + " as corrupt because datanode " + dn + " (" + dn.getDatanodeUuid()
          + ") does not exist");
    }
    // Resolve the storage holding the replica: first by the reported
    // storage id, then by scanning the stored block's known storages.
    DatanodeStorageInfo storage = null;
    if (storageID != null) {
      storage = node.getStorageInfo(storageID);
    }
    if (storage == null) {
      storage = storedBlock.findStorageInfo(node);
    }
    if (storage == null) {
      // No storage on this node holds the block; nothing to mark.
      blockLog.debug("BLOCK* findAndMarkBlockAsCorrupt: {} not found on {}",
          blk, dn);
      return;
    }
    markBlockAsCorrupt(new BlockToMarkCorrupt(reportedBlock, storedBlock,
        blk.getGenerationStamp(), reason, Reason.CORRUPTION_REPORTED),
        storage, node);
  }
  /**
   * Mark a replica (of a contiguous block) or an internal block (of a striped
   * block group) as corrupt.
   * @param b Indicating the reported bad block and the corresponding BlockInfo
   * stored in blocksMap.
   * @param storageInfo storage that contains the block, if known. null otherwise.
   * @param node the datanode holding the corrupt replica
   */
  private void markBlockAsCorrupt(BlockToMarkCorrupt b,
      DatanodeStorageInfo storageInfo,
      DatanodeDescriptor node) throws IOException {
    if (b.getStored().isDeleted()) {
      // Orphaned block: just schedule the replica for deletion.
      blockLog.debug("BLOCK markBlockAsCorrupt: {} cannot be marked as" +
          " corrupt as it does not belong to any file", b);
      addToInvalidates(b.getCorrupted(), node);
      return;
    }
    short expectedRedundancies =
        getExpectedRedundancyNum(b.getStored());
    // Add replica to the data-node if it is not already there
    if (storageInfo != null) {
      storageInfo.addBlock(b.getStored(), b.getCorrupted());
    }
    // Add this replica to corruptReplicas Map. For striped blocks, we always
    // use the id of whole striped block group when adding to corruptReplicas
    Block corrupted = new Block(b.getCorrupted());
    if (b.getStored().isStriped()) {
      corrupted.setBlockId(b.getStored().getBlockId());
    }
    corruptReplicas.addToCorruptReplicasMap(corrupted, node, b.getReason(),
        b.getReasonCode(), b.getStored().isStriped());
    NumberReplicas numberOfReplicas = countNodes(b.getStored());
    boolean hasEnoughLiveReplicas = numberOfReplicas.liveReplicas() >=
        expectedRedundancies;
    boolean minReplicationSatisfied = hasMinStorage(b.getStored(),
        numberOfReplicas.liveReplicas());
    boolean hasMoreCorruptReplicas = minReplicationSatisfied &&
        (numberOfReplicas.liveReplicas() + numberOfReplicas.corruptReplicas()) >
        expectedRedundancies;
    boolean corruptedDuringWrite = minReplicationSatisfied &&
        b.isCorruptedDuringWrite();
    // case 1: have enough number of live replicas
    // case 2: corrupted replicas + live replicas > Replication factor
    // case 3: Block is marked corrupt due to failure while writing. In this
    // case genstamp will be different than that of valid block.
    // In all these cases we can delete the replica.
    // In case of 3, rbw block will be deleted and valid block can be replicated
    if (hasEnoughLiveReplicas || hasMoreCorruptReplicas
        || corruptedDuringWrite) {
      // the block is over-replicated so invalidate the replicas immediately
      invalidateBlock(b, node, numberOfReplicas);
    } else if (isPopulatingReplQueues()) {
      // add the block to neededReconstruction
      updateNeededReconstructions(b.getStored(), -1, 0);
    }
  }
  /**
   * Invalidates the given block on the given datanode.
   *
   * Invalidation is postponed when some replicas live on nodes whose block
   * reports may be stale, since deleting based on stale state is unsafe.
   *
   * @param b the corrupt block to invalidate
   * @param dn the datanode holding the replica
   * @param nr replica counts for the block, computed by the caller
   * @return true if the block was successfully invalidated and no longer
   * present in the BlocksMap
   * @throws IOException if the datanode is not registered
   */
  private boolean invalidateBlock(BlockToMarkCorrupt b, DatanodeInfo dn,
      NumberReplicas nr) throws IOException {
    blockLog.debug("BLOCK* invalidateBlock: {} on {}", b, dn);
    DatanodeDescriptor node = getDatanodeManager().getDatanode(dn);
    if (node == null) {
      throw new IOException("Cannot invalidate " + b
          + " because datanode " + dn + " does not exist.");
    }
    // Check how many copies we have of the block
    if (nr.replicasOnStaleNodes() > 0) {
      blockLog.debug("BLOCK* invalidateBlocks: postponing " +
          "invalidation of {} on {} because {} replica(s) are located on " +
          "nodes with potentially out-of-date block reports", b, dn,
          nr.replicasOnStaleNodes());
      postponeBlock(b.getCorrupted());
      return false;
    } else {
      // we already checked the number of replicas in the caller of this
      // function and know there are enough live replicas, so we can delete it.
      addToInvalidates(b.getCorrupted(), dn);
      removeStoredBlock(b.getStored(), node);
      blockLog.debug("BLOCK* invalidateBlocks: {} on {} listed for deletion.",
          b, dn);
      return true;
    }
  }
  /** Setter for {@code shouldPostponeBlocksFromFuture}. */
  public void setPostponeBlocksFromFuture(boolean postpone) {
    this.shouldPostponeBlocksFromFuture = postpone;
  }
  /** Queue the block as postponed mis-replicated for later re-processing. */
  private void postponeBlock(Block blk) {
    postponedMisreplicatedBlocks.add(blk);
  }
  /** Refresh the cached counters from the underlying queues/maps. */
  void updateState() {
    pendingReconstructionBlocksCount = pendingReconstruction.size();
    lowRedundancyBlocksCount = neededReconstruction.size();
    corruptReplicaBlocksCount = corruptReplicas.size();
  }
  /**
   * Return number of low redundancy blocks but not missing blocks.
   * Delegates to {@code neededReconstruction}.
   */
  public int getUnderReplicatedNotMissingBlocks() {
    return neededReconstruction.getLowRedundancyBlockCount();
  }
/**
* Schedule blocks for deletion at datanodes
* @param nodesToProcess number of datanodes to schedule deletion work
* @return total number of block for deletion
*/
int computeInvalidateWork(int nodesToProcess) {
final List<DatanodeInfo> nodes = invalidateBlocks.getDatanodes();
Collections.shuffle(nodes);
nodesToProcess = Math.min(nodes.size(), nodesToProcess);
int blockCnt = 0;
for (DatanodeInfo dnInfo : nodes) {
int blocks = invalidateWorkForOneNode(dnInfo);
if (blocks > 0) {
blockCnt += blocks;
if (--nodesToProcess == 0) {
break;
}
}
}
return blockCnt;
}
  /**
   * Scan blocks in {@link #neededReconstruction} and assign reconstruction
   * (replication or erasure coding) work to data-nodes they belong to.
   *
   * The number of process blocks equals either twice the number of live
   * data-nodes or the number of low redundancy blocks whichever is less.
   *
   * @param blocksToProcess maximum number of blocks to select for
   *        reconstruction in this pass
   * @return number of blocks scheduled for reconstruction during this
   * iteration.
   */
  int computeBlockReconstructionWork(int blocksToProcess) {
    List<List<BlockInfo>> blocksToReconstruct = null;
    // Hold the write lock only while selecting candidate blocks; the
    // heavier scheduling work happens afterwards.
    namesystem.writeLock();
    try {
      // Choose the blocks to be reconstructed
      blocksToReconstruct = neededReconstruction
          .chooseLowRedundancyBlocks(blocksToProcess);
    } finally {
      namesystem.writeUnlock();
    }
    return computeReconstructionWorkForBlocks(blocksToReconstruct);
  }
  /**
   * Reconstruct a set of blocks to full strength through replication or
   * erasure coding.
   *
   * Runs in three phases: (1) build tasks under the write lock, (2) choose
   * target nodes with the lock released, (3) re-validate and hand tasks to
   * datanodes under the write lock again.
   *
   * @param blocksToReconstruct blocks to be reconstructed, for each priority
   * @return the number of blocks scheduled for replication
   */
  @VisibleForTesting
  int computeReconstructionWorkForBlocks(
      List<List<BlockInfo>> blocksToReconstruct) {
    int scheduledWork = 0;
    List<BlockReconstructionWork> reconWork = new LinkedList<>();
    // Step 1: categorize at-risk blocks into replication and EC tasks
    namesystem.writeLock();
    try {
      synchronized (neededReconstruction) {
        for (int priority = 0; priority < blocksToReconstruct
            .size(); priority++) {
          for (BlockInfo block : blocksToReconstruct.get(priority)) {
            BlockReconstructionWork rw = scheduleReconstruction(block,
                priority);
            if (rw != null) {
              reconWork.add(rw);
            }
          }
        }
      }
    } finally {
      namesystem.writeUnlock();
    }
    // Step 2: choose target nodes for each reconstruction task
    final Set<Node> excludedNodes = new HashSet<>();
    for(BlockReconstructionWork rw : reconWork){
      // Exclude all of the containing nodes from being targets.
      // This list includes decommissioning or corrupt nodes.
      excludedNodes.clear();
      for (DatanodeDescriptor dn : rw.getContainingNodes()) {
        excludedNodes.add(dn);
      }
      // choose replication targets: NOT HOLDING THE GLOBAL LOCK
      final BlockPlacementPolicy placementPolicy =
          placementPolicies.getPolicy(rw.getBlock().getBlockType());
      rw.chooseTargets(placementPolicy, storagePolicySuite, excludedNodes);
    }
    // Step 3: add tasks to the DN
    namesystem.writeLock();
    try {
      for(BlockReconstructionWork rw : reconWork){
        final DatanodeStorageInfo[] targets = rw.getTargets();
        if(targets == null || targets.length == 0){
          rw.resetTargets();
          continue;
        }
        synchronized (neededReconstruction) {
          // Re-validate under the lock: state may have changed while the
          // lock was released in step 2.
          if (validateReconstructionWork(rw)) {
            scheduledWork++;
          }
        }
      }
    } finally {
      namesystem.writeUnlock();
    }
    if (blockLog.isDebugEnabled()) {
      // log which blocks have been scheduled for reconstruction
      for(BlockReconstructionWork rw : reconWork){
        DatanodeStorageInfo[] targets = rw.getTargets();
        if (targets != null && targets.length != 0) {
          StringBuilder targetList = new StringBuilder("datanode(s)");
          for (DatanodeStorageInfo target : targets) {
            targetList.append(' ');
            targetList.append(target.getDatanodeDescriptor());
          }
          blockLog.debug("BLOCK* ask {} to replicate {} to {}", rw.getSrcNodes(),
              rw.getBlock(), targetList);
        }
      }
      blockLog.debug(
          "BLOCK* neededReconstruction = {} pendingReconstruction = {}",
          neededReconstruction.size(), pendingReconstruction.size());
    }
    return scheduledWork;
  }
// Check if the number of live + pending replicas satisfies
// the expected redundancy.
boolean hasEnoughEffectiveReplicas(BlockInfo block,
NumberReplicas numReplicas, int pendingReplicaNum) {
int required = getExpectedLiveRedundancyNum(block, numReplicas);
int numEffectiveReplicas = numReplicas.liveReplicas() + pendingReplicaNum;
return (numEffectiveReplicas >= required) &&
(pendingReplicaNum > 0 || isPlacementPolicySatisfied(block));
}
  /**
   * Build a reconstruction task for the given low-redundancy block, or
   * return null when no work should be scheduled for it right now.
   *
   * @param block the block needing reconstruction
   * @param priority the block's priority in {@link #neededReconstruction}
   * @return a {@link BlockReconstructionWork} (replication or EC), or null
   */
  BlockReconstructionWork scheduleReconstruction(BlockInfo block,
      int priority) {
    // skip abandoned block or block reopened for append
    if (block.isDeleted() || !block.isCompleteOrCommitted()) {
      // remove from neededReconstruction
      neededReconstruction.remove(block, priority);
      return null;
    }
    // get a source data-node
    List<DatanodeDescriptor> containingNodes = new ArrayList<>();
    List<DatanodeStorageInfo> liveReplicaNodes = new ArrayList<>();
    NumberReplicas numReplicas = new NumberReplicas();
    List<Byte> liveBlockIndices = new ArrayList<>();
    final DatanodeDescriptor[] srcNodes = chooseSourceDatanodes(block,
        containingNodes, liveReplicaNodes, numReplicas,
        liveBlockIndices, priority);
    short requiredRedundancy = getExpectedLiveRedundancyNum(block,
        numReplicas);
    if(srcNodes == null || srcNodes.length == 0) {
      // block can not be reconstructed from any node
      LOG.debug("Block {} cannot be reconstructed from any node", block);
      NameNode.getNameNodeMetrics().incNumTimesReReplicationNotScheduled();
      return null;
    }
    // liveReplicaNodes can include READ_ONLY_SHARED replicas which are
    // not included in the numReplicas.liveReplicas() count
    assert liveReplicaNodes.size() >= numReplicas.liveReplicas();
    int pendingNum = pendingReconstruction.getNumReplicas(block);
    if (hasEnoughEffectiveReplicas(block, numReplicas, pendingNum)) {
      neededReconstruction.remove(block, priority);
      blockLog.debug("BLOCK* Removing {} from neededReconstruction as" +
          " it has enough replicas", block);
      NameNode.getNameNodeMetrics().incNumTimesReReplicationNotScheduled();
      return null;
    }
    int additionalReplRequired;
    if (numReplicas.liveReplicas() < requiredRedundancy) {
      additionalReplRequired = requiredRedundancy - numReplicas.liveReplicas()
          - pendingNum;
    } else {
      additionalReplRequired = 1; // Needed on a new rack
    }
    final BlockCollection bc = getBlockCollection(block);
    if (block.isStriped()) {
      if (pendingNum > 0) {
        // Wait the previous reconstruction to finish.
        NameNode.getNameNodeMetrics().incNumTimesReReplicationNotScheduled();
        return null;
      }
      // should reconstruct all the internal blocks before scheduling
      // replication task for decommissioning node(s).
      if (additionalReplRequired - numReplicas.decommissioning() -
          numReplicas.liveEnteringMaintenanceReplicas() > 0) {
        additionalReplRequired = additionalReplRequired -
            numReplicas.decommissioning() -
            numReplicas.liveEnteringMaintenanceReplicas();
      }
      // Unbox the live internal-block indices into a primitive array.
      byte[] indices = new byte[liveBlockIndices.size()];
      for (int i = 0 ; i < liveBlockIndices.size(); i++) {
        indices[i] = liveBlockIndices.get(i);
      }
      return new ErasureCodingWork(getBlockPoolId(), block, bc, srcNodes,
          containingNodes, liveReplicaNodes, additionalReplRequired,
          priority, indices);
    } else {
      return new ReplicationWork(block, bc, srcNodes,
          containingNodes, liveReplicaNodes, additionalReplRequired,
          priority);
    }
  }
private boolean isInNewRack(DatanodeDescriptor[] srcs,
DatanodeDescriptor target) {
LOG.debug("check if target {} increases racks, srcs={}", target,
Arrays.asList(srcs));
for (DatanodeDescriptor src : srcs) {
if (!src.isDecommissionInProgress() &&
src.getNetworkLocation().equals(target.getNetworkLocation())) {
LOG.debug("the target {} is in the same rack with src {}", target, src);
return false;
}
}
return true;
}
  /**
   * Re-validate a reconstruction task under the namesystem lock and, if it
   * is still needed, add it to the datanode's task list and move the block
   * into {@link #pendingReconstruction}.
   *
   * @param rw the task built earlier (targets already chosen)
   * @return true if the task was scheduled, false if it was dropped
   */
  private boolean validateReconstructionWork(BlockReconstructionWork rw) {
    BlockInfo block = rw.getBlock();
    int priority = rw.getPriority();
    // Recheck since global lock was released
    // skip abandoned block or block reopened for append
    if (block.isDeleted() || !block.isCompleteOrCommitted()) {
      neededReconstruction.remove(block, priority);
      rw.resetTargets();
      return false;
    }
    // do not schedule more if enough replicas is already pending
    NumberReplicas numReplicas = countNodes(block);
    final short requiredRedundancy =
        getExpectedLiveRedundancyNum(block, numReplicas);
    final int pendingNum = pendingReconstruction.getNumReplicas(block);
    if (hasEnoughEffectiveReplicas(block, numReplicas, pendingNum)) {
      neededReconstruction.remove(block, priority);
      rw.resetTargets();
      blockLog.debug("BLOCK* Removing {} from neededReconstruction as" +
          " it has enough replicas", block);
      return false;
    }
    DatanodeStorageInfo[] targets = rw.getTargets();
    if ((numReplicas.liveReplicas() >= requiredRedundancy) &&
        (!isPlacementPolicySatisfied(block)) ) {
      if (!isInNewRack(rw.getSrcNodes(), targets[0].getDatanodeDescriptor())) {
        // No use continuing, unless a new rack in this case
        return false;
      }
      // mark that the reconstruction work is to replicate internal block to a
      // new rack.
      rw.setNotEnoughRack();
    }
    // Add block to the datanode's task list
    rw.addTaskToDatanode(numReplicas);
    DatanodeStorageInfo.incrementBlocksScheduled(targets);
    // Move the block-replication into a "pending" state.
    // The reason we use 'pending' is so we can retry
    // reconstructions that fail after an appropriate amount of time.
    pendingReconstruction.increment(block,
        DatanodeStorageInfo.toDatanodeDescriptors(targets));
    blockLog.debug("BLOCK* block {} is moved from neededReconstruction to "
        + "pendingReconstruction", block);
    int numEffectiveReplicas = numReplicas.liveReplicas() + pendingNum;
    // remove from neededReconstruction
    if(numEffectiveReplicas + targets.length >= requiredRedundancy) {
      neededReconstruction.remove(block, priority);
    }
    return true;
  }
/** Choose target for WebHDFS redirection. */
public DatanodeStorageInfo[] chooseTarget4WebHDFS(String src,
DatanodeDescriptor clientnode, Set<Node> excludes, long blocksize) {
return placementPolicies.getPolicy(CONTIGUOUS).chooseTarget(src, 1,
clientnode, Collections.<DatanodeStorageInfo>emptyList(), false,
excludes, blocksize, storagePolicySuite.getDefaultPolicy(), null);
}
/** Choose target for getting additional datanodes for an existing pipeline. */
public DatanodeStorageInfo[] chooseTarget4AdditionalDatanode(String src,
int numAdditionalNodes,
Node clientnode,
List<DatanodeStorageInfo> chosen,
Set<Node> excludes,
long blocksize,
byte storagePolicyID,
BlockType blockType) {
final BlockStoragePolicy storagePolicy =
storagePolicySuite.getPolicy(storagePolicyID);
final BlockPlacementPolicy blockplacement =
placementPolicies.getPolicy(blockType);
return blockplacement.chooseTarget(src, numAdditionalNodes, clientnode,
chosen, true, excludes, blocksize, storagePolicy, null);
}
  /**
   * Choose target datanodes for creating a new block.
   *
   * @throws IOException
   *           if the number of targets < minimum replication.
   * @see BlockPlacementPolicy#chooseTarget(String, int, Node,
   *      Set, long, List, BlockStoragePolicy, EnumSet)
   */
  public DatanodeStorageInfo[] chooseTarget4NewBlock(final String src,
      final int numOfReplicas, final Node client,
      final Set<Node> excludedNodes,
      final long blocksize,
      final List<String> favoredNodes,
      final byte storagePolicyID,
      final BlockType blockType,
      final ErasureCodingPolicy ecPolicy,
      final EnumSet<AddBlockFlag> flags) throws IOException {
    List<DatanodeDescriptor> favoredDatanodeDescriptors =
        getDatanodeDescriptors(favoredNodes);
    final BlockStoragePolicy storagePolicy =
        storagePolicySuite.getPolicy(storagePolicyID);
    final BlockPlacementPolicy blockplacement =
        placementPolicies.getPolicy(blockType);
    final DatanodeStorageInfo[] targets = blockplacement.chooseTarget(src,
        numOfReplicas, client, excludedNodes, blocksize,
        favoredDatanodeDescriptors, storagePolicy, flags);
    // Shared format string for both failure modes below.
    final String errorMessage = "File %s could only be written to %d of " +
        "the %d %s. There are %d datanode(s) running and %s "
        + "node(s) are excluded in this operation.";
    // Contiguous blocks need at least minReplication targets; striped
    // blocks need at least one target per data unit of the EC policy.
    if (blockType == BlockType.CONTIGUOUS && targets.length < minReplication) {
      throw new IOException(String.format(errorMessage, src,
          targets.length, minReplication, "minReplication nodes",
          getDatanodeManager().getNetworkTopology().getNumOfLeaves(),
          (excludedNodes == null? "no": excludedNodes.size())));
    } else if (blockType == BlockType.STRIPED &&
        targets.length < ecPolicy.getNumDataUnits()) {
      throw new IOException(
          String.format(errorMessage, src, targets.length,
              ecPolicy.getNumDataUnits(),
              String.format("required nodes for %s", ecPolicy.getName()),
              getDatanodeManager().getNetworkTopology().getNumOfLeaves(),
              (excludedNodes == null ? "no" : excludedNodes.size())));
    }
    return targets;
  }
/**
* Get list of datanode descriptors for given list of nodes. Nodes are
* hostaddress:port or just hostaddress.
*/
List<DatanodeDescriptor> getDatanodeDescriptors(List<String> nodes) {
List<DatanodeDescriptor> datanodeDescriptors = null;
if (nodes != null) {
datanodeDescriptors = new ArrayList<DatanodeDescriptor>(nodes.size());
for (int i = 0; i < nodes.size(); i++) {
DatanodeDescriptor node = datanodeManager.getDatanodeDescriptor(nodes.get(i));
if (node != null) {
datanodeDescriptors.add(node);
}
}
}
return datanodeDescriptors;
}
/**
* Get the associated {@link DatanodeDescriptor} for the storage.
* If the storage is of type PROVIDED, one of the nodes that reported
* PROVIDED storage are returned. If not, this is equivalent to
* {@code storage.getDatanodeDescriptor()}.
* @param storage
* @return the associated {@link DatanodeDescriptor}.
*/
private DatanodeDescriptor getDatanodeDescriptorFromStorage(
DatanodeStorageInfo storage) {
if (storage.getStorageType() == StorageType.PROVIDED) {
return providedStorageMap.chooseProvidedDatanode();
}
return storage.getDatanodeDescriptor();
}
  /**
   * Parse the data-nodes the block belongs to and choose a certain number
   * from them to be the recovery sources.
   *
   * We prefer nodes that are in DECOMMISSION_INPROGRESS state to other nodes
   * since the former do not have write traffic and hence are less busy.
   * We do not use already decommissioned nodes as a source, unless there is
   * no other choice.
   * Otherwise we randomly choose nodes among those that did not reach their
   * replication limits. However, if the recovery work is of the highest
   * priority and all nodes have reached their replication limits, we will
   * randomly choose the desired number of nodes despite the replication limit.
   *
   * In addition form a list of all nodes containing the block
   * and calculate its replication numbers.
   *
   * @param block Block for which a replication source is needed
   * @param containingNodes List to be populated with nodes found to contain
   *                        the given block
   * @param nodesContainingLiveReplicas List to be populated with nodes found
   *                                    to contain live replicas of the given
   *                                    block
   * @param numReplicas NumberReplicas instance to be initialized with the
   *                    counts of live, corrupt, excess, and decommissioned
   *                    replicas of the given block.
   * @param liveBlockIndices List to be populated with indices of healthy
   *                         blocks in a striped block group
   * @param priority integer representing replication priority of the given
   *                 block
   * @return the array of DatanodeDescriptor of the chosen nodes from which to
   *         recover the given block
   */
  @VisibleForTesting
  DatanodeDescriptor[] chooseSourceDatanodes(BlockInfo block,
      List<DatanodeDescriptor> containingNodes,
      List<DatanodeStorageInfo> nodesContainingLiveReplicas,
      NumberReplicas numReplicas,
      List<Byte> liveBlockIndices, int priority) {
    // The output collections are owned by the caller but (re)filled here.
    containingNodes.clear();
    nodesContainingLiveReplicas.clear();
    List<DatanodeDescriptor> srcNodes = new ArrayList<>();
    liveBlockIndices.clear();
    final boolean isStriped = block.isStriped();
    DatanodeDescriptor decommissionedSrc = null;
    // For striped blocks, track which internal-block indices have been
    // seen so duplicates can be counted as REDUNDANT instead of LIVE.
    BitSet bitSet = isStriped ?
        new BitSet(((BlockInfoStriped) block).getTotalBlockNum()) : null;
    for (DatanodeStorageInfo storage : blocksMap.getStorages(block)) {
      final DatanodeDescriptor node = getDatanodeDescriptorFromStorage(storage);
      final StoredReplicaState state = checkReplicaOnStorage(numReplicas, block,
          storage, corruptReplicas.getNodes(block), false);
      if (state == StoredReplicaState.LIVE) {
        if (storage.getStorageType() == StorageType.PROVIDED) {
          // Re-wrap PROVIDED storage with the chosen datanode descriptor.
          storage = new DatanodeStorageInfo(node, storage.getStorageID(),
              storage.getStorageType(), storage.getState());
        }
        nodesContainingLiveReplicas.add(storage);
      }
      containingNodes.add(node);
      // do not select the replica if it is corrupt or excess
      if (state == StoredReplicaState.CORRUPT ||
          state == StoredReplicaState.EXCESS) {
        continue;
      }
      // Never use maintenance node not suitable for read
      // or unknown state replicas.
      if (state == null
          || state == StoredReplicaState.MAINTENANCE_NOT_FOR_READ) {
        continue;
      }
      // Save the live decommissioned replica in case we need it. Such replicas
      // are normally not used for replication, but if nothing else is
      // available, one can be selected as a source.
      if (state == StoredReplicaState.DECOMMISSIONED) {
        // Random coin flip keeps the fallback choice unbiased among
        // multiple decommissioned candidates.
        if (decommissionedSrc == null ||
            ThreadLocalRandom.current().nextBoolean()) {
          decommissionedSrc = node;
        }
        continue;
      }
      if (priority != LowRedundancyBlocks.QUEUE_HIGHEST_PRIORITY
          && (!node.isDecommissionInProgress() && !node.isEnteringMaintenance())
          && node.getNumberOfBlocksToBeReplicated() >= maxReplicationStreams) {
        continue; // already reached replication limit
      }
      // The hard limit applies regardless of priority or node state.
      if (node.getNumberOfBlocksToBeReplicated() >= replicationStreamsHardLimit) {
        continue;
      }
      if(isStriped || srcNodes.isEmpty()) {
        srcNodes.add(node);
        if (isStriped) {
          byte blockIndex = ((BlockInfoStriped) block).
              getStorageBlockIndex(storage);
          liveBlockIndices.add(blockIndex);
          if (!bitSet.get(blockIndex)) {
            bitSet.set(blockIndex);
          } else if (state == StoredReplicaState.LIVE) {
            // Duplicate internal block: recount it as redundant.
            numReplicas.subtract(StoredReplicaState.LIVE, 1);
            numReplicas.add(StoredReplicaState.REDUNDANT, 1);
          }
        }
        continue;
      }
      // for replicated block, switch to a different node randomly
      // this to prevent from deterministically selecting the same node even
      // if the node failed to replicate the block on previous iterations
      if (ThreadLocalRandom.current().nextBoolean()) {
        srcNodes.set(0, node);
      }
    }
    // Pick a live decommissioned replica, if nothing else is available.
    if (!isStriped && nodesContainingLiveReplicas.isEmpty() &&
        srcNodes.isEmpty() && decommissionedSrc != null) {
      srcNodes.add(decommissionedSrc);
    }
    return srcNodes.toArray(new DatanodeDescriptor[srcNodes.size()]);
  }
/**
 * Reap any reconstruction requests that have timed out in
 * {@code pendingReconstruction} and, for each block that still needs
 * reconstruction, put it back into the neededReconstruction queue.
 */
void processPendingReconstructions() {
  BlockInfo[] timedOutItems = pendingReconstruction.getTimedOutBlocks();
  if (timedOutItems == null) {
    return;
  }
  namesystem.writeLock();
  try {
    for (BlockInfo timedOutItem : timedOutItems) {
      /*
       * Use the blockinfo from the blocksmap to be certain we're working
       * with the most up-to-date block information (e.g. genstamp).
       */
      BlockInfo bi = blocksMap.getStoredBlock(timedOutItem);
      if (bi == null) {
        // Block no longer exists in the blocks map; nothing to requeue.
        continue;
      }
      NumberReplicas num = countNodes(timedOutItem);
      if (isNeededReconstruction(bi, num)) {
        neededReconstruction.add(bi, num.liveReplicas(),
            num.readOnlyReplicas(), num.outOfServiceReplicas(),
            getExpectedRedundancyNum(bi));
      }
    }
  } finally {
    namesystem.writeUnlock();
  }
  /* If we know the target datanodes where the replication timedout,
   * we could invoke decBlocksScheduled() on it. Its ok for now.
   */
}
/**
 * Request a new full block report lease for the given datanode.
 *
 * @param nodeReg registration of the datanode asking for a lease
 * @return a lease id from the {@link BlockReportLeaseManager}, or 0 if
 *         the datanode is unknown or unregistered
 */
public long requestBlockReportLeaseId(DatanodeRegistration nodeReg) {
  assert namesystem.hasReadLock();
  DatanodeDescriptor node;
  try {
    node = datanodeManager.getDatanode(nodeReg);
  } catch (UnregisteredNodeException e) {
    LOG.warn("Unregistered datanode {}", nodeReg);
    return 0;
  }
  if (node == null) {
    LOG.warn("Failed to find datanode {}", nodeReg);
    return 0;
  }
  // Request a new block report lease. The BlockReportLeaseManager has
  // its own internal locking.
  final long leaseId = blockReportLeaseManager.requestLease(node);
  BlockManagerFaultInjector.getInstance()
      .requestBlockReportLease(node, leaseId);
  return leaseId;
}
/**
 * Register a datanode with the datanode manager and re-evaluate safe
 * mode, since registration can change the cluster's block/replica view.
 * Caller must hold the namesystem write lock (asserted below).
 *
 * @param nodeReg registration info sent by the datanode
 * @throws IOException if the datanode manager rejects the registration
 */
public void registerDatanode(DatanodeRegistration nodeReg)
throws IOException {
assert namesystem.hasWriteLock();
datanodeManager.registerDatanode(nodeReg);
bmSafeMode.checkSafeMode();
}
/**
 * Set the total number of blocks in the system.
 * If safe mode is not currently on, this is a no-op.
 *
 * @param total new total block count to hand to the safe-mode tracker
 */
public void setBlockTotal(long total) {
  if (!bmSafeMode.isInSafeMode()) {
    // Outside safe mode the total is not tracked here.
    return;
  }
  bmSafeMode.setBlockTotal(total);
  bmSafeMode.checkSafeMode();
}
/** @return true if the block manager's safe mode is currently on. */
public boolean isInSafeMode() {
return bmSafeMode.isInSafeMode();
}
/** @return a human-readable safe mode status message (delegated). */
public String getSafeModeTip() {
return bmSafeMode.getSafeModeTip();
}
/**
 * Attempt to leave safe mode (delegated to the safe-mode tracker).
 * @param force whether to force the exit
 * @return the result reported by {@code bmSafeMode.leaveSafeMode}
 */
public boolean leaveSafeMode(boolean force) {
return bmSafeMode.leaveSafeMode(force);
}
/** Re-evaluate the safe mode state (delegated to the safe-mode tracker). */
public void checkSafeMode() {
bmSafeMode.checkSafeMode();
}
/** @return the total bytes-in-future count tracked by safe mode. */
public long getBytesInFuture() {
return bmSafeMode.getBytesInFuture();
}
/** @return bytes-in-future for replicated blocks, per the safe-mode tracker. */
public long getBytesInFutureReplicatedBlocks() {
return bmSafeMode.getBytesInFutureBlocks();
}
/** @return bytes-in-future for EC block groups, per the safe-mode tracker. */
public long getBytesInFutureECBlockGroups() {
return bmSafeMode.getBytesInFutureECBlockGroups();
}
/**
 * Removes the blocks from blocksmap and updates the safemode blocks total.
 *
 * @param blocks An instance of {@link BlocksMapUpdateInfo} which contains a
 * list of blocks that need to be removed from blocksMap
 */
public void removeBlocksAndUpdateSafemodeTotal(BlocksMapUpdateInfo blocks) {
  assert namesystem.hasWriteLock();
  // In the case that we are a Standby tailing edits from the
  // active while in safe-mode, we need to track the total number
  // of blocks and safe blocks in the system.
  boolean trackBlockCounts = bmSafeMode.isSafeModeTrackingBlocks();
  int numRemovedComplete = 0, numRemovedSafe = 0;
  for (BlockInfo b : blocks.getToDeleteList()) {
    if (trackBlockCounts) {
      // Only complete blocks count toward the totals; a block is "safe"
      // when it currently has the minimum number of storages.
      if (b.isComplete()) {
        numRemovedComplete++;
        if (hasMinStorage(b, b.numNodes())) {
          numRemovedSafe++;
        }
      }
    }
    removeBlock(b);
  }
  if (trackBlockCounts) {
    // Fix: the two concatenated fragments previously rendered as
    // "...for deletion.decreasing..." with no separator between them.
    LOG.debug("Adjusting safe-mode totals for deletion."
        + " Decreasing safeBlocks by {}, totalBlocks by {}",
        numRemovedSafe, numRemovedComplete);
    bmSafeMode.adjustBlockTotals(-numRemovedSafe, -numRemovedComplete);
  }
}
/** @return the capacity reported by the provided storage map. */
public long getProvidedCapacity() {
return providedStorageMap.getCapacity();
}
/**
 * Apply a heartbeat from a datanode: first let the provided storage map
 * observe each reported storage, then update the node descriptor's own
 * usage, cache, xceiver and failed-volume state.
 */
public void updateHeartbeat(DatanodeDescriptor node, StorageReport[] reports,
    long cacheCapacity, long cacheUsed, int xceiverCount, int failedVolumes,
    VolumeFailureSummary volumeFailureSummary) {
  for (StorageReport storageReport : reports) {
    providedStorageMap.updateStorage(node, storageReport.getStorage());
  }
  node.updateHeartbeat(reports, cacheCapacity, cacheUsed, xceiverCount,
      failedVolumes, volumeFailureSummary);
}
/**
 * StatefulBlockInfo is used to build the "toUC" list, which is a list of
 * updates to the information about under-construction blocks.
 * Besides the block in question, it provides the ReplicaState
 * reported by the datanode in the block report.
 */
static class StatefulBlockInfo {
// Block as tracked by the NameNode; the constructor requires that it
// is not yet complete (i.e. still under construction).
final BlockInfo storedBlock; // should be UC block
// The block exactly as the datanode reported it.
final Block reportedBlock;
// Replica state attached to the report (e.g. FINALIZED, RBW).
final ReplicaState reportedState;
StatefulBlockInfo(BlockInfo storedBlock,
Block reportedBlock, ReplicaState reportedState) {
Preconditions.checkArgument(!storedBlock.isComplete());
this.storedBlock = storedBlock;
this.reportedBlock = reportedBlock;
this.reportedState = reportedState;
}
}
/**
 * Pairs a block already present in the blocks map with the corresponding
 * replica as reported by a datanode; used to build the "toAdd" list
 * during block report processing.
 */
private static class BlockInfoToAdd {
// Block instance from the NameNode's blocks map.
final BlockInfo stored;
// Replica as reported by the datanode.
final Block reported;
BlockInfoToAdd(BlockInfo stored, Block reported) {
this.stored = stored;
this.reported = reported;
}
}
/**
 * The given storage is reporting all its blocks.
 * Update the (storage-->block list) and (block-->storage list) maps.
 *
 * @param nodeID identity of the reporting datanode
 * @param storage the datanode storage this report covers
 * @param newReport the full list of blocks on that storage
 * @param context per-report metadata (report id, lease id); may be null
 * @return true if all known storages of the given DN have finished reporting.
 * @throws IOException
 */
public boolean processReport(final DatanodeID nodeID,
final DatanodeStorage storage,
final BlockListAsLongs newReport,
BlockReportContext context) throws IOException {
namesystem.writeLock();
final long startTime = Time.monotonicNow(); //after acquiring write lock
final long endTime;
DatanodeDescriptor node;
Collection<Block> invalidatedBlocks = Collections.emptyList();
String strBlockReportId =
context != null ? Long.toHexString(context.getReportId()) : "";
try {
node = datanodeManager.getDatanode(nodeID);
if (node == null || !node.isRegistered()) {
throw new IOException(
"ProcessReport from dead or unregistered node: " + nodeID);
}
// To minimize startup time, we discard any second (or later) block reports
// that we receive while still in startup phase.
// Register DN with provided storage, not with storage owned by DN
// DN should still have a ref to the DNStorageInfo.
DatanodeStorageInfo storageInfo =
providedStorageMap.getStorage(node, storage);
if (storageInfo == null) {
// We handle this for backwards compatibility.
storageInfo = node.updateStorage(storage);
}
if (namesystem.isInStartupSafeMode()
&& storageInfo.getBlockReportCount() > 0) {
blockLog.info("BLOCK* processReport 0x{}: "
+ "discarded non-initial block report from {}"
+ " because namenode still in startup phase",
strBlockReportId, nodeID);
blockReportLeaseManager.removeLease(node);
return !node.hasStaleStorages();
}
// A non-null context means the DN is using the report-lease protocol;
// reject the report if its lease no longer checks out.
if (context != null) {
if (!blockReportLeaseManager.checkLease(node, startTime,
context.getLeaseId())) {
return false;
}
}
if (storageInfo.getBlockReportCount() == 0) {
// The first block report can be processed a lot more efficiently than
// ordinary block reports. This shortens restart times.
blockLog.info("BLOCK* processReport 0x{}: Processing first "
+ "storage report for {} from datanode {}",
strBlockReportId,
storageInfo.getStorageID(),
nodeID.getDatanodeUuid());
processFirstBlockReport(storageInfo, newReport);
} else {
// Block reports for provided storage are not
// maintained by DN heartbeats
if (!StorageType.PROVIDED.equals(storageInfo.getStorageType())) {
invalidatedBlocks = processReport(storageInfo, newReport, context);
}
}
storageInfo.receivedBlockReport();
} finally {
// Capture the processing end time before releasing the write lock.
endTime = Time.monotonicNow();
namesystem.writeUnlock();
}
for (Block b : invalidatedBlocks) {
blockLog.debug("BLOCK* processReport 0x{}: {} on node {} size {} does not"
+ " belong to any file", strBlockReportId, b, node, b.getNumBytes());
}
// Log the block report processing stats from Namenode perspective
final NameNodeMetrics metrics = NameNode.getNameNodeMetrics();
if (metrics != null) {
metrics.addStorageBlockReport((int) (endTime - startTime));
}
blockLog.info("BLOCK* processReport 0x{}: from storage {} node {}, " +
"blocks: {}, hasStaleStorage: {}, processing time: {} msecs, " +
"invalidatedBlocks: {}", strBlockReportId, storage.getStorageID(),
nodeID, newReport.getNumberOfBlocks(),
node.hasStaleStorages(), (endTime - startTime),
invalidatedBlocks.size());
return !node.hasStaleStorages();
}
/**
 * If this RPC is the last one of a multi-RPC full block report, release
 * the node's block report lease and stamp its last-report times.
 */
public void removeBRLeaseIfNeeded(final DatanodeID nodeID,
    final BlockReportContext context) throws IOException {
  namesystem.writeLock();
  try {
    final DatanodeDescriptor node = datanodeManager.getDatanode(nodeID);
    if (context == null) {
      // No report context: nothing lease-related to do.
      return;
    }
    final int curRpc = context.getCurRpc();
    final int totalRpcs = context.getTotalRpcs();
    if (totalRpcs == curRpc + 1) {
      // This was the final RPC of the report; drop the lease now.
      final long leaseId = this.getBlockReportLeaseManager().removeLease(node);
      BlockManagerFaultInjector.getInstance()
          .removeBlockReportLease(node, leaseId);
      node.setLastBlockReportTime(now());
      node.setLastBlockReportMonotonic(Time.monotonicNow());
    }
    LOG.debug("Processing RPC with index {} out of total {} RPCs in "
        + "processReport 0x{}", curRpc, totalRpcs,
        Long.toHexString(context.getReportId()));
  } finally {
    namesystem.writeUnlock();
  }
}
/**
 * Rescan the list of blocks which were previously postponed.
 * Processes at most {@code blocksPerPostpondedRescan} blocks per call;
 * blocks whose re-scan result is POSTPONE are put back on the postponed
 * list before the write lock is released.
 */
void rescanPostponedMisreplicatedBlocks() {
if (getPostponedMisreplicatedBlocksCount() == 0) {
return;
}
namesystem.writeLock();
long startTime = Time.monotonicNow();
long startSize = postponedMisreplicatedBlocks.size();
try {
Iterator<Block> it = postponedMisreplicatedBlocks.iterator();
for (int i=0; i < blocksPerPostpondedRescan && it.hasNext(); i++) {
Block b = it.next();
it.remove();
BlockInfo bi = getStoredBlock(b);
if (bi == null) {
LOG.debug("BLOCK* rescanPostponedMisreplicatedBlocks: " +
"Postponed mis-replicated block {} no longer found " +
"in block map.", b);
continue;
}
MisReplicationResult res = processMisReplicatedBlock(bi);
LOG.debug("BLOCK* rescanPostponedMisreplicatedBlocks: " +
"Re-scanned block {}, result is {}", b, res);
if (res == MisReplicationResult.POSTPONE) {
// Still cannot be resolved; collect for re-insertion below.
rescannedMisreplicatedBlocks.add(b);
}
}
} finally {
// Re-insert blocks that must stay postponed, then measure the net
// shrinkage of the postponed set while still under the lock.
postponedMisreplicatedBlocks.addAll(rescannedMisreplicatedBlocks);
rescannedMisreplicatedBlocks.clear();
long endSize = postponedMisreplicatedBlocks.size();
namesystem.writeUnlock();
LOG.info("Rescan of postponedMisreplicatedBlocks completed in {}" +
" msecs. {} blocks are left. {} blocks were removed.",
(Time.monotonicNow() - startTime), endSize, (startSize - endSize));
}
}
/**
 * Diff a (non-initial) full block report against the NameNode's current
 * state for this storage, then apply each resulting queue in order:
 * under-construction updates, removals, additions, invalidations, and
 * corrupt markings.
 *
 * @param storageInfo the storage the report is for
 * @param report the reported block list
 * @param context per-report metadata; may be null (report then treated
 *        as unsorted)
 * @return the reported blocks that belong to no file and should be
 *         invalidated on the datanode
 * @throws IOException
 */
Collection<Block> processReport(
final DatanodeStorageInfo storageInfo,
final BlockListAsLongs report,
BlockReportContext context) throws IOException {
// Normal case:
// Modify the (block-->datanode) map, according to the difference
// between the old and new block report.
//
Collection<BlockInfoToAdd> toAdd = new LinkedList<>();
Collection<BlockInfo> toRemove = new TreeSet<>();
Collection<Block> toInvalidate = new LinkedList<>();
Collection<BlockToMarkCorrupt> toCorrupt = new LinkedList<>();
Collection<StatefulBlockInfo> toUC = new LinkedList<>();
boolean sorted = false;
String strBlockReportId = "";
if (context != null) {
sorted = context.isSorted();
strBlockReportId = Long.toHexString(context.getReportId());
}
Iterable<BlockReportReplica> sortedReport;
if (!sorted) {
// reportDiffSorted requires sorted input, so sort a copy here.
blockLog.warn("BLOCK* processReport 0x{}: Report from the DataNode ({}) "
+ "is unsorted. This will cause overhead on the NameNode "
+ "which needs to sort the Full BR. Please update the "
+ "DataNode to the same version of Hadoop HDFS as the "
+ "NameNode ({}).",
strBlockReportId,
storageInfo.getDatanodeDescriptor().getDatanodeUuid(),
VersionInfo.getVersion());
Set<BlockReportReplica> set = new FoldedTreeSet<>();
for (BlockReportReplica iblk : report) {
set.add(new BlockReportReplica(iblk));
}
sortedReport = set;
} else {
sortedReport = report;
}
reportDiffSorted(storageInfo, sortedReport,
toAdd, toRemove, toInvalidate, toCorrupt, toUC);
DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
// Process the blocks on each queue
for (StatefulBlockInfo b : toUC) {
addStoredBlockUnderConstruction(b, storageInfo);
}
for (BlockInfo b : toRemove) {
removeStoredBlock(b, node);
}
// Only the first maxNumBlocksToLog additions are logged verbosely.
int numBlocksLogged = 0;
for (BlockInfoToAdd b : toAdd) {
addStoredBlock(b.stored, b.reported, storageInfo, null,
numBlocksLogged < maxNumBlocksToLog);
numBlocksLogged++;
}
if (numBlocksLogged > maxNumBlocksToLog) {
blockLog.info("BLOCK* processReport 0x{}: logged info for {} of {} " +
"reported.", strBlockReportId, maxNumBlocksToLog, numBlocksLogged);
}
for (Block b : toInvalidate) {
addToInvalidates(b, node);
}
for (BlockToMarkCorrupt b : toCorrupt) {
markBlockAsCorrupt(b, storageInfo, node);
}
return toInvalidate;
}
/**
 * Mark block replicas as corrupt except those on the storages in
 * newStorages list. A replica is marked when either the generation
 * stamp or the length of the stored block differs from the old values.
 */
public void markBlockReplicasAsCorrupt(Block oldBlock,
    BlockInfo block,
    long oldGenerationStamp, long oldNumBytes,
    DatanodeStorageInfo[] newStorages) throws IOException {
  assert namesystem.hasWriteLock();
  final BlockToMarkCorrupt b;
  if (block.getGenerationStamp() != oldGenerationStamp) {
    b = new BlockToMarkCorrupt(oldBlock, block, oldGenerationStamp,
        "genstamp does not match " + oldGenerationStamp
        + " : " + block.getGenerationStamp(), Reason.GENSTAMP_MISMATCH);
  } else if (block.getNumBytes() != oldNumBytes) {
    b = new BlockToMarkCorrupt(oldBlock, block,
        "length does not match " + oldNumBytes
        + " : " + block.getNumBytes(), Reason.SIZE_MISMATCH);
  } else {
    // Neither genstamp nor length differs: nothing to mark.
    return;
  }
  for (DatanodeStorageInfo storage : getStorages(block)) {
    if (containsStorage(newStorages, storage)) {
      // Storage is in the new committed list; its replica stays valid.
      continue;
    }
    blockLog.debug("BLOCK* markBlockReplicasAsCorrupt: mark block replica" +
        " {} on {} as corrupt because the dn is not in the new committed " +
        "storage list.", b, storage.getDatanodeDescriptor());
    markBlockAsCorrupt(b, storage, storage.getDatanodeDescriptor());
  }
}

/** @return true if {@code storages} is non-null and contains {@code storage}. */
private static boolean containsStorage(DatanodeStorageInfo[] storages,
    DatanodeStorageInfo storage) {
  if (storages == null) {
    return false;
  }
  for (DatanodeStorageInfo candidate : storages) {
    if (candidate != null && storage.equals(candidate)) {
      return true;
    }
  }
  return false;
}
/**
 * processFirstBlockReport is intended only for processing "initial" block
 * reports, the first block report received from a DN after it registers.
 * It just adds all the valid replicas to the datanode, without calculating
 * a toRemove list (since there won't be any). It also silently discards
 * any invalid blocks, thereby deferring their processing until
 * the next block report.
 * @param storageInfo - DatanodeStorageInfo that sent the report
 * @param report - the initial block report, to be processed
 * @throws IOException
 */
void processFirstBlockReport(
final DatanodeStorageInfo storageInfo,
final BlockListAsLongs report) throws IOException {
if (report == null) return;
assert (namesystem.hasWriteLock());
assert (storageInfo.getBlockReportCount() == 0);
for (BlockReportReplica iblk : report) {
ReplicaState reportedState = iblk.getState();
if (LOG.isDebugEnabled()) {
LOG.debug("Initial report of block {} on {} size {} replicaState = {}",
iblk.getBlockName(), storageInfo.getDatanodeDescriptor(),
iblk.getNumBytes(), reportedState);
}
// Replicas with a future generation stamp are queued for later
// instead of being processed now (standby behavior).
if (shouldPostponeBlocksFromFuture && isGenStampInFuture(iblk)) {
queueReportedBlock(storageInfo, iblk, reportedState,
QUEUE_REASON_FUTURE_GENSTAMP);
continue;
}
BlockInfo storedBlock = getStoredBlock(iblk);
// If block does not belong to any file, we check if it violates
// an integrity assumption of Name node
if (storedBlock == null) {
bmSafeMode.checkBlocksWithFutureGS(iblk);
continue;
}
// If block is corrupt, mark it and continue to next block.
BlockUCState ucState = storedBlock.getBlockUCState();
BlockToMarkCorrupt c = checkReplicaCorrupt(
iblk, reportedState, storedBlock, ucState,
storageInfo.getDatanodeDescriptor());
if (c != null) {
if (shouldPostponeBlocksFromFuture) {
// In the Standby, we may receive a block report for a file that we
// just have an out-of-date gen-stamp or state for, for example.
queueReportedBlock(storageInfo, iblk, reportedState,
QUEUE_REASON_CORRUPT_STATE);
} else {
markBlockAsCorrupt(c, storageInfo, storageInfo.getDatanodeDescriptor());
}
continue;
}
// If block is under construction, add this replica to its list
if (isBlockUnderConstruction(storedBlock, ucState, reportedState)) {
storedBlock.getUnderConstructionFeature()
.addReplicaIfNotPresent(storageInfo, iblk, reportedState);
// OpenFileBlocks only inside snapshots also will be added to safemode
// threshold. So we need to update such blocks to safemode
// refer HDFS-5283
if (namesystem.isInSnapshot(storedBlock.getBlockCollectionId())) {
int numOfReplicas = storedBlock.getUnderConstructionFeature()
.getNumExpectedLocations();
bmSafeMode.incrementSafeBlockCount(numOfReplicas, storedBlock);
}
//and fall through to next clause
}
//add replica if appropriate
if (reportedState == ReplicaState.FINALIZED) {
addStoredBlockImmediate(storedBlock, iblk, storageInfo);
}
}
}
/**
 * Merge-join a sorted block report against this storage's sorted block
 * iterator, partitioning the replicas into the output collections.
 * Both inputs must be sorted by block id for the two-pointer walk below
 * to be correct.
 */
private void reportDiffSorted(DatanodeStorageInfo storageInfo,
Iterable<BlockReportReplica> newReport,
Collection<BlockInfoToAdd> toAdd, // add to DatanodeDescriptor
Collection<BlockInfo> toRemove, // remove from DatanodeDescriptor
Collection<Block> toInvalidate, // should be removed from DN
Collection<BlockToMarkCorrupt> toCorrupt, // add to corrupt replicas list
Collection<StatefulBlockInfo> toUC) { // add to under-construction list
// The blocks must be sorted and the storagenodes blocks must be sorted
Iterator<BlockInfo> storageBlocksIterator = storageInfo.getBlockIterator();
DatanodeDescriptor dn = storageInfo.getDatanodeDescriptor();
// Carries the current storage-side block across loop iterations so the
// two sorted sequences advance in lock step.
BlockInfo storageBlock = null;
for (BlockReportReplica replica : newReport) {
long replicaID = replica.getBlockId();
// A striped-range block id is normalized to its block-group id unless
// a legacy non-EC block actually uses that id.
if (BlockIdManager.isStripedBlockID(replicaID)
&& (!hasNonEcBlockUsingStripedID ||
!blocksMap.containsBlock(replica))) {
replicaID = BlockIdManager.convertToStripedID(replicaID);
}
ReplicaState reportedState = replica.getState();
LOG.debug("Reported block {} on {} size {} replicaState = {}",
replica, dn, replica.getNumBytes(), reportedState);
if (shouldPostponeBlocksFromFuture
&& isGenStampInFuture(replica)) {
queueReportedBlock(storageInfo, replica, reportedState,
QUEUE_REASON_FUTURE_GENSTAMP);
continue;
}
if (storageBlock == null && storageBlocksIterator.hasNext()) {
storageBlock = storageBlocksIterator.next();
}
do {
int cmp;
if (storageBlock == null ||
(cmp = Long.compare(replicaID, storageBlock.getBlockId())) < 0) {
// Check if block is available in NN but not yet on this storage
BlockInfo nnBlock = blocksMap.getStoredBlock(new Block(replicaID));
if (nnBlock != null) {
reportDiffSortedInner(storageInfo, replica, reportedState,
nnBlock, toAdd, toCorrupt, toUC);
} else {
// Replica not found anywhere so it should be invalidated
toInvalidate.add(new Block(replica));
}
break;
} else if (cmp == 0) {
// Replica matched current storageblock
reportDiffSortedInner(storageInfo, replica, reportedState,
storageBlock, toAdd, toCorrupt, toUC);
storageBlock = null;
} else {
// replica has higher ID than storedBlock
// Remove all stored blocks with IDs lower than replica
do {
toRemove.add(storageBlock);
storageBlock = storageBlocksIterator.hasNext()
? storageBlocksIterator.next() : null;
} while (storageBlock != null &&
Long.compare(replicaID, storageBlock.getBlockId()) > 0);
}
} while (storageBlock != null);
}
// Iterate any remaining blocks that have not been reported and remove them
while (storageBlocksIterator.hasNext()) {
toRemove.add(storageBlocksIterator.next());
}
}
/**
 * Classify a single reported replica against its stored block and append
 * it to the matching output queue: corrupt, under-construction, or add.
 * Replicas already scheduled for invalidation on this datanode are
 * skipped entirely.
 */
private void reportDiffSortedInner(
final DatanodeStorageInfo storageInfo,
final BlockReportReplica replica, final ReplicaState reportedState,
final BlockInfo storedBlock,
final Collection<BlockInfoToAdd> toAdd,
final Collection<BlockToMarkCorrupt> toCorrupt,
final Collection<StatefulBlockInfo> toUC) {
assert replica != null;
assert storedBlock != null;
DatanodeDescriptor dn = storageInfo.getDatanodeDescriptor();
BlockUCState ucState = storedBlock.getBlockUCState();
// Block is on the NN
LOG.debug("In memory blockUCState = {}", ucState);
// Ignore replicas already scheduled to be removed from the DN
if (invalidateBlocks.contains(dn, replica)) {
return;
}
BlockToMarkCorrupt c = checkReplicaCorrupt(replica, reportedState,
storedBlock, ucState, dn);
if (c != null) {
if (shouldPostponeBlocksFromFuture) {
// If the block is an out-of-date generation stamp or state,
// but we're the standby, we shouldn't treat it as corrupt,
// but instead just queue it for later processing.
// TODO: Pretty confident this should be s/storedBlock/block below,
// since we should be postponing the info of the reported block, not
// the stored block. See HDFS-6289 for more context.
queueReportedBlock(storageInfo, storedBlock, reportedState,
QUEUE_REASON_CORRUPT_STATE);
} else {
toCorrupt.add(c);
}
} else if (isBlockUnderConstruction(storedBlock, ucState, reportedState)) {
toUC.add(new StatefulBlockInfo(storedBlock, new Block(replica),
reportedState));
} else if (reportedState == ReplicaState.FINALIZED &&
(storedBlock.findStorageInfo(storageInfo) == -1 ||
corruptReplicas.isReplicaCorrupt(storedBlock, dn))) {
// Add replica if appropriate. If the replica was previously corrupt
// but now okay, it might need to be updated.
toAdd.add(new BlockInfoToAdd(storedBlock, new Block(replica)));
}
}
/**
 * Queue the given reported block for later processing in the
 * standby node. @see PendingDataNodeMessages.
 * @param storageInfo storage that reported the block
 * @param block the block as reported by the datanode
 * @param reportedState replica state carried in the report
 * @param reason a textual reason to report in the debug logs
 */
private void queueReportedBlock(DatanodeStorageInfo storageInfo, Block block,
ReplicaState reportedState, String reason) {
// Queueing is only valid while block postponement is enabled (standby).
assert shouldPostponeBlocksFromFuture;
LOG.debug("Queueing reported block {} in state {}" +
" from datanode {} for later processing because {}.",
block, reportedState, storageInfo.getDatanodeDescriptor(), reason);
pendingDNMessages.enqueueReportedBlock(storageInfo, block, reportedState);
}
/**
 * Try to process any messages that were previously queued for the given
 * block. This is called from FSEditLogLoader whenever a block's state
 * in the namespace has changed or a new block has been created.
 */
public void processQueuedMessagesForBlock(Block b) throws IOException {
  final Queue<ReportedBlockInfo> queued = pendingDNMessages.takeBlockQueue(b);
  if (queued != null) {
    processQueuedMessages(queued);
  }
  // A null queue means nothing was pending for this block.
}
/**
 * Re-process a batch of previously queued datanode block messages.
 * A message whose reported state is null encodes a DELETE_BLOCK request;
 * any other message is replayed through the normal reported-block path.
 *
 * @param rbis the queued messages to replay, in order
 * @throws IOException if replaying a message fails
 */
private void processQueuedMessages(Iterable<ReportedBlockInfo> rbis)
    throws IOException {
  for (ReportedBlockInfo rbi : rbis) {
    // Typo fix in log message: "previouly" -> "previously".
    LOG.debug("Processing previously queued message {}", rbi);
    if (rbi.getReportedState() == null) {
      // This is a DELETE_BLOCK request
      DatanodeStorageInfo storageInfo = rbi.getStorageInfo();
      removeStoredBlock(getStoredBlock(rbi.getBlock()),
          storageInfo.getDatanodeDescriptor());
    } else {
      processAndHandleReportedBlock(rbi.getStorageInfo(),
          rbi.getBlock(), rbi.getReportedState(), null);
    }
  }
}
/**
 * Process any remaining queued datanode messages after entering
 * active state. At this point they will not be re-queued since
 * we are the definitive master node and thus should be up-to-date
 * with the namespace information.
 */
public void processAllPendingDNMessages() throws IOException {
  assert !shouldPostponeBlocksFromFuture :
      "processAllPendingDNMessages() should be called after disabling " +
      "block postponement.";
  final int queuedCount = pendingDNMessages.count();
  if (queuedCount > 0) {
    LOG.info("Processing {} messages from DataNodes " +
        "that were previously queued during standby state", queuedCount);
  }
  processQueuedMessages(pendingDNMessages.takeAll());
  // Everything must have drained; nothing may be re-queued here.
  assert pendingDNMessages.count() == 0;
}
/**
 * The next two methods test the various cases under which we must conclude
 * the replica is corrupt, or under construction. These are laid out
 * as switch statements, on the theory that it is easier to understand
 * the combinatorics of reportedState and ucState that way. It should be
 * at least as efficient as boolean expressions.
 *
 * @param reported the replica exactly as the datanode reported it
 * @param reportedState replica state carried in the report
 * @param storedBlock the block as tracked by the NameNode
 * @param ucState the stored block's under-construction state
 * @param dn the reporting datanode (used in log/corrupt messages)
 * @return a BlockToMarkCorrupt object, or null if the replica is not corrupt
 */
private BlockToMarkCorrupt checkReplicaCorrupt(
Block reported, ReplicaState reportedState,
BlockInfo storedBlock, BlockUCState ucState,
DatanodeDescriptor dn) {
switch(reportedState) {
case FINALIZED:
switch(ucState) {
case COMPLETE:
case COMMITTED:
// For a COMPLETE/COMMITTED block, genstamp must match exactly.
if (storedBlock.getGenerationStamp() != reported.getGenerationStamp()) {
final long reportedGS = reported.getGenerationStamp();
return new BlockToMarkCorrupt(new Block(reported), storedBlock, reportedGS,
"block is " + ucState + " and reported genstamp " + reportedGS
+ " does not match genstamp in block map "
+ storedBlock.getGenerationStamp(), Reason.GENSTAMP_MISMATCH);
}
boolean wrongSize;
if (storedBlock.isStriped()) {
// For striped blocks the expected size depends on which internal
// block (index) of the group this replica is.
assert BlockIdManager.isStripedBlockID(reported.getBlockId());
assert storedBlock.getBlockId() ==
BlockIdManager.convertToStripedID(reported.getBlockId());
BlockInfoStriped stripedBlock = (BlockInfoStriped) storedBlock;
int reportedBlkIdx = BlockIdManager.getBlockIndex(reported);
wrongSize = reported.getNumBytes() != getInternalBlockLength(
stripedBlock.getNumBytes(), stripedBlock.getCellSize(),
stripedBlock.getDataBlockNum(), reportedBlkIdx);
} else {
wrongSize = storedBlock.getNumBytes() != reported.getNumBytes();
}
if (wrongSize) {
return new BlockToMarkCorrupt(new Block(reported), storedBlock,
"block is " + ucState + " and reported length " +
reported.getNumBytes() + " does not match " +
"length in block map " + storedBlock.getNumBytes(),
Reason.SIZE_MISMATCH);
} else {
return null; // not corrupt
}
case UNDER_CONSTRUCTION:
// An under-construction block may legitimately have a newer
// genstamp than the report; only an older report is corrupt.
if (storedBlock.getGenerationStamp() > reported.getGenerationStamp()) {
final long reportedGS = reported.getGenerationStamp();
return new BlockToMarkCorrupt(new Block(reported), storedBlock, reportedGS,
"block is " + ucState + " and reported state " + reportedState
+ ", But reported genstamp " + reportedGS
+ " does not match genstamp in block map "
+ storedBlock.getGenerationStamp(), Reason.GENSTAMP_MISMATCH);
}
return null;
default:
return null;
}
case RBW:
case RWR:
if (!storedBlock.isComplete()) {
return null; // not corrupt
} else if (storedBlock.getGenerationStamp() != reported.getGenerationStamp()) {
final long reportedGS = reported.getGenerationStamp();
return new BlockToMarkCorrupt(new Block(reported), storedBlock, reportedGS,
"reported " + reportedState + " replica with genstamp " + reportedGS
+ " does not match COMPLETE block's genstamp in block map "
+ storedBlock.getGenerationStamp(), Reason.GENSTAMP_MISMATCH);
} else { // COMPLETE block, same genstamp
if (reportedState == ReplicaState.RBW) {
// If it's a RBW report for a COMPLETE block, it may just be that
// the block report got a little bit delayed after the pipeline
// closed. So, ignore this report, assuming we will get a
// FINALIZED replica later. See HDFS-2791
LOG.info("Received an RBW replica for {} on {}: ignoring it, since "
+ "it is complete with the same genstamp", storedBlock, dn);
return null;
} else {
return new BlockToMarkCorrupt(new Block(reported), storedBlock,
"reported replica has invalid state " + reportedState,
Reason.INVALID_STATE);
}
}
case RUR: // should not be reported
case TEMPORARY: // should not be reported
default:
String msg = "Unexpected replica state " + reportedState
+ " for block: " + storedBlock +
" on " + dn + " size " + storedBlock.getNumBytes();
// log here at WARN level since this is really a broken HDFS invariant
LOG.warn("{}", msg);
return new BlockToMarkCorrupt(new Block(reported), storedBlock, msg,
Reason.INVALID_STATE);
}
}
/**
 * Decide whether a reported replica should be handled through the stored
 * block's under-construction path (see the combinatorics note above).
 */
private boolean isBlockUnderConstruction(BlockInfo storedBlock,
    BlockUCState ucState, ReplicaState reportedState) {
  switch (reportedState) {
  case FINALIZED:
    // A finalized replica only joins the UC list while the NN still
    // considers the block under construction or recovery.
    return ucState == BlockUCState.UNDER_CONSTRUCTION
        || ucState == BlockUCState.UNDER_RECOVERY;
  case RBW:
  case RWR:
    return !storedBlock.isComplete();
  case RUR: // should not be reported
  case TEMPORARY: // should not be reported
  default:
    return false;
  }
}
/**
 * Record a replica of an under-construction block on the given storage,
 * and store the block on the node when the replica is already FINALIZED
 * but not yet associated with this storage.
 */
void addStoredBlockUnderConstruction(StatefulBlockInfo ucBlock,
    DatanodeStorageInfo storageInfo) throws IOException {
  final BlockInfo storedBlock = ucBlock.storedBlock;
  storedBlock.getUnderConstructionFeature().addReplicaIfNotPresent(
      storageInfo, ucBlock.reportedBlock, ucBlock.reportedState);
  final boolean finalized = ucBlock.reportedState == ReplicaState.FINALIZED;
  if (finalized && storedBlock.findStorageInfo(storageInfo) < 0) {
    addStoredBlock(storedBlock, ucBlock.reportedBlock, storageInfo, null, true);
  }
}
/**
 * Faster version of {@link #addStoredBlock},
 * intended for use with initial block report at startup. If not in startup
 * safe mode, will call standard addStoredBlock(). Assumes this method is
 * called "immediately" so there is no need to refresh the storedBlock from
 * blocksMap. Doesn't handle low redundancy/extra redundancy, or worry about
 * pendingReplications or corruptReplicas, because it's in startup safe mode.
 * Doesn't log every block, because there are typically millions of them.
 *
 * @param storedBlock the block as tracked in the blocks map
 * @param reported the replica as reported by the datanode
 * @param storageInfo the storage the replica lives on
 * @throws IOException
 */
private void addStoredBlockImmediate(BlockInfo storedBlock, Block reported,
DatanodeStorageInfo storageInfo)
throws IOException {
assert (storedBlock != null && namesystem.hasWriteLock());
if (!namesystem.isInStartupSafeMode()
|| isPopulatingReplQueues()) {
// Fall back to the full-featured path outside startup safe mode.
addStoredBlock(storedBlock, reported, storageInfo, null, false);
return;
}
// just add it
AddBlockResult result = storageInfo.addBlockInitial(storedBlock, reported);
// Now check for completion of blocks and safe block count
int numCurrentReplica = countLiveNodes(storedBlock);
if (storedBlock.getBlockUCState() == BlockUCState.COMMITTED
&& hasMinStorage(storedBlock, numCurrentReplica)) {
completeBlock(storedBlock, null, false);
} else if (storedBlock.isComplete() && result == AddBlockResult.ADDED) {
// check whether safe replication is reached for the block
// only complete blocks are counted towards that.
// In the case that the block just became complete above, completeBlock()
// handles the safe block count maintenance.
bmSafeMode.incrementSafeBlockCount(numCurrentReplica, storedBlock);
}
}
/**
* Modify (block-->datanode) map. Remove block from set of
* needed reconstruction if this takes care of the problem.
* @return the block that is stored in blocksMap.
*/
private Block addStoredBlock(final BlockInfo block,
    final Block reportedBlock,
    DatanodeStorageInfo storageInfo,
    DatanodeDescriptor delNodeHint,
    boolean logEveryBlock)
    throws IOException {
  // Must hold the namesystem write lock: this mutates blocksMap, pending
  // reconstruction state and the needed-reconstruction queues.
  assert block != null && namesystem.hasWriteLock();
  BlockInfo storedBlock;
  DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
  if (!block.isComplete()) {
    // Refresh our copy in case the block got completed in another thread.
    storedBlock = getStoredBlock(block);
  } else {
    storedBlock = block;
  }
  if (storedBlock == null || storedBlock.isDeleted()) {
    // If this block does not belong to any file, then we are done.
    blockLog.debug("BLOCK* addStoredBlock: {} on {} size {} but it does not" +
        " belong to any file", block, node, block.getNumBytes());
    // We could add this block to the invalidate set of this datanode;
    // it will happen in the next block report otherwise.
    return block;
  }
  // Record the replica on the reporting storage; the result tells us whether
  // this is a brand-new replica, a replica that moved storages, or a repeat.
  AddBlockResult result = storageInfo.addBlock(storedBlock, reportedBlock);
  int curReplicaDelta;
  if (result == AddBlockResult.ADDED) {
    // A replica on a decommissioned node does not count toward redundancy.
    curReplicaDelta = (node.isDecommissioned()) ? 0 : 1;
    if (logEveryBlock) {
      blockLog.debug("BLOCK* addStoredBlock: {} is added to {} (size={})",
          node, storedBlock, storedBlock.getNumBytes());
    }
  } else if (result == AddBlockResult.REPLACED) {
    curReplicaDelta = 0;
    blockLog.warn("BLOCK* addStoredBlock: block {} moved to storageType " +
        "{} on node {}", storedBlock, storageInfo.getStorageType(), node);
  } else {
    // if the same block is added again and the replica was corrupt
    // previously because of a wrong gen stamp, remove it from the
    // corrupt block list.
    corruptReplicas.removeFromCorruptReplicasMap(block, node,
        Reason.GENSTAMP_MISMATCH);
    curReplicaDelta = 0;
    blockLog.debug("BLOCK* addStoredBlock: Redundant addStoredBlock request"
        + " received for {} on node {} size {}", storedBlock, node,
        storedBlock.getNumBytes());
  }
  // Now check for completion of blocks and safe block count.
  NumberReplicas num = countNodes(storedBlock);
  int numLiveReplicas = num.liveReplicas();
  int pendingNum = pendingReconstruction.getNumReplicas(storedBlock);
  int numCurrentReplica = numLiveReplicas + pendingNum;
  if(storedBlock.getBlockUCState() == BlockUCState.COMMITTED &&
      hasMinStorage(storedBlock, numLiveReplicas)) {
    // A committed block with enough live storages can be completed now.
    addExpectedReplicasToPending(storedBlock);
    completeBlock(storedBlock, null, false);
  } else if (storedBlock.isComplete() && result == AddBlockResult.ADDED) {
    // check whether safe replication is reached for the block
    // only complete blocks are counted towards that
    // Is no-op if not in safe mode.
    // In the case that the block just became complete above, completeBlock()
    // handles the safe block count maintenance.
    bmSafeMode.incrementSafeBlockCount(numCurrentReplica, storedBlock);
  }
  // if block is still under construction, then done for now
  if (!storedBlock.isCompleteOrCommitted()) {
    return storedBlock;
  }
  // do not try to handle extra/low redundancy blocks during first safe mode
  if (!isPopulatingReplQueues()) {
    return storedBlock;
  }
  // handle low redundancy/extra redundancy
  short fileRedundancy = getExpectedRedundancyNum(storedBlock);
  if (!isNeededReconstruction(storedBlock, num, pendingNum)) {
    neededReconstruction.remove(storedBlock, numCurrentReplica,
        num.readOnlyReplicas(), num.outOfServiceReplicas(), fileRedundancy);
  } else {
    updateNeededReconstructions(storedBlock, curReplicaDelta, 0);
  }
  if (shouldProcessExtraRedundancy(num, fileRedundancy)) {
    processExtraRedundancyBlock(storedBlock, fileRedundancy, node,
        delNodeHint);
  }
  // If the file redundancy has reached the desired value
  // we can remove any corrupt replicas the block may have.
  int corruptReplicasCount = corruptReplicas.numCorruptReplicas(storedBlock);
  int numCorruptNodes = num.corruptReplicas();
  if (numCorruptNodes != corruptReplicasCount) {
    // The two corruption book-keeping structures disagree; log so the
    // inconsistency is visible, but proceed with the corrupt-replica map.
    LOG.warn("Inconsistent number of corrupt replicas for {}" +
        ". blockMap has {} but corrupt replicas map has {}",
        storedBlock, numCorruptNodes, corruptReplicasCount);
  }
  if ((corruptReplicasCount > 0) && (numLiveReplicas >= fileRedundancy)) {
    invalidateCorruptReplicas(storedBlock, reportedBlock, num);
  }
  return storedBlock;
}
/**
 * Decides whether a block should go through extra-redundancy processing.
 * When maintenance replicas exist we only require
 * live + maintenance >= expected (rather than ==); the surplus is removed
 * once the maintenance node transitions back to live. So a block qualifies
 * either when it has more live replicas than expected, or when it is exactly
 * at the expected count but carries redundant internal (striped) blocks.
 */
private boolean shouldProcessExtraRedundancy(NumberReplicas num,
    int expectedNum) {
  final int live = num.liveReplicas();
  if (live > expectedNum) {
    return true;
  }
  return live == expectedNum && num.redundantInternalBlocks() > 0;
}
/**
 * Invalidate corrupt replicas.
 * <p>
 * This will remove the replicas from the block's location list,
 * add them to {@link #invalidateBlocks} so that they could be further
 * deleted from the respective data-nodes,
 * and remove the block from corruptReplicasMap.
 * <p>
 * This method should be called when the block has sufficient
 * number of live replicas.
 *
 * @param blk Block whose corrupt replicas need to be invalidated
 * @param reported the block as reported by the datanode (used to build the
 *        corruption record passed to {@code invalidateBlock})
 * @param numberReplicas current replica counts for {@code blk}
 */
private void invalidateCorruptReplicas(BlockInfo blk, Block reported,
    NumberReplicas numberReplicas) {
  Collection<DatanodeDescriptor> nodes = corruptReplicas.getNodes(blk);
  // Only clear the corrupt-replicas map entry if every corrupt replica was
  // successfully removed from the blocks map below.
  boolean removedFromBlocksMap = true;
  if (nodes == null)
    return;
  // make a copy of the array of nodes in order to avoid
  // ConcurrentModificationException, when the block is removed from the node
  DatanodeDescriptor[] nodesCopy =
      nodes.toArray(new DatanodeDescriptor[nodes.size()]);
  for (DatanodeDescriptor node : nodesCopy) {
    try {
      if (!invalidateBlock(new BlockToMarkCorrupt(reported, blk, null,
          Reason.ANY), node, numberReplicas)) {
        removedFromBlocksMap = false;
      }
    } catch (IOException e) {
      blockLog.debug("invalidateCorruptReplicas error in deleting bad block"
          + " {} on {}", blk, node, e);
      removedFromBlocksMap = false;
    }
  }
  // Remove the block from corruptReplicasMap
  if (removedFromBlocksMap) {
    corruptReplicas.removeFromCorruptReplicasMap(blk);
  }
}
/**
 * For each block in the name-node verify whether it belongs to any file,
 * extra or low redundancy. Place it into the respective queue.
 * The scan itself runs asynchronously on a dedicated daemon thread; any
 * previously running initializer is stopped first and the
 * needed-reconstruction queues are cleared before the new scan starts.
 */
public void processMisReplicatedBlocks() {
  assert namesystem.hasWriteLock();
  // Cancel an in-flight initialization (if any) before restarting the scan.
  stopReconstructionInitializer();
  neededReconstruction.clear();
  reconstructionQueuesInitializer = new Daemon() {
    @Override
    public void run() {
      try {
        processMisReplicatesAsync();
      } catch (InterruptedException ie) {
        LOG.info("Interrupted while processing reconstruction queues.");
      } catch (Exception e) {
        LOG.error("Error while processing reconstruction queues async", e);
      }
    }
  };
  reconstructionQueuesInitializer
      .setName("Reconstruction Queue Initializer");
  reconstructionQueuesInitializer.start();
}
/*
 * Stop the ongoing initialisation of the reconstruction queues.
 * Interrupts the initializer daemon (if running) and waits for it to
 * terminate; the field is always cleared so a new scan can be started.
 */
private void stopReconstructionInitializer() {
  if (reconstructionQueuesInitializer != null) {
    reconstructionQueuesInitializer.interrupt();
    try {
      reconstructionQueuesInitializer.join();
    } catch (final InterruptedException e) {
      LOG.warn("Interrupted while waiting for "
          + "reconstructionQueueInitializer. Returning..");
      // Restore the interrupt status so callers up the stack can observe
      // the interruption instead of having it silently swallowed.
      Thread.currentThread().interrupt();
      return;
    } finally {
      reconstructionQueuesInitializer = null;
    }
  }
}
/*
 * Since the BlocksMapGset does not throw the ConcurrentModificationException
 * and supports further iteration after modification to list, there is a
 * chance of missing the newly added block while iterating. Since every
 * addition to blocksMap will check for mis-replication, missing mis-replication
 * check for new blocks will not be a problem.
 *
 * Scans every block in blocksMap in batches of numBlocksPerIteration,
 * classifying each block and placing it on the appropriate queue. Between
 * batches the write lock is released and the thread sleeps so the NameNode
 * stays responsive during the (potentially long) scan.
 */
private void processMisReplicatesAsync() throws InterruptedException {
  long nrInvalid = 0, nrOverReplicated = 0;
  long nrUnderReplicated = 0, nrPostponed = 0, nrUnderConstruction = 0;
  long startTimeMisReplicatedScan = Time.monotonicNow();
  Iterator<BlockInfo> blocksItr = blocksMap.getBlocks().iterator();
  long totalBlocks = blocksMap.size();
  reconstructionQueuesInitProgress = 0;
  long totalProcessed = 0;
  // Sleep 1..10000 ms per batch, scaled to the batch size, so lock
  // acquisition by other operations is not starved.
  long sleepDuration =
      Math.max(1, Math.min(numBlocksPerIteration/1000, 10000));
  while (namesystem.isRunning() && !Thread.currentThread().isInterrupted()) {
    int processed = 0;
    namesystem.writeLockInterruptibly();
    try {
      while (processed < numBlocksPerIteration && blocksItr.hasNext()) {
        BlockInfo block = blocksItr.next();
        MisReplicationResult res = processMisReplicatedBlock(block);
        switch (res) {
        case UNDER_REPLICATED:
          LOG.trace("under replicated block {}: {}", block, res);
          nrUnderReplicated++;
          break;
        case OVER_REPLICATED:
          LOG.trace("over replicated block {}: {}", block, res);
          nrOverReplicated++;
          break;
        case INVALID:
          LOG.trace("invalid block {}: {}", block, res);
          nrInvalid++;
          break;
        case POSTPONE:
          LOG.trace("postpone block {}: {}", block, res);
          nrPostponed++;
          postponeBlock(block);
          break;
        case UNDER_CONSTRUCTION:
          LOG.trace("under construction block {}: {}", block, res);
          nrUnderConstruction++;
          break;
        case OK:
          break;
        default:
          throw new AssertionError("Invalid enum value: " + res);
        }
        processed++;
      }
      totalProcessed += processed;
      // there is a possibility that if any of the blocks deleted/added during
      // initialisation, then progress might be different.
      reconstructionQueuesInitProgress = Math.min((double) totalProcessed
          / totalBlocks, 1.0);
      if (!blocksItr.hasNext()) {
        // Scan complete: report the final tallies.
        LOG.info("Total number of blocks = {}", blocksMap.size());
        LOG.info("Number of invalid blocks = {}", nrInvalid);
        LOG.info("Number of under-replicated blocks = {}", nrUnderReplicated);
        LOG.info("Number of over-replicated blocks = {}{}", nrOverReplicated,
            ((nrPostponed > 0) ? (" (" + nrPostponed + " postponed)") : ""));
        LOG.info("Number of blocks being written = {}",
            nrUnderConstruction);
        NameNode.stateChangeLog
            .info("STATE* Replication Queue initialization "
                + "scan for invalid, over- and under-replicated blocks "
                + "completed in "
                + (Time.monotonicNow() - startTimeMisReplicatedScan)
                + " msec");
        break;
      }
    } finally {
      namesystem.writeUnlock();
      // Make sure it is out of the write lock for sufficiently long time.
      Thread.sleep(sleepDuration);
    }
  }
  if (Thread.currentThread().isInterrupted()) {
    LOG.info("Interrupted while processing replication queues.");
  }
}
/**
 * Get the progress of the reconstruction queues initialisation.
 *
 * @return a value between 0 and 1 (inclusive) for the progress; 1.0 once
 *         the asynchronous mis-replication scan has covered every block.
 */
public double getReconstructionQueuesInitProgress() {
  return reconstructionQueuesInitProgress;
}
/**
 * Get the value of whether there are any non-EC blocks using StripedID.
 *
 * @return true if any non-EC block in the namespace uses an ID from the
 *         striped block ID range.
 */
public boolean hasNonEcBlockUsingStripedID(){
  return hasNonEcBlockUsingStripedID;
}
/**
 * Process a single possibly misreplicated block. This adds it to the
 * appropriate queues if necessary, and returns a result code indicating
 * what happened with it.
 */
private MisReplicationResult processMisReplicatedBlock(BlockInfo block) {
  if (block.isDeleted()) {
    // block does not belong to any file
    addToInvalidates(block);
    return MisReplicationResult.INVALID;
  }
  if (!block.isComplete()) {
    // Incomplete blocks are never considered mis-replicated --
    // they'll be reached when they are completed or recovered.
    return MisReplicationResult.UNDER_CONSTRUCTION;
  }
  // calculate current redundancy
  short expectedRedundancy = getExpectedRedundancyNum(block);
  NumberReplicas num = countNodes(block);
  final int numCurrentReplica = num.liveReplicas();
  // add to low redundancy queue if need to be
  if (isNeededReconstruction(block, num)) {
    if (neededReconstruction.add(block, numCurrentReplica,
        num.readOnlyReplicas(), num.outOfServiceReplicas(),
        expectedRedundancy)) {
      return MisReplicationResult.UNDER_REPLICATED;
    }
  }
  if (shouldProcessExtraRedundancy(num, expectedRedundancy)) {
    if (num.replicasOnStaleNodes() > 0) {
      // If any of the replicas of this block are on nodes that are
      // considered "stale", then these replicas may in fact have
      // already been deleted. So, we cannot safely act on the
      // over-replication until a later point in time, when
      // the "stale" nodes have block reported.
      return MisReplicationResult.POSTPONE;
    }
    // extra redundancy block
    processExtraRedundancyBlock(block, expectedRedundancy, null, null);
    return MisReplicationResult.OVER_REPLICATED;
  }
  return MisReplicationResult.OK;
}
/**
 * Set a new replication factor for the given block and update the
 * low/extra-redundancy queues accordingly. No-op when the replication
 * factor is unchanged.
 *
 * @param oldRepl the previous replication factor
 * @param newRepl the new replication factor
 * @param b the block whose replication is being changed
 */
public void setReplication(
    final short oldRepl, final short newRepl, final BlockInfo b) {
  if (newRepl == oldRepl) {
    return;
  }
  // update neededReconstruction priority queues
  b.setReplication(newRepl);
  NumberReplicas num = countNodes(b);
  updateNeededReconstructions(b, 0, newRepl - oldRepl);
  if (shouldProcessExtraRedundancy(num, newRepl)) {
    // Replication was lowered below the current replica count.
    processExtraRedundancyBlock(b, newRepl, null, null);
  }
}
/**
 * Find how many of the containing nodes are "extra", if any.
 * If there are any extras, call chooseExcessRedundancies() to
 * mark them in the excessRedundancyMap.
 *
 * @param block the over-replicated block
 * @param replication the desired replication factor
 * @param addedNode node on which a replica was recently added (may be null)
 * @param delNodeHint preferred node to delete a replica from (may be null)
 */
private void processExtraRedundancyBlock(final BlockInfo block,
    final short replication, final DatanodeDescriptor addedNode,
    DatanodeDescriptor delNodeHint) {
  assert namesystem.hasWriteLock();
  // A hint pointing at the node that just gained a replica is useless.
  if (addedNode == delNodeHint) {
    delNodeHint = null;
  }
  Collection<DatanodeStorageInfo> nonExcess = new ArrayList<>();
  Collection<DatanodeDescriptor> corruptNodes = corruptReplicas
      .getNodes(block);
  for (DatanodeStorageInfo storage : blocksMap.getStorages(block)) {
    if (storage.getState() != State.NORMAL) {
      continue;
    }
    final DatanodeDescriptor cur = storage.getDatanodeDescriptor();
    if (storage.areBlockContentsStale()) {
      // Stale storage may have silently lost replicas; deciding which
      // replica is excess now could delete the last good copy. Postpone.
      LOG.trace("BLOCK* processExtraRedundancyBlock: Postponing {}"
          + " since storage {} does not yet have up-to-date information.",
          block, storage);
      postponeBlock(block);
      return;
    }
    if (!isExcess(cur, block)) {
      if (cur.isInService()) {
        // exclude corrupt replicas
        if (corruptNodes == null || !corruptNodes.contains(cur)) {
          nonExcess.add(storage);
        }
      }
    }
  }
  chooseExcessRedundancies(nonExcess, block, replication, addedNode,
      delNodeHint);
}
/**
 * Choose which of the given replicas are excess and should be removed,
 * dispatching to the striped or contiguous strategy depending on the
 * block layout.
 */
private void chooseExcessRedundancies(
    final Collection<DatanodeStorageInfo> nonExcess,
    BlockInfo storedBlock, short replication,
    DatanodeDescriptor addedNode,
    DatanodeDescriptor delNodeHint) {
  assert namesystem.hasWriteLock();
  BlockCollection bc = getBlockCollection(storedBlock);
  if (storedBlock.isStriped()) {
    chooseExcessRedundancyStriped(bc, nonExcess, storedBlock, delNodeHint);
  } else {
    // Contiguous blocks: let the storage policy decide which storage types
    // are in excess of the policy's requirements before picking replicas.
    final BlockStoragePolicy storagePolicy = storagePolicySuite.getPolicy(
        bc.getStoragePolicyID());
    final List<StorageType> excessTypes = storagePolicy.chooseExcess(
        replication, DatanodeStorageInfo.toStorageTypes(nonExcess));
    chooseExcessRedundancyContiguous(nonExcess, storedBlock, replication,
        addedNode, delNodeHint, excessTypes);
  }
}
/**
 * We want sufficient redundancy for the block, but we now have too many.
 * In this method, copy enough nodes from 'srcNodes' into 'dstNodes' such that:
 *
 * srcNodes.size() - dstNodes.size() == replication
 *
 * We pick nodes to make sure that replicas are spread across racks and
 * also try hard to pick one with least free space.
 * The algorithm is first to pick a node with least free space from nodes
 * that are on a rack holding more than one replica of the block.
 * So removing such a replica won't remove a rack.
 * If no such a node is available,
 * then pick a node with least free space.
 */
private void chooseExcessRedundancyContiguous(
    final Collection<DatanodeStorageInfo> nonExcess, BlockInfo storedBlock,
    short replication, DatanodeDescriptor addedNode,
    DatanodeDescriptor delNodeHint, List<StorageType> excessTypes) {
  BlockPlacementPolicy replicator = placementPolicies.getPolicy(CONTIGUOUS);
  // The placement policy picks the replicas to drop; each chosen replica is
  // then recorded in the excess map and queued for invalidation.
  List<DatanodeStorageInfo> replicasToDelete = replicator
      .chooseReplicasToDelete(nonExcess, nonExcess, replication, excessTypes,
          addedNode, delNodeHint);
  for (DatanodeStorageInfo chosenReplica : replicasToDelete) {
    processChosenExcessRedundancy(nonExcess, chosenReplica, storedBlock);
  }
}
/**
 * We want the block group to have every internal block, but we have redundant
 * internal blocks (which have the same index).
 * In this method, we delete the redundant internal blocks until only one is
 * left for each index.
 *
 * The block placement policy will make sure that the remaining internal
 * blocks are spread across racks and also try hard to pick one with least
 * free space.
 */
private void chooseExcessRedundancyStriped(BlockCollection bc,
    final Collection<DatanodeStorageInfo> nonExcess,
    BlockInfo storedBlock,
    DatanodeDescriptor delNodeHint) {
  assert storedBlock instanceof BlockInfoStriped;
  BlockInfoStriped sblk = (BlockInfoStriped) storedBlock;
  short groupSize = sblk.getTotalBlockNum();
  // find all duplicated indices
  BitSet found = new BitSet(groupSize); //indices found
  BitSet duplicated = new BitSet(groupSize); //indices found more than once
  HashMap<DatanodeStorageInfo, Integer> storage2index = new HashMap<>();
  for (DatanodeStorageInfo storage : nonExcess) {
    int index = sblk.getStorageBlockIndex(storage);
    assert index >= 0;
    if (found.get(index)) {
      duplicated.set(index);
    }
    found.set(index);
    storage2index.put(storage, index);
  }
  // use delHint only if delHint is duplicated
  final DatanodeStorageInfo delStorageHint =
      DatanodeStorageInfo.getDatanodeStorageInfo(nonExcess, delNodeHint);
  if (delStorageHint != null) {
    Integer index = storage2index.get(delStorageHint);
    if (index != null && duplicated.get(index)) {
      processChosenExcessRedundancy(nonExcess, delStorageHint, storedBlock);
    }
  }
  // cardinality of found indicates the expected number of internal blocks
  final int numOfTarget = found.cardinality();
  final BlockStoragePolicy storagePolicy = storagePolicySuite.getPolicy(
      bc.getStoragePolicyID());
  final List<StorageType> excessTypes = storagePolicy.chooseExcess(
      (short) numOfTarget, DatanodeStorageInfo.toStorageTypes(nonExcess));
  if (excessTypes.isEmpty()) {
    // Nothing can be deleted according to the storage policy; bail out.
    LOG.warn("excess types chosen for block {} among storages {} is empty",
        storedBlock, nonExcess);
    return;
  }
  BlockPlacementPolicy placementPolicy = placementPolicies.getPolicy(STRIPED);
  // for each duplicated index, delete some replicas until only one is left
  for (int targetIndex = duplicated.nextSetBit(0); targetIndex >= 0;
       targetIndex = duplicated.nextSetBit(targetIndex + 1)) {
    // Gather every remaining storage holding this internal-block index.
    List<DatanodeStorageInfo> candidates = new ArrayList<>();
    for (DatanodeStorageInfo storage : nonExcess) {
      int index = storage2index.get(storage);
      if (index == targetIndex) {
        candidates.add(storage);
      }
    }
    if (candidates.size() > 1) {
      List<DatanodeStorageInfo> replicasToDelete = placementPolicy
          .chooseReplicasToDelete(nonExcess, candidates, (short) 1,
              excessTypes, null, null);
      for (DatanodeStorageInfo chosen : replicasToDelete) {
        processChosenExcessRedundancy(nonExcess, chosen, storedBlock);
        candidates.remove(chosen);
      }
    }
    duplicated.clear(targetIndex);
  }
}
/**
 * Record the chosen replica as excess and schedule it for invalidation on
 * its datanode. The replica is also removed from {@code nonExcess} so that
 * subsequent selection rounds do not consider it again.
 */
private void processChosenExcessRedundancy(
    final Collection<DatanodeStorageInfo> nonExcess,
    final DatanodeStorageInfo chosen, BlockInfo storedBlock) {
  nonExcess.remove(chosen);
  excessRedundancyMap.add(chosen.getDatanodeDescriptor(), storedBlock);
  //
  // The 'excessblocks' tracks blocks until we get confirmation
  // that the datanode has deleted them; the only way we remove them
  // is when we get a "removeBlock" message.
  //
  // The 'invalidate' list is used to inform the datanode the block
  // should be deleted. Items are removed from the invalidate list
  // upon giving instructions to the datanodes.
  //
  final Block blockToInvalidate = getBlockOnStorage(storedBlock, chosen);
  addToInvalidates(blockToInvalidate, chosen.getDatanodeDescriptor());
  blockLog.debug("BLOCK* chooseExcessRedundancies: "
      + "({}, {}) is added to invalidated blocks set", chosen, storedBlock);
}
/**
 * Remove the replica of {@code block} reported as deleted by the given
 * storage. If the block's generation stamp is from the future (possible on
 * a standby that has not caught up), the report is queued for later
 * processing instead.
 */
private void removeStoredBlock(DatanodeStorageInfo storageInfo, Block block,
    DatanodeDescriptor node) {
  if (shouldPostponeBlocksFromFuture && isGenStampInFuture(block)) {
    queueReportedBlock(storageInfo, block, null,
        QUEUE_REASON_FUTURE_GENSTAMP);
    return;
  }
  removeStoredBlock(getStoredBlock(block), node);
}
/**
 * Modify (block-->datanode) map. Possibly generate replication tasks, if the
 * removed block is still valid.
 */
public void removeStoredBlock(BlockInfo storedBlock, DatanodeDescriptor node) {
  blockLog.debug("BLOCK* removeStoredBlock: {} from {}", storedBlock, node);
  assert (namesystem.hasWriteLock());
  {
    if (storedBlock == null || !blocksMap.removeNode(storedBlock, node)) {
      blockLog.debug("BLOCK* removeStoredBlock: {} has already been" +
          " removed from node {}", storedBlock, node);
      return;
    }
    // Drop any cache-directive state for this replica on this node.
    CachedBlock cblock = namesystem.getCacheManager().getCachedBlocks()
        .get(new CachedBlock(storedBlock.getBlockId(), (short) 0, false));
    if (cblock != null) {
      boolean removed = false;
      removed |= node.getPendingCached().remove(cblock);
      removed |= node.getCached().remove(cblock);
      removed |= node.getPendingUncached().remove(cblock);
      if (removed) {
        blockLog.debug("BLOCK* removeStoredBlock: {} removed from caching "
            + "related lists on node {}", storedBlock, node);
      }
    }
    //
    // It's possible that the block was removed because of a datanode
    // failure. If the block is still valid, check if replication is
    // necessary. In that case, put block on a possibly-will-
    // be-replicated list.
    //
    if (!storedBlock.isDeleted()) {
      bmSafeMode.decrementSafeBlockCount(storedBlock);
      updateNeededReconstructions(storedBlock, -1, 0);
    }
    // This node no longer holds the block, so clear any excess/corrupt
    // book-keeping that referenced the (block, node) pair.
    excessRedundancyMap.remove(node, storedBlock);
    corruptReplicas.removeFromCorruptReplicasMap(storedBlock, node);
  }
}
/**
 * Remove the given stale replicas of {@code block} from the nodes where
 * they were expected to be stored, logging each removal.
 */
private void removeStaleReplicas(List<ReplicaUnderConstruction> staleReplicas,
    BlockInfo block) {
  for (ReplicaUnderConstruction r : staleReplicas) {
    removeStoredBlock(block,
        r.getExpectedStorageLocation().getDatanodeDescriptor());
    NameNode.blockStateChangeLog
        .debug("BLOCK* Removing stale replica {}" + " of {}", r,
            Block.toString(r));
  }
}
/**
 * Get all valid locations of the block & add the block to results.
 * @return the length of the added block; 0 if the block is not added. If the
 * added block is a block group, return its approximate internal block size
 */
private long addBlock(BlockInfo block, List<BlockWithLocations> results) {
  final List<DatanodeStorageInfo> locations = getValidLocations(block);
  if(locations.size() == 0) {
    // No valid locations: nothing to report for this block.
    return 0;
  } else {
    // Flatten location info into parallel arrays for the wire format.
    final String[] datanodeUuids = new String[locations.size()];
    final String[] storageIDs = new String[datanodeUuids.length];
    final StorageType[] storageTypes = new StorageType[datanodeUuids.length];
    for(int i = 0; i < locations.size(); i++) {
      final DatanodeStorageInfo s = locations.get(i);
      datanodeUuids[i] = s.getDatanodeDescriptor().getDatanodeUuid();
      storageIDs[i] = s.getStorageID();
      storageTypes[i] = s.getStorageType();
    }
    BlockWithLocations blkWithLocs = new BlockWithLocations(block,
        datanodeUuids, storageIDs, storageTypes);
    if(block.isStriped()) {
      // Striped blocks additionally carry the internal-block index of
      // each location.
      BlockInfoStriped blockStriped = (BlockInfoStriped) block;
      byte[] indices = new byte[locations.size()];
      for (int i = 0; i < locations.size(); i++) {
        indices[i] =
            (byte) blockStriped.getStorageBlockIndex(locations.get(i));
      }
      results.add(new StripedBlockWithLocations(blkWithLocs, indices,
          blockStriped.getDataBlockNum(), blockStriped.getCellSize()));
      // approximate size of one internal block of the group
      return block.getNumBytes() / blockStriped.getDataBlockNum();
    }else{
      results.add(blkWithLocs);
      return block.getNumBytes();
    }
  }
}
/**
 * The given node is reporting that it received a certain block.
 *
 * @param storageInfo the storage that received the block
 * @param block the received block
 * @param delHint datanode UUID hinting where a replica may be deleted from;
 *        may be null or empty
 * @throws IOException if processing the reported block fails
 */
@VisibleForTesting
public void addBlock(DatanodeStorageInfo storageInfo, Block block,
    String delHint) throws IOException {
  DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
  // Decrement number of blocks scheduled to this datanode.
  // for a retry request (of DatanodeProtocol#blockReceivedAndDeleted with
  // RECEIVED_BLOCK), we currently also decrease the approximate number.
  node.decrementBlocksScheduled(storageInfo.getStorageType());
  // get the deletion hint node
  DatanodeDescriptor delHintNode = null;
  if (delHint != null && delHint.length() != 0) {
    delHintNode = datanodeManager.getDatanode(delHint);
    if (delHintNode == null) {
      blockLog.warn("BLOCK* blockReceived: {} is expected to be removed " +
          "from an unrecorded node {}", block, delHint);
    }
  }
  //
  // Modify the blocks->datanode map and node's map.
  //
  BlockInfo storedBlock = getStoredBlock(block);
  if (storedBlock != null &&
      block.getGenerationStamp() == storedBlock.getGenerationStamp()) {
    // The arrival satisfies a pending reconstruction for this block.
    if (pendingReconstruction.decrement(storedBlock, node)) {
      NameNode.getNameNodeMetrics().incSuccessfulReReplications();
    }
  }
  processAndHandleReportedBlock(storageInfo, block, ReplicaState.FINALIZED,
      delHintNode);
}
/**
 * Process one replica reported by a datanode: queue it (future genstamp on
 * a standby), invalidate it (unknown block), mark it corrupt, track it as
 * under construction, or add it as a stored replica, depending on the
 * reported and in-memory block states.
 */
private void processAndHandleReportedBlock(
    DatanodeStorageInfo storageInfo, Block block,
    ReplicaState reportedState, DatanodeDescriptor delHintNode)
    throws IOException {
  final DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
  LOG.debug("Reported block {} on {} size {} replicaState = {}",
      block, node, block.getNumBytes(), reportedState);
  if (shouldPostponeBlocksFromFuture &&
      isGenStampInFuture(block)) {
    queueReportedBlock(storageInfo, block, reportedState,
        QUEUE_REASON_FUTURE_GENSTAMP);
    return;
  }
  // find block by blockId
  BlockInfo storedBlock = getStoredBlock(block);
  if(storedBlock == null) {
    // If blocksMap does not contain reported block id,
    // the replica should be removed from the data-node.
    blockLog.debug("BLOCK* addBlock: block {} on node {} size {} does not " +
        "belong to any file", block, node, block.getNumBytes());
    addToInvalidates(new Block(block), node);
    return;
  }
  BlockUCState ucState = storedBlock.getBlockUCState();
  // Block is on the NN
  LOG.debug("In memory blockUCState = {}", ucState);
  // Ignore replicas already scheduled to be removed from the DN
  if(invalidateBlocks.contains(node, block)) {
    return;
  }
  BlockToMarkCorrupt c = checkReplicaCorrupt(
      block, reportedState, storedBlock, ucState, node);
  if (c != null) {
    if (shouldPostponeBlocksFromFuture) {
      // If the block is an out-of-date generation stamp or state,
      // but we're the standby, we shouldn't treat it as corrupt,
      // but instead just queue it for later processing.
      // TODO: Pretty confident this should be s/storedBlock/block below,
      // since we should be postponing the info of the reported block, not
      // the stored block. See HDFS-6289 for more context.
      queueReportedBlock(storageInfo, storedBlock, reportedState,
          QUEUE_REASON_CORRUPT_STATE);
    } else {
      markBlockAsCorrupt(c, storageInfo, node);
    }
    return;
  }
  if (isBlockUnderConstruction(storedBlock, ucState, reportedState)) {
    addStoredBlockUnderConstruction(
        new StatefulBlockInfo(storedBlock, new Block(block), reportedState),
        storageInfo);
    return;
  }
  // Add replica if appropriate. If the replica was previously corrupt
  // but now okay, it might need to be updated.
  if (reportedState == ReplicaState.FINALIZED
      && (storedBlock.findStorageInfo(storageInfo) == -1 ||
          corruptReplicas.isReplicaCorrupt(storedBlock, node))) {
    addStoredBlock(storedBlock, block, storageInfo, delHintNode, true);
  }
}
/**
 * The given node is reporting incremental information about some blocks.
 * This includes blocks that are starting to be received, completed being
 * received, or deleted.
 *
 * This method must be called with FSNamesystem lock held.
 *
 * @param nodeID identity of the reporting datanode
 * @param srdb per-storage batch of received/deleted block notifications
 * @throws IOException if the node is dead/unregistered or processing fails
 */
public void processIncrementalBlockReport(final DatanodeID nodeID,
    final StorageReceivedDeletedBlocks srdb) throws IOException {
  assert namesystem.hasWriteLock();
  final DatanodeDescriptor node = datanodeManager.getDatanode(nodeID);
  if (node == null || !node.isRegistered()) {
    blockLog.warn("BLOCK* processIncrementalBlockReport"
        + " is received from dead or unregistered node {}", nodeID);
    throw new IOException(
        "Got incremental block report from unregistered or dead node");
  }
  boolean successful = false;
  try {
    processIncrementalBlockReport(node, srdb);
    successful = true;
  } finally {
    if (!successful) {
      // On any failure, force the node to re-register so its state is
      // fully resynchronized with the NameNode.
      node.setForceRegistration(true);
    }
  }
}
/**
 * Apply one storage's batch of incremental block notifications
 * (deleted / received / receiving) from the given datanode.
 */
private void processIncrementalBlockReport(final DatanodeDescriptor node,
    final StorageReceivedDeletedBlocks srdb) throws IOException {
  DatanodeStorageInfo storageInfo =
      node.getStorageInfo(srdb.getStorage().getStorageID());
  if (storageInfo == null) {
    // The DataNode is reporting an unknown storage. Usually the NN learns
    // about new storages from heartbeats but during NN restart we may
    // receive a block report or incremental report before the heartbeat.
    // We must handle this for protocol compatibility. This issue was
    // uncovered by HDFS-6094.
    storageInfo = node.updateStorage(srdb.getStorage());
  }
  int received = 0;
  int deleted = 0;
  int receiving = 0;
  for (ReceivedDeletedBlockInfo rdbi : srdb.getBlocks()) {
    switch (rdbi.getStatus()) {
    case DELETED_BLOCK:
      removeStoredBlock(storageInfo, rdbi.getBlock(), node);
      deleted++;
      break;
    case RECEIVED_BLOCK:
      addBlock(storageInfo, rdbi.getBlock(), rdbi.getDelHints());
      received++;
      break;
    case RECEIVING_BLOCK:
      receiving++;
      // A block being written is reported as an RBW replica.
      processAndHandleReportedBlock(storageInfo, rdbi.getBlock(),
          ReplicaState.RBW, null);
      break;
    default:
      String msg =
          "Unknown block status code reported by " + node +
              ": " + rdbi;
      blockLog.warn(msg);
      assert false : msg; // if assertions are enabled, throw.
      break;
    }
    blockLog.debug("BLOCK* block {}: {} is received from {}",
        rdbi.getStatus(), rdbi.getBlock(), node);
  }
  blockLog.debug("*BLOCK* NameNode.processIncrementalBlockReport: from "
      + "{} receiving: {}, received: {}, deleted: {}", node, receiving,
      received, deleted);
}
/**
 * Return the number of nodes hosting a given block, grouped
 * by the state of those replicas.
 * For a striped block, this includes nodes storing blocks belonging to the
 * striped block group. But note we exclude duplicated internal block replicas
 * for calculating {@link NumberReplicas#liveReplicas}.
 */
public NumberReplicas countNodes(BlockInfo b) {
  return countNodes(b, false);
}
/**
 * Count replicas of a block by state. In startup safe mode the per-replica
 * classification is simplified for speed (see checkReplicaOnStorage).
 */
NumberReplicas countNodes(BlockInfo b, boolean inStartupSafeMode) {
  NumberReplicas numberReplicas = new NumberReplicas();
  Collection<DatanodeDescriptor> nodesCorrupt = corruptReplicas.getNodes(b);
  if (b.isStriped()) {
    // Striped blocks need duplicate-index handling; see the helper.
    countReplicasForStripedBlock(numberReplicas, (BlockInfoStriped) b,
        nodesCorrupt, inStartupSafeMode);
  } else {
    for (DatanodeStorageInfo storage : blocksMap.getStorages(b)) {
      checkReplicaOnStorage(numberReplicas, b, storage, nodesCorrupt,
          inStartupSafeMode);
    }
  }
  return numberReplicas;
}
/**
 * Classify a single replica on the given storage and add it to the counters.
 *
 * @return the state the replica was counted under, or null if the storage
 *         state did not qualify for counting
 */
private StoredReplicaState checkReplicaOnStorage(NumberReplicas counters,
    BlockInfo b, DatanodeStorageInfo storage,
    Collection<DatanodeDescriptor> nodesCorrupt, boolean inStartupSafeMode) {
  final StoredReplicaState s;
  if (storage.getState() == State.NORMAL) {
    final DatanodeDescriptor node = storage.getDatanodeDescriptor();
    if (nodesCorrupt != null && nodesCorrupt.contains(node)) {
      s = StoredReplicaState.CORRUPT;
    } else if (inStartupSafeMode) {
      // In startup safe mode everything non-corrupt counts as LIVE;
      // skip the finer-grained (and costlier) classification below.
      s = StoredReplicaState.LIVE;
      counters.add(s, 1);
      return s;
    } else if (node.isDecommissionInProgress()) {
      s = StoredReplicaState.DECOMMISSIONING;
    } else if (node.isDecommissioned()) {
      s = StoredReplicaState.DECOMMISSIONED;
    } else if (node.isMaintenance()) {
      if (node.isInMaintenance() || !node.isAlive()) {
        s = StoredReplicaState.MAINTENANCE_NOT_FOR_READ;
      } else {
        s = StoredReplicaState.MAINTENANCE_FOR_READ;
      }
    } else if (isExcess(node, b)) {
      s = StoredReplicaState.EXCESS;
    } else {
      s = StoredReplicaState.LIVE;
    }
    counters.add(s, 1);
    // Stale storage is counted in addition to the replica's primary state.
    if (storage.areBlockContentsStale()) {
      counters.add(StoredReplicaState.STALESTORAGE, 1);
    }
  } else if (!inStartupSafeMode &&
      storage.getState() == State.READ_ONLY_SHARED) {
    s = StoredReplicaState.READONLY;
    counters.add(s, 1);
  } else {
    s = null;
  }
  return s;
}
/**
 * For a striped block, it is possible it contains full number of internal
 * blocks (i.e., 9 by default), but with duplicated replicas of the same
 * internal block. E.g., for the following list of internal blocks
 * b0, b0, b1, b2, b3, b4, b5, b6, b7
 * we have 9 internal blocks but we actually miss b8.
 * We should use this method to detect the above scenario and schedule
 * necessary reconstruction.
 */
private void countReplicasForStripedBlock(NumberReplicas counters,
    BlockInfoStriped block, Collection<DatanodeDescriptor> nodesCorrupt,
    boolean inStartupSafeMode) {
  // Track which internal-block indices have already been seen as LIVE;
  // later duplicates are re-counted as REDUNDANT instead of LIVE.
  BitSet bitSet = new BitSet(block.getTotalBlockNum());
  for (StorageAndBlockIndex si : block.getStorageAndIndexInfos()) {
    StoredReplicaState state = checkReplicaOnStorage(counters, block,
        si.getStorage(), nodesCorrupt, inStartupSafeMode);
    if (state == StoredReplicaState.LIVE) {
      if (!bitSet.get(si.getBlockIndex())) {
        bitSet.set(si.getBlockIndex());
      } else {
        counters.subtract(StoredReplicaState.LIVE, 1);
        counters.add(StoredReplicaState.REDUNDANT, 1);
      }
    }
  }
}
/** Test-only accessor for the excess-redundancy map size of a datanode. */
@VisibleForTesting
int getExcessSize4Testing(String dnUuid) {
  return excessRedundancyMap.getSize4Testing(dnUuid);
}
/** @return true if the replica of {@code blk} on {@code dn} is marked excess. */
public boolean isExcess(DatanodeDescriptor dn, BlockInfo blk) {
  return excessRedundancyMap.contains(dn, blk);
}
/**
 * Lightweight variant of {@link #countNodes} that reports only the number
 * of live replicas. During startup safe mode (including its extension
 * period) the count ignores excess replicas and decommission-related node
 * states for speed; otherwise it is equivalent to calling
 * {@link #countNodes} and reading the live count.
 *
 * @param b the block being tested
 * @return count of live nodes for this block
 */
int countLiveNodes(BlockInfo b) {
  return countNodes(b, namesystem.isInStartupSafeMode()).liveReplicas();
}
/**
 * On putting the node in service, check if the node has excess replicas.
 * If there are any excess replicas, call processExtraRedundancyBlock().
 * Process extra redundancy blocks only when active NN is out of safe mode.
 *
 * @param srcNode the node that just (re-)entered service
 */
void processExtraRedundancyBlocksOnInService(
    final DatanodeDescriptor srcNode) {
  if (!isPopulatingReplQueues()) {
    return;
  }
  final Iterator<BlockInfo> it = srcNode.getBlockIterator();
  int numExtraRedundancy = 0;
  while(it.hasNext()) {
    final BlockInfo block = it.next();
    if (block.isDeleted()) {
      //Orphan block, will be handled eventually, skip
      continue;
    }
    int expectedReplication = this.getExpectedRedundancyNum(block);
    NumberReplicas num = countNodes(block);
    if (shouldProcessExtraRedundancy(num, expectedReplication)) {
      // extra redundancy block
      processExtraRedundancyBlock(block, (short) expectedReplication, null,
          null);
      numExtraRedundancy++;
    }
  }
  LOG.info("Invalidated {} extra redundancy blocks on {} after "
      + "it is in service", numExtraRedundancy, srcNode);
}
/**
 * Returns whether a node can be safely decommissioned or put in maintenance
 * based on its liveness. A live node (that has reported) is always eligible;
 * a dead node is eligible only when there are no blocks pending
 * reconstruction and no low-redundancy blocks, since removing it could
 * otherwise reduce data durability or lose data.
 */
boolean isNodeHealthyForDecommissionOrMaintenance(DatanodeDescriptor node) {
  if (!node.checkBlockReportReceived()) {
    LOG.info("Node {} hasn't sent its first block report.", node);
    return false;
  }
  if (node.isAlive()) {
    return true;
  }
  updateState();
  if (pendingReconstructionBlocksCount == 0 &&
      lowRedundancyBlocksCount == 0) {
    // Fixed: the trailing sentence fragment was previously passed as a log
    // argument (stray ',' instead of '+'), which consumed the "{}"
    // placeholder and dropped the node from the logged message.
    LOG.info("Node {} is dead and there are no low redundancy" +
        " blocks or blocks pending reconstruction. Safe to decommission or" +
        " put in maintenance.", node);
    return true;
  }
  LOG.warn("Node {} is dead " +
      "while in {}. Cannot be safely " +
      "decommissioned or be in maintenance since there is risk of reduced " +
      "data durability or data loss. Either restart the failed node or " +
      "force decommissioning or maintenance by removing, calling " +
      "refreshNodes, then re-adding to the excludes or host config files.",
      node, node.getAdminState());
  return false;
}
/** @return the number of blocks currently tracked in the blocks map. */
public int getActiveBlockCount() {
  return blocksMap.size();
}
/**
 * Materialize the storages holding the given block into a fixed-size array
 * sized by {@code block.numNodes()}.
 */
public DatanodeStorageInfo[] getStorages(BlockInfo block) {
  final DatanodeStorageInfo[] result =
      new DatanodeStorageInfo[block.numNodes()];
  int idx = 0;
  final Iterator<DatanodeStorageInfo> it =
      blocksMap.getStorages(block).iterator();
  while (it.hasNext()) {
    result[idx] = it.next();
    idx++;
  }
  return result;
}
/** @return an iterable over the storages holding the given (raw) block. */
public Iterable<DatanodeStorageInfo> getStorages(final Block block) {
  return blocksMap.getStorages(block);
}

/** @return total number of blocks tracked in the blocks map. */
public int getTotalBlocks() {
  return blocksMap.size();
}
/**
 * Remove a block entirely from the namespace: mark it so datanodes need not
 * ACK the deletion, schedule its replicas for invalidation, and drop it from
 * every tracking structure. Caller must hold the namesystem write lock.
 */
public void removeBlock(BlockInfo block) {
  assert namesystem.hasWriteLock();
  // No need to ACK blocks that are being removed entirely
  // from the namespace, since the removal of the associated
  // file already removes them from the block map below.
  block.setNumBytes(BlockCommand.NO_ACK);
  addToInvalidates(block);
  removeBlockFromMap(block);
  // Remove the block from pendingReconstruction and neededReconstruction
  pendingReconstruction.remove(block);
  neededReconstruction.remove(block, LowRedundancyBlocks.LEVEL);
  postponedMisreplicatedBlocks.remove(block);
}
/**
 * Look up the stored {@link BlockInfo} for a block. For block IDs in the
 * striped range, the lookup may need the ID converted to its striped group
 * ID; when legacy (non-EC) blocks using striped IDs exist, the raw ID is
 * tried first and the converted ID used as a fallback.
 */
public BlockInfo getStoredBlock(Block block) {
  if (!BlockIdManager.isStripedBlockID(block.getBlockId())) {
    // Contiguous-range ID: direct lookup.
    return blocksMap.getStoredBlock(block);
  }
  final Block stripedKey =
      new Block(BlockIdManager.convertToStripedID(block.getBlockId()));
  if (!hasNonEcBlockUsingStripedID) {
    // Only EC blocks occupy the striped ID range: convert and look up once.
    return blocksMap.getStoredBlock(stripedKey);
  }
  final BlockInfo direct = blocksMap.getStoredBlock(block);
  return direct != null ? direct : blocksMap.getStoredBlock(stripedKey);
}
/**
 * Sync the stored last block's length and generation stamp with the new
 * block, then purge any replicas the genstamp bump made stale.
 */
public void updateLastBlock(BlockInfo lastBlock, ExtendedBlock newBlock) {
  lastBlock.setNumBytes(newBlock.getNumBytes());
  List<ReplicaUnderConstruction> staleReplicas = lastBlock
      .setGenerationStampAndVerifyReplicas(newBlock.getGenerationStamp());
  removeStaleReplicas(staleReplicas, lastBlock);
}

/** updates a block in needed reconstruction queue. */
private void updateNeededReconstructions(final BlockInfo block,
    final int curReplicasDelta, int expectedReplicasDelta) {
  namesystem.writeLock();
  try {
    if (!isPopulatingReplQueues() || !block.isComplete()) {
      // Queues not initialized yet, or block still under construction.
      return;
    }
    NumberReplicas repl = countNodes(block);
    int pendingNum = pendingReconstruction.getNumReplicas(block);
    int curExpectedReplicas = getExpectedRedundancyNum(block);
    if (!hasEnoughEffectiveReplicas(block, repl, pendingNum)) {
      // Still short of effective replicas: (re)queue with updated counts.
      neededReconstruction.update(block, repl.liveReplicas() + pendingNum,
          repl.readOnlyReplicas(), repl.outOfServiceReplicas(),
          curExpectedReplicas, curReplicasDelta, expectedReplicasDelta);
    } else {
      // Now sufficiently replicated: remove under the pre-delta counts the
      // queue entry was keyed with.
      int oldReplicas = repl.liveReplicas() + pendingNum - curReplicasDelta;
      int oldExpectedReplicas = curExpectedReplicas-expectedReplicasDelta;
      neededReconstruction.remove(block, oldReplicas, repl.readOnlyReplicas(),
          repl.outOfServiceReplicas(), oldExpectedReplicas);
    }
  } finally {
    namesystem.writeUnlock();
  }
}
/**
 * Check sufficient redundancy of the blocks in the collection. If any block
 * is needed reconstruction, insert it into the reconstruction queue.
 * Otherwise, if the block is more than the expected replication factor,
 * process it as an extra redundancy block.
 */
public void checkRedundancy(BlockCollection bc) {
  for (BlockInfo block : bc.getBlocks()) {
    short expected = getExpectedRedundancyNum(block);
    final NumberReplicas n = countNodes(block);
    final int pending = pendingReconstruction.getNumReplicas(block);
    if (!hasEnoughEffectiveReplicas(block, n, pending)) {
      // Under-redundant: queue for reconstruction.
      neededReconstruction.add(block, n.liveReplicas() + pending,
          n.readOnlyReplicas(), n.outOfServiceReplicas(), expected);
    } else if (shouldProcessExtraRedundancy(n, expected)) {
      // Over-redundant: pick and invalidate surplus replicas.
      processExtraRedundancyBlock(block, expected, null, null);
    }
  }
}
/**
 * Get blocks to invalidate for <i>dn</i> in {@link #invalidateBlocks}.
 *
 * @param dn the datanode whose invalidation queue should be drained
 * @return number of blocks scheduled for removal during this iteration.
 */
private int invalidateWorkForOneNode(DatanodeInfo dn) {
  final List<Block> toInvalidate;
  namesystem.writeLock();
  try {
    // blocks should not be replicated or removed if safe mode is on
    if (namesystem.isInSafeMode()) {
      // Fixed: this message previously said "reconstruction work", which is
      // misleading on the invalidation path.
      LOG.debug("In safemode, not computing invalidation work");
      return 0;
    }
    try {
      DatanodeDescriptor dnDescriptor = datanodeManager.getDatanode(dn);
      if (dnDescriptor == null) {
        LOG.warn("DataNode {} cannot be found with UUID {}" +
            ", removing block invalidation work.", dn, dn.getDatanodeUuid());
        invalidateBlocks.remove(dn);
        return 0;
      }
      toInvalidate = invalidateBlocks.invalidateWork(dnDescriptor);
      if (toInvalidate == null) {
        return 0;
      }
    } catch (UnregisteredNodeException une) {
      // Node disappeared between lookup and scheduling; nothing to do.
      return 0;
    }
  } finally {
    namesystem.writeUnlock();
  }
  blockLog.debug("BLOCK* {}: ask {} to delete {}", getClass().getSimpleName(),
      dn, toInvalidate);
  return toInvalidate.size();
}
/** Test-only: whether the given block is queued for invalidation on dn. */
@VisibleForTesting
public boolean containsInvalidateBlock(final DatanodeInfo dn,
    final Block block) {
  return invalidateBlocks.contains(dn, block);
}
/**
 * Check whether the replica placement of the given block satisfies the
 * configured placement policy (e.g. rack spread), considering only valid
 * (non-decommissioning, non-corrupt) replicas.
 */
boolean isPlacementPolicySatisfied(BlockInfo storedBlock) {
  List<DatanodeDescriptor> liveNodes = new ArrayList<>();
  Collection<DatanodeDescriptor> corruptNodes = corruptReplicas
      .getNodes(storedBlock);
  for (DatanodeStorageInfo storage : blocksMap.getStorages(storedBlock)) {
    if (storage.getStorageType() == StorageType.PROVIDED
        && storage.getState() == State.NORMAL) {
      // assume the policy is satisfied for blocks on PROVIDED storage
      // as long as the storage is in normal state.
      return true;
    }
    final DatanodeDescriptor cur = getDatanodeDescriptorFromStorage(storage);
    // Nodes under maintenance should be counted as valid replicas from
    // rack policy point of view.
    if (!cur.isDecommissionInProgress() && !cur.isDecommissioned()
        && ((corruptNodes == null) || !corruptNodes.contains(cur))) {
      liveNodes.add(cur);
    }
  }
  DatanodeInfo[] locs = liveNodes.toArray(new DatanodeInfo[liveNodes.size()]);
  BlockType blockType = storedBlock.getBlockType();
  BlockPlacementPolicy placementPolicy = placementPolicies
      .getPolicy(blockType);
  // Striped blocks are judged against their real total block count,
  // contiguous blocks against their replication factor.
  int numReplicas = blockType == STRIPED ? ((BlockInfoStriped) storedBlock)
      .getRealTotalBlockNum() : storedBlock.getReplication();
  return placementPolicy.verifyBlockPlacement(locs, numReplicas)
      .isPlacementPolicySatisfied();
}
/**
 * Whether a complete block needs reconstruction before its nodes may enter
 * maintenance: too few live replicas to meet the maintenance minimum, or
 * placement policy not satisfied.
 */
boolean isNeededReconstructionForMaintenance(BlockInfo storedBlock,
    NumberReplicas numberReplicas) {
  if (!storedBlock.isComplete()) {
    // Blocks under construction are never scheduled here.
    return false;
  }
  final boolean tooFewLive = numberReplicas.liveReplicas()
      < getMinMaintenanceStorageNum(storedBlock);
  return tooFewLive || !isPlacementPolicySatisfied(storedBlock);
}
/**
 * Convenience overload of
 * {@link #isNeededReconstruction(BlockInfo, NumberReplicas, int)} with no
 * pending replicas.
 */
boolean isNeededReconstruction(BlockInfo storedBlock,
    NumberReplicas numberReplicas) {
  return isNeededReconstruction(storedBlock, numberReplicas, 0);
}

/**
 * A block needs reconstruction if the number of redundancies is less than
 * expected or if it does not have enough racks.
 */
boolean isNeededReconstruction(BlockInfo storedBlock,
    NumberReplicas numberReplicas, int pending) {
  return storedBlock.isComplete() &&
      !hasEnoughEffectiveReplicas(storedBlock, numberReplicas, pending);
}
// Exclude maintenance, but make sure it has minimal live replicas
// to satisfy the maintenance requirement.
public short getExpectedLiveRedundancyNum(BlockInfo block,
    NumberReplicas numberReplicas) {
  final short expectedRedundancy = getExpectedRedundancyNum(block);
  // Never drop below the configured maintenance minimum.
  return (short)Math.max(expectedRedundancy -
      numberReplicas.maintenanceReplicas(),
      getMinMaintenanceStorageNum(block));
}

/**
 * @return the expected redundancy: the real total block count for striped
 * blocks, the replication factor for contiguous blocks.
 */
public short getExpectedRedundancyNum(BlockInfo block) {
  return block.isStriped() ?
      ((BlockInfoStriped) block).getRealTotalBlockNum() :
      block.getReplication();
}
/** @return number of corrupt (missing) blocks. Reads without locking. */
public long getMissingBlocksCount() {
  // not locking
  return this.neededReconstruction.getCorruptBlockSize();
}

/** @return number of corrupt blocks with replication factor 1. Reads without locking. */
public long getMissingReplOneBlocksCount() {
  // not locking
  return this.neededReconstruction.getCorruptReplicationOneBlockSize();
}

/** @return number of replicated blocks in the highest-priority reconstruction queue. */
public long getHighestPriorityReplicatedBlockCount(){
  return this.neededReconstruction.getHighestPriorityReplicatedBlockCount();
}

/** @return number of erasure-coded blocks in the highest-priority reconstruction queue. */
public long getHighestPriorityECBlockCount(){
  return this.neededReconstruction.getHighestPriorityECBlockCount();
}
/** Associate a block with its block collection in the blocks map. */
public BlockInfo addBlockCollection(BlockInfo block,
    BlockCollection bc) {
  return blocksMap.addBlockCollection(block, bc);
}

/**
 * Do some check when adding a block to blocksmap.
 * For HDFS-7994 to check whether then block is a NonEcBlockUsingStripedID.
 *
 */
public BlockInfo addBlockCollectionWithCheck(
    BlockInfo block, BlockCollection bc) {
  // Remember that a legacy (non-EC) block occupies the striped ID range so
  // getStoredBlock() knows to try the raw ID first.
  if (!hasNonEcBlockUsingStripedID && !block.isStriped() &&
      BlockIdManager.isStripedBlockID(block.getBlockId())) {
    hasNonEcBlockUsingStripedID = true;
  }
  return addBlockCollection(block, bc);
}
/** @return the block collection (file) that owns the given block. */
BlockCollection getBlockCollection(BlockInfo b) {
  return namesystem.getBlockCollection(b.getBlockCollectionId());
}

/** @return number of replicas of the given block marked corrupt. */
public int numCorruptReplicas(Block block) {
  return corruptReplicas.numCorruptReplicas(block);
}

/**
 * Drop a block from the blocks map, clearing its excess-redundancy entries
 * and corrupt-replica records along the way.
 */
public void removeBlockFromMap(BlockInfo block) {
  for(DatanodeStorageInfo info : blocksMap.getStorages(block)) {
    excessRedundancyMap.remove(info.getDatanodeDescriptor(), block);
  }
  blocksMap.removeBlock(block);
  // If block is removed from blocksMap remove it from corruptReplicasMap
  corruptReplicas.removeFromCorruptReplicasMap(block);
}

/** @return the capacity of the blocks map. */
public int getCapacity() {
  return blocksMap.getCapacity();
}
/**
 * Return an iterator over the set of blocks for which there are no replicas.
 */
public Iterator<BlockInfo> getCorruptReplicaBlockIterator() {
  return neededReconstruction.iterator(
      LowRedundancyBlocks.QUEUE_WITH_CORRUPT_BLOCKS);
}

/**
 * Get the replicas which are corrupt for a given block.
 */
public Collection<DatanodeDescriptor> getCorruptReplicas(Block block) {
  return corruptReplicas.getNodes(block);
}

/**
 * Get reason for certain corrupted replicas for a given block and a given dn.
 */
public String getCorruptReason(Block block, DatanodeDescriptor node) {
  return corruptReplicas.getCorruptReason(block, node);
}

/** @return the size of UnderReplicatedBlocks */
public int numOfUnderReplicatedBlocks() {
  return neededReconstruction.size();
}
/**
 * Periodically calls computeBlockRecoveryWork().
 */
private class RedundancyMonitor implements Runnable {
  @Override
  public void run() {
    while (namesystem.isRunning()) {
      try {
        // Process recovery work only when active NN is out of safe mode.
        if (isPopulatingReplQueues()) {
          computeDatanodeWork();
          processPendingReconstructions();
          rescanPostponedMisreplicatedBlocks();
        }
        TimeUnit.MILLISECONDS.sleep(redundancyRecheckIntervalMs);
      } catch (Throwable t) {
        if (!namesystem.isRunning()) {
          // Normal shutdown: an interrupt is expected, anything else is
          // logged for the record.
          LOG.info("Stopping RedundancyMonitor.");
          if (!(t instanceof InterruptedException)) {
            LOG.info("RedundancyMonitor received an exception"
                + " while shutting down.", t);
          }
          break;
        } else if (!checkNSRunning && t instanceof InterruptedException) {
          // Tests interrupt the thread without stopping the namesystem.
          LOG.info("Stopping RedundancyMonitor for testing.");
          break;
        }
        // Unexpected failure while still running: abort the NameNode rather
        // than continue in an unknown state.
        LOG.error("RedundancyMonitor thread received Runtime exception. ",
            t);
        terminate(1, t);
      }
    }
  }
}
/**
 * Runnable that monitors the fragmentation of the StorageInfo TreeSet and
 * compacts it when it falls under a certain threshold.
 */
private class StorageInfoDefragmenter implements Runnable {
  @Override
  public void run() {
    while (namesystem.isRunning()) {
      try {
        // Check storage efficiency only when active NN is out of safe mode.
        if (isPopulatingReplQueues()) {
          scanAndCompactStorages();
        }
        Thread.sleep(storageInfoDefragmentInterval);
      } catch (Throwable t) {
        if (!namesystem.isRunning()) {
          LOG.info("Stopping thread.");
          if (!(t instanceof InterruptedException)) {
            LOG.info("Received an exception while shutting down.", t);
          }
          break;
        } else if (!checkNSRunning && t instanceof InterruptedException) {
          // Tests interrupt the thread without stopping the namesystem.
          LOG.info("Stopping for testing.");
          break;
        }
        // Unexpected failure: abort rather than run in an unknown state.
        LOG.error("Thread received Runtime exception.", t);
        terminate(1, t);
      }
    }
  }

  /**
   * Pass 1 (read lock, per storage): collect datanode/storage ID pairs whose
   * TreeSet fill ratio is below the configured threshold. Pass 2 (write
   * lock, per pair): compact each collected storage, retrying a storage
   * whose compaction timed out.
   */
  private void scanAndCompactStorages() throws InterruptedException {
    // Flat list of alternating (datanodeUuid, storageID) pairs.
    ArrayList<String> datanodesAndStorages = new ArrayList<>();
    for (DatanodeDescriptor node
        : datanodeManager.getDatanodeListForReport(DatanodeReportType.ALL)) {
      for (DatanodeStorageInfo storage : node.getStorageInfos()) {
        try {
          namesystem.readLock();
          double ratio = storage.treeSetFillRatio();
          if (ratio < storageInfoDefragmentRatio) {
            datanodesAndStorages.add(node.getDatanodeUuid());
            datanodesAndStorages.add(storage.getStorageID());
          }
          LOG.debug("StorageInfo TreeSet fill ratio {} : {}{}",
              storage.getStorageID(), ratio,
              (ratio < storageInfoDefragmentRatio)
              ? " (queued for defragmentation)" : "");
        } finally {
          namesystem.readUnlock();
        }
      }
    }
    if (!datanodesAndStorages.isEmpty()) {
      for (int i = 0; i < datanodesAndStorages.size(); i += 2) {
        namesystem.writeLock();
        try {
          // Re-resolve by ID: the node/storage may have gone away since
          // the scan pass.
          final DatanodeDescriptor dn = datanodeManager.
              getDatanode(datanodesAndStorages.get(i));
          if (dn == null) {
            continue;
          }
          final DatanodeStorageInfo storage = dn.
              getStorageInfo(datanodesAndStorages.get(i + 1));
          if (storage != null) {
            boolean aborted =
                !storage.treeSetCompact(storageInfoDefragmentTimeout);
            if (aborted) {
              // Compaction timed out, reset iterator to continue with
              // the same storage next iteration.
              i -= 2;
            }
            LOG.info("StorageInfo TreeSet defragmented {} : {}{}",
                storage.getStorageID(), storage.treeSetFillRatio(),
                aborted ? " (aborted)" : "");
          }
        } finally {
          namesystem.writeUnlock();
        }
        // Wait between each iteration
        Thread.sleep(1000);
      }
    }
  }
}
/**
 * Compute block replication and block invalidation work that can be scheduled
 * on data-nodes. The datanode will be informed of this work at the next
 * heartbeat.
 *
 * @return number of blocks scheduled for replication or removal.
 */
int computeDatanodeWork() {
  // Blocks should not be replicated or removed if in safe mode.
  // It's OK to check safe mode here w/o holding lock, in the worst
  // case extra replications will be scheduled, and these will get
  // fixed up later.
  if (namesystem.isInSafeMode()) {
    return 0;
  }

  // Scale the per-iteration workload by the live-node count.
  final int numlive = heartbeatManager.getLiveDatanodeCount();
  final int blocksToProcess = numlive
      * this.blocksReplWorkMultiplier;
  final int nodesToProcess = (int) Math.ceil(numlive
      * this.blocksInvalidateWorkPct);

  int workFound = this.computeBlockReconstructionWork(blocksToProcess);

  // Update counters
  namesystem.writeLock();
  try {
    this.updateState();
    this.scheduledReplicationBlocksCount = workFound;
  } finally {
    namesystem.writeUnlock();
  }
  workFound += this.computeInvalidateWork(nodesToProcess);
  return workFound;
}
/**
 * Clear all queues that hold decisions previously made by
 * this NameNode.
 */
public void clearQueues() {
  neededReconstruction.clear();
  pendingReconstruction.clear();
  excessRedundancyMap.clear();
  invalidateBlocks.clear();
  datanodeManager.clearPendingQueues();
  postponedMisreplicatedBlocks.clear();
}
// Fixed: removed a stray ';' that followed the closing brace (an empty
// class-level statement flagged by linters).
/**
 * Build a {@link LocatedBlock} from the given storages; datanode info,
 * storage IDs and storage types are derived from the storages array.
 */
public static LocatedBlock newLocatedBlock(
    ExtendedBlock b, DatanodeStorageInfo[] storages,
    long startOffset, boolean corrupt) {
  // startOffset and the corrupt flag are passed straight through; the
  // block token (last argument) is left null for the caller to fill in.
  return new LocatedBlock(
      b, DatanodeStorageInfo.toDatanodeInfos(storages),
      DatanodeStorageInfo.toStorageIDs(storages),
      DatanodeStorageInfo.toStorageTypes(storages),
      startOffset, corrupt,
      null);
}

/**
 * Striped variant of {@link #newLocatedBlock}: additionally carries the
 * per-storage block indices of the EC group.
 */
public static LocatedStripedBlock newLocatedStripedBlock(
    ExtendedBlock b, DatanodeStorageInfo[] storages,
    byte[] indices, long startOffset, boolean corrupt) {
  // Block token (last argument) is left null for the caller to fill in.
  return new LocatedStripedBlock(
      b, DatanodeStorageInfo.toDatanodeInfos(storages),
      DatanodeStorageInfo.toStorageIDs(storages),
      DatanodeStorageInfo.toStorageTypes(storages),
      indices, startOffset, corrupt,
      null);
}
/**
 * Dispatch to the striped or contiguous LocatedBlock factory depending on
 * the block's type; striped blocks carry their per-storage block indices.
 */
public static LocatedBlock newLocatedBlock(ExtendedBlock eb, BlockInfo info,
    DatanodeStorageInfo[] locs, long offset) throws IOException {
  if (info.isStriped()) {
    return newLocatedStripedBlock(eb, locs,
        info.getUnderConstructionFeature().getBlockIndices(),
        offset, false);
  }
  return newLocatedBlock(eb, locs, offset, false);
}
/**
 * A simple result enum for the result of
 * {@link BlockManager#processMisReplicatedBlock(BlockInfo)}.
 */
enum MisReplicationResult {
  /** The block should be invalidated since it belongs to a deleted file. */
  INVALID,
  /** The block is currently under-replicated. */
  UNDER_REPLICATED,
  /** The block is currently over-replicated. */
  OVER_REPLICATED,
  /** A decision can't currently be made about this block. */
  POSTPONE,
  /** The block is under construction, so should be ignored. */
  UNDER_CONSTRUCTION,
  /** The block is properly replicated. */
  OK
}
/** Stop background threads, close the blocks map and unregister the MXBean. */
public void shutdown() {
  stopReconstructionInitializer();
  blocksMap.close();
  MBeans.unregister(mxBeanName);
  mxBeanName = null;
}

/** Reset block ID state and drop all queued decisions and stored blocks. */
public void clear() {
  blockIdManager.clear();
  clearQueues();
  blocksMap.clear();
}

/** @return the manager handing out block report leases. */
public BlockReportLeaseManager getBlockReportLeaseManager() {
  return blockReportLeaseManager;
}

@Override // BlockStatsMXBean
public Map<StorageType, StorageTypeStats> getStorageTypeStats() {
  return datanodeManager.getDatanodeStatistics().getStorageTypeStats();
}
/**
 * Initialize replication queues.
 */
public void initializeReplQueues() {
  LOG.info("initializing replication queues");
  processMisReplicatedBlocks();
  initializedReplQueues = true;
}

/**
 * Check if replication queues are to be populated
 * @return true when node is HAState.Active and not in the very first safemode
 */
public boolean isPopulatingReplQueues() {
  if (!shouldPopulateReplQueues()) {
    return false;
  }
  return initializedReplQueues;
}

/** Directly set the queues-initialized flag (used when toggling HA state). */
public void setInitializedReplQueues(boolean v) {
  this.initializedReplQueues = v;
}
/**
 * @return true when the current HA state says replication queues should be
 * populated; false while no HA context or state is available (e.g. during
 * startup).
 */
public boolean shouldPopulateReplQueues() {
  final HAContext haContext = namesystem.getHAContext();
  // Braced the guard (single-statement ifs without braces violate the
  // project's Java style and are error-prone under later edits).
  if (haContext == null || haContext.getState() == null) {
    return false;
  }
  return haContext.getState().shouldPopulateReplQueues();
}
/** @return whether blocks with future generation stamps are being postponed. */
boolean getShouldPostponeBlocksFromFuture() {
  return shouldPostponeBlocksFromFuture;
}

// async processing of an action, used for IBRs.
public void enqueueBlockOp(final Runnable action) throws IOException {
  try {
    blockReportThread.enqueue(action);
  } catch (InterruptedException ie) {
    // Surface the interrupt to callers as an IOException.
    throw new IOException(ie);
  }
}
// sync batch processing for a full BR.
/**
 * Run the given action on the block report processing thread and wait for
 * its result. Exceptions from the action are unwrapped and rethrown as
 * IOExceptions.
 */
public <T> T runBlockOp(final Callable<T> action)
    throws IOException {
  final FutureTask<T> future = new FutureTask<T>(action);
  enqueueBlockOp(future);
  try {
    return future.get();
  } catch (ExecutionException ee) {
    // Unwrap the cause and normalize it to an IOException.
    Throwable cause = ee.getCause();
    if (cause == null) {
      cause = ee;
    }
    if (!(cause instanceof IOException)) {
      cause = new IOException(cause);
    }
    throw (IOException)cause;
  } catch (InterruptedException ie) {
    // Re-assert interrupt status before converting to IOException.
    Thread.currentThread().interrupt();
    throw new IOException(ie);
  }
}
/**
 * Notification of a successful block recovery.
 * @param block for which the recovery succeeded
 */
public void successfulBlockRecovery(BlockInfo block) {
  pendingRecoveryBlocks.remove(block);
}

/**
 * Checks whether a recovery attempt has been made for the given block.
 * If so, checks whether that attempt has timed out.
 * @param b block for which recovery is being attempted
 * @return true if no recovery attempt has been made or
 *         the previous attempt timed out
 */
public boolean addBlockRecoveryAttempt(BlockInfo b) {
  return pendingRecoveryBlocks.add(b);
}
/**
 * Test-only: block until all queued block operations have been processed by
 * running a no-op synchronously through the queue.
 */
@VisibleForTesting
public void flushBlockOps() throws IOException {
  runBlockOp(new Callable<Void>(){
    @Override
    public Void call() {
      return null;
    }
  });
}

/** @return current length of the block operation queue. */
public int getBlockOpQueueLength() {
  return blockReportThread.queue.size();
}
/**
 * Daemon thread that serializes block report operations: actions are queued
 * by {@link #enqueueBlockOp} and executed here in batches under the
 * namesystem write lock.
 */
private class BlockReportProcessingThread extends Thread {
  // Maximum time to hold the write lock while draining the queue.
  private static final long MAX_LOCK_HOLD_MS = 4;
  // Last time a "queue full" message was logged, for rate limiting.
  private long lastFull = 0;

  private final BlockingQueue<Runnable> queue =
      new ArrayBlockingQueue<Runnable>(1024);

  BlockReportProcessingThread() {
    super("Block report processor");
    setDaemon(true);
  }

  @Override
  public void run() {
    try {
      processQueue();
    } catch (Throwable t) {
      // A failure here would silently stop block report processing;
      // terminate the process instead.
      ExitUtil.terminate(1,
          getName() + " encountered fatal exception: " + t);
    }
  }

  private void processQueue() {
    while (namesystem.isRunning()) {
      NameNodeMetrics metrics = NameNode.getNameNodeMetrics();
      try {
        Runnable action = queue.take();
        // batch as many operations in the write lock until the queue
        // runs dry, or the max lock hold is reached.
        int processed = 0;
        namesystem.writeLock();
        metrics.setBlockOpsQueued(queue.size() + 1);
        try {
          long start = Time.monotonicNow();
          do {
            processed++;
            action.run();
            if (Time.monotonicNow() - start > MAX_LOCK_HOLD_MS) {
              break;
            }
            action = queue.poll();
          } while (action != null);
        } finally {
          namesystem.writeUnlock();
          metrics.addBlockOpsBatched(processed - 1);
        }
      } catch (InterruptedException e) {
        // ignore unless thread was specifically interrupted.
        if (Thread.interrupted()) {
          break;
        }
      }
    }
    queue.clear();
  }

  /**
   * Queue an action; if the queue is full, log (rate-limited) and block
   * until space is available.
   */
  void enqueue(Runnable action) throws InterruptedException {
    if (!queue.offer(action)) {
      if (!isAlive() && namesystem.isRunning()) {
        ExitUtil.terminate(1, getName()+" is not running");
      }
      long now = Time.monotonicNow();
      if (now - lastFull > 4000) {
        lastFull = now;
        LOG.info("Block report queue is full");
      }
      queue.put(action);
    }
  }
}
/**
 * @return redundancy thread.
 */
@VisibleForTesting
Daemon getRedundancyThread() {
  return redundancyThread;
}

/** @return the block ID manager owned by this block manager. */
public BlockIdManager getBlockIdManager() {
  return blockIdManager;
}
/** Delegates to {@link BlockIdManager}: next generation stamp (legacy or current). */
public long nextGenerationStamp(boolean legacyBlock) throws IOException {
  return blockIdManager.nextGenerationStamp(legacyBlock);
}

/** Delegates to {@link BlockIdManager}: whether the block uses a legacy generation stamp. */
public boolean isLegacyBlock(Block block) {
  return blockIdManager.isLegacyBlock(block);
}

/** Delegates to {@link BlockIdManager}: allocate the next block ID for the given block type. */
public long nextBlockId(BlockType blockType) {
  return blockIdManager.nextBlockId(blockType);
}

/** Delegates to {@link BlockIdManager}: whether the block's generation stamp is in the future. */
boolean isGenStampInFuture(Block block) {
  return blockIdManager.isGenStampInFuture(block);
}

/** @return whether the replica of blk on datanode d is marked corrupt. */
boolean isReplicaCorrupt(BlockInfo blk, DatanodeDescriptor d) {
  return corruptReplicas.isReplicaCorrupt(blk, d);
}
/**
 * When collecting locations of a striped block, record the block index of
 * the replica on {@code storage} at position {@code i} of
 * {@code blockIndices} (when that array is non-null) and return the next
 * write position.
 */
private int setBlockIndices(BlockInfo blk, byte[] blockIndices, int i,
    DatanodeStorageInfo storage) {
  // TODO this can be more efficient
  if (blockIndices == null) {
    return i;
  }
  final byte index = ((BlockInfoStriped) blk).getStorageBlockIndex(storage);
  assert index >= 0;
  blockIndices[i] = index;
  return i + 1;
}
/** Recovery timeout in ms: heartbeat interval times the fixed multiplier. */
private static long getBlockRecoveryTimeout(long heartbeatIntervalSecs) {
  return TimeUnit.SECONDS.toMillis(heartbeatIntervalSecs *
      BLOCK_RECOVERY_TIMEOUT_MULTIPLIER);
}

/** Test-only override of the pending-recovery timeout interval. */
@VisibleForTesting
public void setBlockRecoveryTimeout(long blockRecoveryTimeout) {
  pendingRecoveryBlocks.setRecoveryTimeoutInterval(blockRecoveryTimeout);
}

/** Test-only accessor for the provided storage map. */
@VisibleForTesting
public ProvidedStorageMap getProvidedStorageMap() {
  return providedStorageMap;
}
/**
 * Create SPS manager instance. It manages the user invoked sps paths and does
 * the movement.
 *
 * @param conf
 *          configuration
 * @return true if the instance is successfully created, false otherwise.
 */
private boolean createSPSManager(final Configuration conf) {
  return createSPSManager(conf, null);
}

/**
 * Create SPS manager instance. It manages the user invoked sps paths and does
 * the movement.
 *
 * @param conf
 *          configuration
 * @param spsMode
 *          satisfier mode
 * @return true if the instance is successfully created, false otherwise.
 */
public boolean createSPSManager(final Configuration conf,
    final String spsMode) {
  // sps manager manages the user invoked sps paths and does the movement.
  // StoragePolicySatisfier(SPS) configs
  boolean storagePolicyEnabled = conf.getBoolean(
      DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY,
      DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_DEFAULT);
  // An explicitly passed mode wins; otherwise fall back to configuration.
  String modeVal = spsMode;
  if (org.apache.commons.lang3.StringUtils.isBlank(modeVal)) {
    modeVal = conf.get(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_DEFAULT);
  }
  StoragePolicySatisfierMode mode = StoragePolicySatisfierMode
      .fromString(modeVal);
  if (!storagePolicyEnabled || mode == StoragePolicySatisfierMode.NONE) {
    LOG.info("Storage policy satisfier is disabled");
    return false;
  }
  spsManager = new StoragePolicySatisfyManager(conf, namesystem);
  return true;
}

/**
 * Nullify SPS manager as this feature is disabled fully.
 */
public void disableSPS() {
  spsManager = null;
}

/**
 * @return sps manager.
 */
public StoragePolicySatisfyManager getSPSManager() {
  return spsManager;
}
}
| {
"content_hash": "17101cabf42662cc583934c7f9999c49",
"timestamp": "",
"source": "github",
"line_count": 5047,
"max_line_length": 138,
"avg_line_length": 38.389142064592825,
"alnum_prop": 0.6780593548387097,
"repo_name": "dierobotsdie/hadoop",
"id": "675221a1ec52d4ec859306ae56d581bf94483e48",
"size": "194556",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "69197"
},
{
"name": "C",
"bytes": "1491086"
},
{
"name": "C++",
"bytes": "1853607"
},
{
"name": "CMake",
"bytes": "57182"
},
{
"name": "CSS",
"bytes": "60295"
},
{
"name": "HTML",
"bytes": "244815"
},
{
"name": "Java",
"bytes": "71313860"
},
{
"name": "JavaScript",
"bytes": "791760"
},
{
"name": "Protocol Buffer",
"bytes": "287004"
},
{
"name": "Python",
"bytes": "23553"
},
{
"name": "Shell",
"bytes": "394451"
},
{
"name": "TLA",
"bytes": "14993"
},
{
"name": "TeX",
"bytes": "19322"
},
{
"name": "XSLT",
"bytes": "16894"
}
],
"symlink_target": ""
} |
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<helpdesk@ensembl.org>.
=cut
# This script looks for any minor allele frequencies of 0.5
# and then ensures that the minor allele stored in the variation
# table is not the reference allele
use strict;
use warnings;
use Getopt::Long;
use Bio::EnsEMBL::Registry;
# Command-line options: an Ensembl registry file is required.
my $registry_file;
my $help;
GetOptions(
"registry|r=s" => \$registry_file,
"help|h" => \$help,
);
unless ($registry_file) {
print "Must supply a registry file...\n" unless $help;
$help = 1;
}
if ($help) {
print "Usage: $0 --registry <reg_file>\n";
exit(0);
}
my $registry = 'Bio::EnsEMBL::Registry';
$registry->load_all($registry_file);
# Raw DBI handle for the human variation database.
my $dbh = $registry->get_adaptor(
'human', 'variation', 'variation'
)->dbc->db_handle;
# Variations with MAF 0.5 whose stored minor allele equals the first
# (reference) allele of the variation_feature allele string.
my $get_vars_sth = $dbh->prepare(qq{
SELECT v.variation_id, v.name, v.minor_allele
FROM variation v, variation_feature vf
WHERE v.minor_allele_freq = 0.5
AND v.variation_id = vf.variation_id
AND v.minor_allele = SUBSTR(vf.allele_string,1,1)
});
# Synonym names for a variation, used as fallback rsIDs.
my $get_syns_sth = $dbh->prepare(qq{
SELECT name
FROM variation_synonym
WHERE variation_id = ?
});
# Alleles recorded in the maf table for an rsID (numeric part).
my $get_maf_sth = $dbh->prepare(qq{
SELECT allele
FROM maf
WHERE snp_id = ?
});
# Update statement to correct the stored minor allele.
my $fix_maf_sth = $dbh->prepare(qq{
UPDATE variation
SET minor_allele = ?
WHERE variation_id = ?
});
$get_vars_sth->execute;
my $count = 0;
# Return the first allele recorded in the maf table for $snp_id that differs
# from $old_allele, or undef when no alternative allele exists.
sub get_new_allele {
    my ($snp_id, $old_allele) = @_;
    $get_maf_sth->execute($snp_id);
    while (my ($allele) = $get_maf_sth->fetchrow_array) {
        return $allele if $allele ne $old_allele;
    }
    return undef;
}
# For each affected variation, find the alternative minor allele by its rsID
# (falling back to rs synonyms) and rewrite the stored minor allele.
while (my ($v_id, $name, $old_allele) = $get_vars_sth->fetchrow_array) {
# The maf table is keyed on the numeric part of the rsID.
$name =~ s/^rs//;
my $new_allele = get_new_allele($name, $old_allele);
unless ($new_allele) {
# Not found under the primary name: try each rs synonym in turn.
$get_syns_sth->execute($v_id);
while (my ($name) = $get_syns_sth->fetchrow_array) {
next unless $name =~ /^rs/;
$name =~ s/^rs//;
$new_allele = get_new_allele($name, $old_allele);
last if $new_allele;
}
}
die "Didn't find alternative minor allele for variation $v_id?" unless $new_allele;
$count++;
$fix_maf_sth->execute($new_allele, $v_id);
}
print "Corrected $count alleles\n";
| {
"content_hash": "0ee73965e3add5e91be3f40ee517f7d1",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 87,
"avg_line_length": 21.408333333333335,
"alnum_prop": 0.5932269365511872,
"repo_name": "willmclaren/ensembl-variation",
"id": "4c3fb070c7990e0dc4e3939a76f103f38616c387",
"size": "3289",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/import/post_process_maf.pl",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "44285"
},
{
"name": "Makefile",
"bytes": "517"
},
{
"name": "PHP",
"bytes": "4029"
},
{
"name": "Perl",
"bytes": "5165898"
},
{
"name": "Python",
"bytes": "519"
},
{
"name": "Shell",
"bytes": "5753"
}
],
"symlink_target": ""
} |
require "formula"
require "service"
describe Homebrew::Service do
# Minimal formula stub; each example installs its own `service do` block.
let(:klass) do
Class.new(Formula) do
url "https://brew.sh/test-1.0.tbz"
end
end
let(:name) { "formula_name" }
let(:path) { Formulary.core_path(name) }
let(:spec) { :stable }
# Formula instance whose service definition the examples exercise.
let(:f) { klass.new(name, path, spec) }
# The standard PATH exposed to services: brew prefixes plus system dirs.
describe "#std_service_path_env" do
it "returns valid std_service_path_env" do
f.class.service do
run opt_bin/"beanstalkd"
run_type :immediate
environment_variables PATH: std_service_path_env
error_log_path var/"log/beanstalkd.error.log"
log_path var/"log/beanstalkd.log"
working_dir var
keep_alive true
end
path = f.service.std_service_path_env
expect(path).to eq("#{HOMEBREW_PREFIX}/bin:#{HOMEBREW_PREFIX}/sbin:/usr/bin:/bin:/usr/sbin:/sbin")
end
end
# The command a user would run by hand: env assignments (PATH excluded)
# followed by the run command.
describe "#manual_command" do
it "returns valid manual_command" do
f.class.service do
run "#{HOMEBREW_PREFIX}/bin/beanstalkd"
run_type :immediate
environment_variables PATH: std_service_path_env, ETC_DIR: etc/"beanstalkd"
error_log_path var/"log/beanstalkd.error.log"
log_path var/"log/beanstalkd.log"
working_dir var
keep_alive true
end
path = f.service.manual_command
expect(path).to eq("ETC_DIR=\"#{HOMEBREW_PREFIX}/etc/beanstalkd\" #{HOMEBREW_PREFIX}/bin/beanstalkd")
end

it "returns valid manual_command without variables" do
f.class.service do
run opt_bin/"beanstalkd"
run_type :immediate
environment_variables PATH: std_service_path_env
error_log_path var/"log/beanstalkd.error.log"
log_path var/"log/beanstalkd.log"
working_dir var
keep_alive true
end
path = f.service.manual_command
expect(path).to eq("#{HOMEBREW_PREFIX}/opt/formula_name/bin/beanstalkd")
end
end
# launchd plist generation; expected XML uses literal \t escapes to match
# the generator's tab indentation exactly.
describe "#to_plist" do
it "returns valid plist" do
f.class.service do
run [opt_bin/"beanstalkd", "test"]
run_type :immediate
environment_variables PATH: std_service_path_env, FOO: "BAR", ETC_DIR: etc/"beanstalkd"
error_log_path var/"log/beanstalkd.error.log"
log_path var/"log/beanstalkd.log"
input_path var/"in/beanstalkd"
root_dir var
working_dir var
keep_alive true
restart_delay 30
macos_legacy_timers true
end
plist = f.service.to_plist
plist_expect = <<~EOS
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
\t<key>EnvironmentVariables</key>
\t<dict>
\t\t<key>ETC_DIR</key>
\t\t<string>#{HOMEBREW_PREFIX}/etc/beanstalkd</string>
\t\t<key>FOO</key>
\t\t<string>BAR</string>
\t\t<key>PATH</key>
\t\t<string>#{HOMEBREW_PREFIX}/bin:#{HOMEBREW_PREFIX}/sbin:/usr/bin:/bin:/usr/sbin:/sbin</string>
\t</dict>
\t<key>KeepAlive</key>
\t<true/>
\t<key>Label</key>
\t<string>homebrew.mxcl.formula_name</string>
\t<key>LegacyTimers</key>
\t<true/>
\t<key>ProgramArguments</key>
\t<array>
\t\t<string>#{HOMEBREW_PREFIX}/opt/formula_name/bin/beanstalkd</string>
\t\t<string>test</string>
\t</array>
\t<key>RootDirectory</key>
\t<string>#{HOMEBREW_PREFIX}/var</string>
\t<key>RunAtLoad</key>
\t<true/>
\t<key>StandardErrorPath</key>
\t<string>#{HOMEBREW_PREFIX}/var/log/beanstalkd.error.log</string>
\t<key>StandardInPath</key>
\t<string>#{HOMEBREW_PREFIX}/var/in/beanstalkd</string>
\t<key>StandardOutPath</key>
\t<string>#{HOMEBREW_PREFIX}/var/log/beanstalkd.log</string>
\t<key>TimeOut</key>
\t<integer>30</integer>
\t<key>WorkingDirectory</key>
\t<string>#{HOMEBREW_PREFIX}/var</string>
</dict>
</plist>
EOS
expect(plist).to eq(plist_expect)
end

# Unset options must be omitted from the generated plist.
it "returns valid partial plist" do
f.class.service do
run opt_bin/"beanstalkd"
run_type :immediate
end
plist = f.service.to_plist
plist_expect = <<~EOS
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
\t<key>Label</key>
\t<string>homebrew.mxcl.formula_name</string>
\t<key>ProgramArguments</key>
\t<array>
\t\t<string>#{HOMEBREW_PREFIX}/opt/formula_name/bin/beanstalkd</string>
\t</array>
\t<key>RunAtLoad</key>
\t<true/>
</dict>
</plist>
EOS
expect(plist).to eq(plist_expect)
end
end
# systemd unit generation (Linux counterpart of the plist examples).
describe "#to_systemd_unit" do
it "returns valid unit" do
f.class.service do
run [opt_bin/"beanstalkd", "test"]
run_type :immediate
environment_variables PATH: std_service_path_env, FOO: "BAR"
error_log_path var/"log/beanstalkd.error.log"
log_path var/"log/beanstalkd.log"
input_path var/"in/beanstalkd"
root_dir var
working_dir var
keep_alive true
restart_delay 30
macos_legacy_timers true
end
unit = f.service.to_systemd_unit
std_path = "#{HOMEBREW_PREFIX}/bin:#{HOMEBREW_PREFIX}/sbin:/usr/bin:/bin:/usr/sbin:/sbin"
unit_expect = <<~EOS
[Unit]
Description=Homebrew generated unit for formula_name

[Install]
WantedBy=multi-user.target

[Service]
Type=simple
ExecStart=#{HOMEBREW_PREFIX}/opt/#{name}/bin/beanstalkd test
Restart=always
RestartSec=30
WorkingDirectory=#{HOMEBREW_PREFIX}/var
RootDirectory=#{HOMEBREW_PREFIX}/var
StandardInput=file:#{HOMEBREW_PREFIX}/var/in/beanstalkd
StandardOutput=append:#{HOMEBREW_PREFIX}/var/log/beanstalkd.log
StandardError=append:#{HOMEBREW_PREFIX}/var/log/beanstalkd.error.log
Environment=\"PATH=#{std_path}\"
Environment=\"FOO=BAR\"
EOS
expect(unit).to eq(unit_expect.strip)
end

# Unset options must be omitted from the generated unit.
it "returns valid partial unit" do
f.class.service do
run opt_bin/"beanstalkd"
run_type :immediate
end
unit = f.service.to_systemd_unit
unit_expect = <<~EOS
[Unit]
Description=Homebrew generated unit for formula_name

[Install]
WantedBy=multi-user.target

[Service]
Type=simple
ExecStart=#{HOMEBREW_PREFIX}/opt/#{name}/bin/beanstalkd
EOS
expect(unit).to eq(unit_expect)
end
end
# The raw run command as an argv array.
describe "#command" do
it "returns @run data" do
f.class.service do
run [opt_bin/"beanstalkd", "test"]
run_type :immediate
end
command = f.service.command
expect(command).to eq(["#{HOMEBREW_PREFIX}/opt/#{name}/bin/beanstalkd", "test"])
end
end
end
| {
"content_hash": "4bf551a34a582a0ce39a5309484d73d5",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 110,
"avg_line_length": 31.2863436123348,
"alnum_prop": 0.5987045902562659,
"repo_name": "claui/brew",
"id": "efa4391798944e1a55da93179a57dfb3b07c0bf5",
"size": "7148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Library/Homebrew/test/service_spec.rb",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "1709"
},
{
"name": "HTML",
"bytes": "29536"
},
{
"name": "PostScript",
"bytes": "485"
},
{
"name": "Roff",
"bytes": "97024"
},
{
"name": "Ruby",
"bytes": "5021532"
},
{
"name": "Shell",
"bytes": "227666"
},
{
"name": "Swift",
"bytes": "1788"
}
],
"symlink_target": ""
} |
<?php

/**
 * Global flash-message helper.
 *
 * Called with no arguments it returns the shared App\Http\Flash instance,
 * allowing the caller to chain its methods directly. Called with arguments
 * it records an info-level flash message.
 *
 * @param string|null $title   Message title.
 * @param string|null $message Message body.
 * @param string      $level   Severity level. NOTE(review): this parameter is
 *                             currently unused — messages always go through
 *                             $flash->info(). Confirm whether a dynamic
 *                             $flash->$level($title, $message) was intended.
 * @return mixed The Flash instance (no arguments) or the result of info().
 */
function flash($title=null, $message=null, $level='info')
{
$flash = app('App\Http\Flash');

if (func_num_args() == 0) {
return $flash;
}

return $flash->info($title,$message);
}
| {
"content_hash": "e4b5db23e907bea93efb3696fb236284",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 57,
"avg_line_length": 17.5,
"alnum_prop": 0.5571428571428572,
"repo_name": "luisedware/Learning-Laravel",
"id": "b771124d714d07114201505f56110f23a8e12a80",
"size": "210",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Build-Project-Flyer-With-Me/app/helpers.php",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "210900"
},
{
"name": "HTML",
"bytes": "348677"
},
{
"name": "JavaScript",
"bytes": "122449"
},
{
"name": "PHP",
"bytes": "435253"
},
{
"name": "Shell",
"bytes": "1357"
},
{
"name": "Vue",
"bytes": "1689"
}
],
"symlink_target": ""
} |
> :warning: **Deprecated**: This is not maintained any more, and references Oracle JDK, which no longer allows free usage and redistribution.
>
> Alternatives exist, such as [Eclipse Adoptium Temurin](https://hub.docker.com/_/eclipse-temurin) (formerly AdoptOpenJDK)
Basic [Docker](https://www.docker.com/) image to run [Java](https://www.java.com/) applications.
This is based on [Alpine Linux](http://alpinelinux.org/) to keep the size minimal (about 25% of an ubuntu-based image).
### Tags
* [`latest` or `8` or `serverjre-8`](https://github.com/jeanblanchard/docker-java/blob/master/serverjre/Dockerfile): Oracle Java 8 (Server JRE) [](https://microbadger.com/images/jeanblanchard/java "Get your own image badge on microbadger.com")
* [`jdk-8`](https://github.com/jeanblanchard/docker-java/blob/master/jdk/Dockerfile): Oracle Java 8 (JDK) [](https://microbadger.com/images/jeanblanchard/java:jdk-8 "Get your own image badge on microbadger.com")
* [`jre-8`](https://github.com/jeanblanchard/docker-java/blob/master/jre/Dockerfile): Oracle Java 8 (JRE) [](https://microbadger.com/images/jeanblanchard/java:jre-8 "Get your own image badge on microbadger.com")
Additionally, tags are created for each oracle release (e.g. `8u191`, `jdk-8u191` or `jre-8u191`).
Note: Sometimes Oracle releases two versions at the same time : a CPU (Critical Patch Update) with only critical bug
fixes, and a PSU (Patch Set Update) with additional "non-critical fixes". In this case, the CPU will be the default,
as [recommended by oracle](http://www.oracle.com/technetwork/java/javase/cpu-psu-explained-2331472.html).
If needed, PSU releases are still accessible, by using their specific release tag (e.g `jdk-8u192`)
### Usage
Example:
docker run -it --rm jeanblanchard/java:8 java -version
| {
"content_hash": "b9c4769efde838c4bd895e398e6faebd",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 314,
"avg_line_length": 80.48,
"alnum_prop": 0.7529821073558648,
"repo_name": "jeanblanchard/docker-java",
"id": "b6d4089bd188be76aa5ad4943ed085bf187b684e",
"size": "2047",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "README.md",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "10750"
},
{
"name": "Shell",
"bytes": "526"
}
],
"symlink_target": ""
} |
module CCS
  # A single XMPP connection to GCM's Cloud Connection Server (CCS).
  #
  # Outgoing messages are pulled from a Redis queue and sent over XMPP,
  # throttled by a semaphore so that at most MAX_MESSAGES are in flight.
  # Incoming stanzas are parsed and dispatched to handle_* methods
  # (ack/nack/receipt/control/upstream).
  class XMPPConnection
    include Celluloid::IO

    finalizer :close_sender

    # params:
    #   :sender_id - GCM sender id used to look up connection credentials
    #   :id        - identifier of this connection within the handler
    #   :handler   - owning handler (provides queues and lifecycle callbacks)
    def initialize(params = {})
      @config = CCS.config.connection(params[:sender_id])
      @id = params[:id]
      @handler = params[:handler]
      @running = false
      @send_messages = {}
      @semaphore = Semaphore.new(MAX_MESSAGES)
      @queue = @handler.connection_queue(@id)
      open_xmpp
    end

    # Lazily-created Redis client used for all queue operations.
    def redis
      @redis ||= ::Redis.new(CCS.config.redis)
    end

    # Opens the XMPP client connection to the configured GCM endpoint.
    def open_xmpp
      xmpp_params = {
        handler: Actor.current,
        username: "#{@config['sender_id']}@gcm.googleapis.com",
        password: @config['api_key'],
        host: CCS.config.endpoint['host'],
        port: CCS.config.endpoint['port']
      }
      @xmpp_client = XMPPSimple::Client.new(xmpp_params).connect
    end

    # Blocking send loop: atomically moves a message from the shared CCS
    # queue to this connection's in-flight queue, sends it, and remembers it
    # by message_id until an ack/nack arrives. The semaphore enforces the
    # in-flight limit.
    def sender_loop
      while @running
        next unless @semaphore.take
        msg_str = redis.brpoplpush(@handler.queues[:ccs_queue], @handler.queues[:connection])
        msg = JSON(msg_str)
        send_stanza(msg)
        @send_messages[msg['message_id']] = msg_str
      end
    end

    # Acknowledges an upstream message back to CCS.
    def ack(msg)
      CCS.logger.debug("Ack: #{msg}")
      send_stanza('to' => msg['from'],
                  'message_id' => msg['message_id'],
                  'message_type' => 'ack')
    end

    # Wraps the JSON payload in GCM's <message><gcm> stanza and writes it.
    def send_stanza(content = {})
      msg = '<message><gcm xmlns="google:mobile:data">'
      msg += content.to_json
      msg += '</gcm></message>'
      CCS.logger.debug "Write: #{msg}"
      @xmpp_client.write_data(msg)
    end

    # Initiates connection draining: stop sending and tell the handler.
    def drain
      close_sender
      @handler.drain(@id)
    end

    # Stops the sender loop; also invoked by the Celluloid finalizer.
    def close_sender
      @running = false
      @semaphore.interrupt
    end

    # Callback from the XMPP client once connected: start the send loop.
    def connected
      CCS.logger.debug('Connected')
      @running = true
      async.sender_loop
    end

    # Callback from the XMPP client on disconnect.
    def disconnected
      CCS.logger.debug('Disconnected')
      @handler.close_connection(@id)
    end

    # Parses an incoming stanza and dispatches it by message_type.
    def message(node)
      xml = Nokogiri::XML(node).remove_namespaces!
      content = JSON(xml.xpath('.//gcm').text)
      type = xml.xpath('//message').attribute('type')
      if type && type.value == 'error'
        handle_xml_error(node)
        return
      end
      return if content.nil? # discard empty messages
      CCS.logger.debug("Type: #{content['message_type']}")
      content['received_at'] = Time.now.utc.to_i
      content['message_type'] ||= 'upstream'
      if %w(ack nack receipt control upstream).include?(content['message_type'])
        CCS.logger.debug("Received #{content['message_type']} message")
        # Bug fix: the handle_* methods all take the payload as an argument,
        # so it must be passed through the dynamic dispatch.
        send("handle_#{content['message_type']}", content)
      else
        CCS.logger.info("Received unknown message type: #{content['message_type']}")
      end
    end

    def handle_xml_error(node)
      # TODO:
      # This shouldn't happen
      # but could be implemented, just to be safe
    end

    # Pushes a delivery receipt onto the receipt queue and acks it.
    def handle_receipt(content)
      CCS.logger.debug("Delivery receipt received for: #{content['message_id']}")
      # NOTE(review): content.to_s produces Ruby hash inspect syntax, not
      # JSON — confirm whether to_json was intended for queue consumers.
      redis.rpush(@handler.queues[:receipt_queue], content.to_s)
      ack(content)
    end

    # An ack frees the in-flight slot and removes the message from the
    # connection queue.
    def handle_ack(content)
      msg = @send_messages.delete(content['message_id'])
      if msg.nil?
        CCS.logger.info("Received ack for unknown message: #{content['message_id']}")
        return
      end
      CCS.logger.debug("NOT FOUND: #{msg}") if redis.lrem(@queue, -1, msg) < 1
      @semaphore.release
    end

    # A nack moves the message to the error queue and frees the slot.
    def handle_nack(content)
      msg = @send_messages.delete(content['message_id'])
      if msg.nil?
        CCS.logger.info("Received nack for unknown message: #{content['message_id']}")
        return
      end
      redis.lrem(@queue, -1, msg)
      redis.rpush(@handler.queues[:ccs_error], msg.to_s)
      @semaphore.release
    end

    # Control messages: CONNECTION_DRAINING tells us to stop using this
    # connection and open a new one.
    def handle_control(content)
      CCS.logger.info("Received control type: #{content['control_type']}")
      drain if content['control_type'] == 'CONNECTION_DRAINING'
    end

    # Upstream (device-to-cloud) messages are queued for the app and acked.
    def handle_upstream(content)
      redis.rpush(@handler.queues[:upstream_queue], content.to_s)
      ack(content)
    end
  end
end
| {
"content_hash": "1e3af85d800123eb59206d18d36407da",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 93,
"avg_line_length": 27.623287671232877,
"alnum_prop": 0.5913711877014629,
"repo_name": "l3akage/ccs",
"id": "18ff1f1d6179958da4ae46ed606af976d176202f",
"size": "4033",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/ccs/xmpp/xmpp_connection.rb",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Ruby",
"bytes": "18168"
}
],
"symlink_target": ""
} |
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: taflanidi
---
### Prerequisites
* [ ] Put an `X` between the brackets on this line if you have done all of the following:
* read our [Wiki](https://github.com/RedMadRobot/input-mask-android/wiki);
* read the entire [Known issues](https://github.com/RedMadRobot/input-mask-android#knownissues) section;
* checked that my feature request isn't already [filed](https://github.com/RedMadRobot/input-mask-android/issues);
* searched StackOverflow's [`input-mask`](https://stackoverflow.com/questions/tagged/input-mask) tag for similar problems.
### Summary
One paragraph explanation of the feature.
### Motivation
Why are we doing this? What use cases does it support? What is the expected outcome?
### Describe the solution you'd like
A clear and concise description of what you want to happen.
### Describe alternatives you've considered
A clear and concise description of the alternative solutions you've considered. Be sure to explain why existing customizability isn't suitable for this feature.
### Additional context
Add any other context or screenshots about the feature request here.
| {
"content_hash": "132327486ce912c5b41876c5c1dd74ae",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 160,
"avg_line_length": 39.96666666666667,
"alnum_prop": 0.7664720600500416,
"repo_name": "RedMadRobot/input-mask-android",
"id": "e66acd4ea9f84d8de441530e287a8956b692de7d",
"size": "1203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": ".github/ISSUE_TEMPLATE/feature_request.md",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "3195"
},
{
"name": "Kotlin",
"bytes": "147476"
}
],
"symlink_target": ""
} |
package org.gradle.performance.fixture;
import org.gradle.integtests.fixtures.executer.DurationMeasurement;
import org.gradle.performance.measure.Duration;
import org.gradle.performance.measure.MeasuredOperation;
import org.joda.time.DateTime;
/**
 * {@link DurationMeasurement} implementation that records wall-clock start and
 * end timestamps on a {@link MeasuredOperation}, together with a total duration
 * derived from {@link System#nanoTime()} for sub-millisecond accuracy.
 */
public class DurationMeasurementImpl implements DurationMeasurement {
    private final MeasuredOperation measuredOperation;
    private DateTime startTime;
    private long startTimeNanos;

    public DurationMeasurementImpl(MeasuredOperation measuredOperation) {
        this.measuredOperation = measuredOperation;
    }

    @Override
    public void start() {
        startTime = DateTime.now();
        startTimeNanos = System.nanoTime();
    }

    @Override
    public void stop() {
        DateTime endTime = DateTime.now();
        // Elapsed time is taken from the monotonic nanosecond clock rather
        // than the wall-clock timestamps, then truncated to milliseconds.
        long elapsedNanos = System.nanoTime() - startTimeNanos;
        measuredOperation.setStart(startTime);
        measuredOperation.setEnd(endTime);
        measuredOperation.setTotalTime(Duration.millis(elapsedNanos / 1000000L));
    }

    /**
     * Measures the duration of {@code runnable} into {@code measuredOperation},
     * recording the stop time even when the runnable throws.
     */
    public static void measure(MeasuredOperation measuredOperation, Runnable runnable) {
        DurationMeasurementImpl measurement = new DurationMeasurementImpl(measuredOperation);
        measurement.start();
        try {
            runnable.run();
        } finally {
            measurement.stop();
        }
    }
}
| {
"content_hash": "81ee7db4d024d922cd12973432f13665",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 101,
"avg_line_length": 31.023255813953487,
"alnum_prop": 0.7083958020989505,
"repo_name": "gstevey/gradle",
"id": "d47d77d44bd50e9e83b37631ed9b54ba784ce225",
"size": "1949",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "subprojects/internal-performance-testing/src/main/groovy/org/gradle/performance/fixture/DurationMeasurementImpl.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "277"
},
{
"name": "Brainfuck",
"bytes": "54"
},
{
"name": "C",
"bytes": "98528"
},
{
"name": "C++",
"bytes": "1806123"
},
{
"name": "CSS",
"bytes": "47413"
},
{
"name": "CoffeeScript",
"bytes": "620"
},
{
"name": "GAP",
"bytes": "212"
},
{
"name": "Gherkin",
"bytes": "191"
},
{
"name": "Groovy",
"bytes": "18963577"
},
{
"name": "HTML",
"bytes": "28452"
},
{
"name": "Java",
"bytes": "19713820"
},
{
"name": "JavaScript",
"bytes": "204280"
},
{
"name": "Kotlin",
"bytes": "4763"
},
{
"name": "Objective-C",
"bytes": "652"
},
{
"name": "Objective-C++",
"bytes": "441"
},
{
"name": "Python",
"bytes": "57"
},
{
"name": "Ruby",
"bytes": "1087"
},
{
"name": "Scala",
"bytes": "23713"
},
{
"name": "Shell",
"bytes": "6858"
},
{
"name": "XSLT",
"bytes": "35797"
}
],
"symlink_target": ""
} |
require 'hashie'
module Openbeautyfacts
  # A label tag (e.g. "organic") on the Open Beauty Facts website,
  # backed by a Hashie::Mash of the scraped attributes.
  class Label < Hashie::Mash

    # TODO: Add more locales
    LOCALE_PATHS = {
      'fr' => 'labels',
      'uk' => 'labels',
      'us' => 'labels',
      'world' => 'labels'
    }

    class << self

      # Fetch every label listed on the locale's labels page.
      # Returns nil when no labels path is known for the locale.
      def all(locale: DEFAULT_LOCALE, domain: DEFAULT_DOMAIN)
        path = LOCALE_PATHS[locale]
        return unless path
        Product.tags_from_page(self, "https://#{locale}.#{domain}/#{path}")
      end

    end

    # Products carrying this label, scraped from the label's page.
    # Returns nil when the label has no URL.
    def products(page: -1)
      return unless url
      Product.from_website_page(url, page: page, products_count: products_count)
    end

  end
end
| {
"content_hash": "3766a5ad77e4ff0559991a290015fba3",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 87,
"avg_line_length": 19.78787878787879,
"alnum_prop": 0.55895865237366,
"repo_name": "openfoodfacts/openbeautyfacts-ruby",
"id": "67e15755fd9bd987cae7378fbca93d2bf6f91ebe",
"size": "653",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "lib/openbeautyfacts/label.rb",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Ruby",
"bytes": "43718"
}
],
"symlink_target": ""
} |
<!DOCTYPE html>
<html lang="en-us" dir="ltr" itemscope itemtype="http://schema.org/Article">
<head>
<meta charset="utf-8" />
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<title>José e Pilar</title>
<meta name="author" content="" />
<meta name="description"
content="Este é um documentário que conta, com imagens caseiras e inusitadas em eventos públicos, os últimos anos do escritor português ganhador do prêmio Nobel José Saramago e sua esposa, Pilar. Por meio de..."/>
<meta name="yandex-verification" content="48a8210fc043c5e8" />
<meta name="generator" content="Hugo 0.54.0" />
<meta itemprop="name" content="José e Pilar"/>
<meta itemprop="description"
content="Este é um documentário que conta, com imagens caseiras e inusitadas em eventos públicos, os últimos anos do escritor português ganhador do prêmio Nobel José Saramago e sua esposa, Pilar. Por meio de..."/>
<meta itemprop="image"
content="/img/logo.svg"/>
<meta property="og:title" content="José e Pilar"/>
<meta property="og:type"
content="article"/>
<meta property="og:url" content="http://www.cinetenisverde.com.br/jose-e-pilar/"/>
<meta property="og:image"
content="/img/logo.svg"/>
<meta property="og:description"
content="Este é um documentário que conta, com imagens caseiras e inusitadas em eventos públicos, os últimos anos do escritor português ganhador do prêmio Nobel José Saramago e sua esposa, Pilar. Por meio de..."/>
<meta property="og:site_name" content="Cine Tênis Verde"/>
<meta property="article:published_time"
content="2010-11-05T00:00:00+00:00"/>
<meta property="article:section" content="post"/>
<meta name="twitter:card" content="summary"/>
<meta name="twitter:site"
content=""/>
<meta name="twitter:title" content="José e Pilar"/>
<meta name="twitter:description"
content="Este é um documentário que conta, com imagens caseiras e inusitadas em eventos públicos, os últimos anos do escritor português ganhador do prêmio Nobel José Saramago e sua esposa, Pilar. Por meio de..."/>
<meta name="twitter:creator"
content=""/>
<meta name="twitter:image:src"
content="/img/logo.svg"/>
<link rel="stylesheet" type="text/css" href="/css/capsule.min.css"/>
<link rel="stylesheet" type="text/css" href="/css/custom.css"/>
<script>
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','https://www.google-analytics.com/analytics.js','ga');
ga('create', 'UA-50557403-1', 'auto');
ga('send', 'pageview');
</script>
<link rel="apple-touch-icon" href="/img/apple-touch-icon.png"/>
<link rel="icon" href="/img/favicon.ico"/>
</head>
<body style="min-height:100vh;display:flex;flex-direction:column">
<nav class="navbar has-shadow is-white"
role="navigation" aria-label="main navigation">
<div class="container">
<div class="navbar-brand">
<a class="navbar-item" href="/">
<img alt="Brand" src="/img/brand.svg">
<div class="title is-4"> Cine Tênis Verde</div>
</a>
<label class="button navbar-burger is-white" for="navbar-burger-state">
<span></span>
<span></span>
<span></span>
</label>
</div>
<input type="checkbox" id="navbar-burger-state"/>
<div class="navbar-menu">
<div class="navbar-end">
<a href="/post"
class="navbar-item
">search
</a>
<a href="https://twitter.com/cinetenisverde"
class="navbar-item
">twitter
</a>
<a href="/index.xml"
class="navbar-item
">rss
</a>
</div>
</div>
</div>
</nav>
<section class="section" style="flex:1">
<div class="container">
<p class="title">José e Pilar</p>
<p class="subtitle"><span class="entry-sidebar-stars">
★★★★★
</span>
Wanderley Caloni, <a href="https://github.com/Caloni/cinetenisverde/commits/master/content/post/jose-e-pilar.md">November 5, 2010</a></p>
<p><p>
<div class="content">
<p>Este é um documentário que conta, com imagens caseiras e inusitadas em eventos públicos, os últimos anos do escritor português ganhador do prêmio Nobel José Saramago e sua esposa, Pilar. Por meio de uma semi-intrusão na vida alheia do casal, acompanhamos as viagens que ambos fizeram pelo mundo afora após ele ter ganho o prêmio.</p>
<p>Com esse filme percebemos a dedicação de sua esposa para com a carreira do marido, além do reconhecimento deste que, sem Pilar, não seria nem a metade do que era. Dentro desse ponto de vista é tocante perceber que o filme retrata exatamente esses dois personagens dessa maneira distinta. Se de um lado vemos Pilar ir de um canto ao outro, com uma câmera que denota movimento constante (e como por vezes é percebido ela falando enquanto está dentro de um ônibus ou algo que o valha), do outro lado temos a reflexão e serenidade de Saramago, sempre recheado com seus pensamentos originais e inusitados, ou muitas vezes com uma cena um tanto parada sendo banhada de sabedoria com a narração em off do próprio Saramago e alguma passagem de um de seus livros.</p>
<p>Para os fãs, temos momentos que são especialmente emocionantes, pois acompanhamos também a escrita de um novo livro, do início ao fim, além da recaída na saúde que teve no meio dessa nova obra.</p>
<p>De acordo com o próprio Saramago, tudo é autobiográfico, pois tudo o que fazemos é o que nos define e que faz marcas no mundo, como esse belo documentário demonstra, aparentemente criado de maneira informal e amadora, e que acaba, por isso mesmo, dando uma espontaneidade na história que pouco se vê em documentários mais bem formatados.</p>
<p>É revoltante e chocante a leitura de cartas agredindo o escritor por este ser ateu ou comunista. Muitas, em sua ignorância, desconhecem a própria existência do escritor e sequer sabem se trata-se de um homem ou mulher (como uma carta que o tratava como Sara Mago).</p>
<p>O monólogo sobre sua crença em Deus, logo no início, dentro do carro, é breve e resume completamente tudo que se diz respeito às suas descrenças. Porém, voltamos ao tema uma vez ou outra, pois é uma das pedras no sapato de muita gente.</p>
<p>Os “moinhos de vento” que vemos ao fundo, em uma das imagens iniciais, são os mesmos nas imagens finais, como se o filme quisesse realmente criar um pequeno arco em torno de si, como é testemunhado na cena inicial (dedicatória a Pilar) que é repetido no final (“nos encontramos em outro sítio”). Ou o fato de José se reencontrar com os ossos da pata do elefante que é tema de seu último livro.</p>
<p>Ao mesmo tempo, vemos a opinião do celestial igualmente renegado por sua esposa, de uma forma diferente, mais baseada na ação, característica da personagem: ela explica como a necessidade de estar sempre em movimento na vida, pois a eternidade é algo impossível de cabar em nossa cabeça, e será o que teremos após a morte.</p>
<p>As memórias de seu diário, como uma pequena nota a cada dia, são narradas igualmente em off, como passagens de seus livros, e ganham um significado a mais por estarmos vendo cenas exatamente do dia em que ele fez a nota.</p>
<p>O pós-casamento, além de naturalmente emocionante, possui muito mais significado, pois aquele casal já vivera seus 20 anos juntos, e a dedicação de ambos um pelo outro já foi provada diversas vezes; o casamento ganha o significado muito mais enriquecedor de prêmio final do que uma promessa inicial. E o fato de acompanharmos a vida do casal já há mais de uma hora torna essa cena enriquecedora da relação de ambos.</p>
<p>Combinando palavras e imagens de maneira a um complementar o outro, José e Pilar nunca perde o interesse, pois é enriquecido ora pelos textos e pensamentos de Saramago, ora pela agenda lotada que sua esposa insistentemente tenta organizar.</p>
<p>Com uma fotografia pálida, amarelada, talvez prenunciando o fim dos dias do escritos, ou talvez a nostalgia de ter vivido esses últimos momentos. Não se sabe ao certo, mas possui uma certa uniformidade.</p>
<p>Mais uma cena igualmente emocionante, Meirelles e ele vendo o filme juntos.</p>
<a href="https://www.imdb.com/title/tt1789810/mediaviewer/" target="ctvimg">Imagens</a> e créditos no <a title="IMDB: Internet Movie DataBase" href="http://www.imdb.com/title/tt1789810">IMDB</a>.
</div>
<span class="entry-sidebar-stars">
★★★★★
</span>
José e Pilar ● José e Pilar. José e Pilar (Portugal, 2010). Dirigido por Miguel Gonçalves Mendes. Com Joao Afonso, Àngels Barceló, Pilar del Río, Juan Echanove, Gael García Bernal, Gabriel García Márquez, Tarja Halonen, Paco Ibáñez, Tomás Eloy Martínez. ● Nota: 5/5. Categoria: movies. Publicado em 2010-11-05. Texto escrito por Wanderley Caloni.
<p><br>Quer <a href="https://twitter.com/search?q=@cinetenisverde Jos%c3%a9%20e%20Pilar">comentar</a>?<br></p>
</div>
</section>
<section class="section">
<br>
<div class="container">
<div class="is-flex">
<span>
<a class="button">Share</a>
</span>
<span>
<a class="button"
href="https://www.facebook.com/sharer/sharer.php?u=http%3a%2f%2fwww.cinetenisverde.com.br%2fjose-e-pilar%2f">
<span class="icon"><i class="fa fa-facebook"></i></span>
</a>
<a class="button"
href="https://twitter.com/intent/tweet?url=http%3a%2f%2fwww.cinetenisverde.com.br%2fjose-e-pilar%2f&text=Jos%c3%a9%20e%20Pilar">
<span class="icon"><i class="fa fa-twitter"></i></span>
</a>
<a class="button"
href="https://news.ycombinator.com/submitlink?u=http%3a%2f%2fwww.cinetenisverde.com.br%2fjose-e-pilar%2f">
<span class="icon"><i class="fa fa-hacker-news"></i></span>
</a>
<a class="button"
href="https://reddit.com/submit?url=http%3a%2f%2fwww.cinetenisverde.com.br%2fjose-e-pilar%2f&title=Jos%c3%a9%20e%20Pilar">
<span class="icon"><i class="fa fa-reddit"></i></span>
</a>
<a class="button"
href="https://plus.google.com/share?url=http%3a%2f%2fwww.cinetenisverde.com.br%2fjose-e-pilar%2f">
<span class="icon"><i class="fa fa-google-plus"></i></span>
</a>
<a class="button"
href="https://www.linkedin.com/shareArticle?url=http%3a%2f%2fwww.cinetenisverde.com.br%2fjose-e-pilar%2f&title=Jos%c3%a9%20e%20Pilar">
<span class="icon"><i class="fa fa-linkedin"></i></span>
</a>
<a class="button"
href="https://www.tumblr.com/widgets/share/tool?canonicalUrl=http%3a%2f%2fwww.cinetenisverde.com.br%2fjose-e-pilar%2f&title=Jos%c3%a9%20e%20Pilar&caption=">
<span class="icon"><i class="fa fa-tumblr"></i></span>
</a>
<a class="button"
href="https://pinterest.com/pin/create/bookmarklet/?media=%2fimg%2flogo.svg&url=http%3a%2f%2fwww.cinetenisverde.com.br%2fjose-e-pilar%2f&description=Jos%c3%a9%20e%20Pilar">
<span class="icon"><i class="fa fa-pinterest"></i></span>
</a>
<a class="button"
href="whatsapp://send?text=http%3a%2f%2fwww.cinetenisverde.com.br%2fjose-e-pilar%2f">
<span class="icon"><i class="fa fa-whatsapp"></i></span>
</a>
<a class="button"
href="https://web.skype.com/share?url=http%3a%2f%2fwww.cinetenisverde.com.br%2fjose-e-pilar%2f">
<span class="icon"><i class="fa fa-skype"></i></span>
</a>
</span>
</div>
</div>
<br>
</section>
<footer class="footer">
<div class="container">
<nav class="level">
<div class="level-right has-text-centered">
<div class="level-item">
<a class="button" href="http://www.cinetenisverde.com.br/">
<span class="icon"><i class="fa fa-home"></i></span>
</a>
<a class="button" href="/post">
<span class="icon"><i class="fa fa-search"></i></span>
</a>
<a class="button" href="https://twitter.com/cinetenisverde">
<span class="icon"><i class="fa fa-twitter"></i></span>
</a>
<a class="button" href="/index.xml">
<span class="icon"><i class="fa fa-rss"></i></span>
</a>
</div>
</div>
</nav>
</div>
</footer>
</body>
</html>
| {
"content_hash": "3317b7264e7bdb5da073c0b9d81dd61c",
"timestamp": "",
"source": "github",
"line_count": 310,
"max_line_length": 761,
"avg_line_length": 42.91290322580645,
"alnum_prop": 0.6462452078478539,
"repo_name": "cinetenisverde/cinetenisverde.github.io",
"id": "887617f01fbea60f5cfe3aa276638415865ba9c5",
"size": "13439",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jose-e-pilar/index.html",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "533"
},
{
"name": "HTML",
"bytes": "31113501"
},
{
"name": "JavaScript",
"bytes": "3266"
},
{
"name": "Python",
"bytes": "2943"
}
],
"symlink_target": ""
} |
<?php declare(strict_types = 1);
/**
* Jyxo PHP Library
*
* LICENSE
*
* This source file is subject to the new BSD license that is bundled
* with this package in the file license.txt.
* It is also available through the world-wide-web at this URL:
* https://github.com/jyxo/php/blob/master/license.txt
*/
namespace Jyxo\Input\Filter;
use function array_filter;
use function is_array;
use function trim;
/**
 * Filter for trimming whitespace.
 *
 * When the input is an array, items that become empty (falsy) after trimming
 * are removed from the result.
 *
 * @copyright Copyright (c) 2005-2011 Jyxo, s.r.o.
 * @license https://github.com/jyxo/php/blob/master/license.txt
 * @author Jan Pěček
 * @author Jaroslav Hanslík
 */
class Trim extends AbstractFilter
{

	/**
	 * Filters a value, dropping empty items from arrays.
	 *
	 * @phpcsSuppress SlevomatCodingStandard.TypeHints.ReturnTypeHint.MissingNativeTypeHint
	 * @param mixed $in Value to be filtered
	 * @return mixed Trimmed value, or array with empty items removed
	 */
	public function filter($in)
	{
		$filtered = parent::filter($in);

		// array_filter() with no callback removes falsy (empty) values.
		return is_array($filtered) ? array_filter($filtered) : $filtered;
	}

	/**
	 * Trims whitespace from a single value.
	 *
	 * @phpcsSuppress SlevomatCodingStandard.TypeHints.ParameterTypeHint.MissingNativeTypeHint
	 * @phpcsSuppress SlevomatCodingStandard.TypeHints.ReturnTypeHint.MissingNativeTypeHint
	 * @param string $in Value to trim
	 * @return string Trimmed string
	 */
	protected function filterValue($in)
	{
		return trim((string) $in);
	}

}
| {
"content_hash": "a4d882058df450d131865208a5b0870b",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 91,
"avg_line_length": 21.634920634920636,
"alnum_prop": 0.7057960381511372,
"repo_name": "jyxo/php",
"id": "b2ebdaf70bf4b731484f54acf1b39623c2f34f6e",
"size": "1366",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Jyxo/Input/Filter/Trim.php",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "36916"
},
{
"name": "HTML",
"bytes": "163335"
},
{
"name": "PHP",
"bytes": "652139"
}
],
"symlink_target": ""
} |
package igel.batrak;
import java.util.Collection;
import java.util.Set;
/**
* Represents a set of tasks marked by the same set of tags.
*/
public interface TaskCategory extends TaskSet {
/**
* Returns tags of this category.
*
* @return the set of tags.
*/
public Set<String> tags();
/**
* Returns sub-category.
*
* @param tags an additional tags.
* @return the sub-category of tasks marked both by
* tags of this category and all tags from provided set.
*/
public TaskCategory sub(String... tags);
/**
* Returns sub-category.
*
* @param tags an additional tags.
* @return the sub-category of tasks marked both by
* tags of this category and all tags from provided set.
*/
public TaskCategory sub(Collection<String> tags);
/**
* Returns a set of waiting tasks.
*
* @return the set of tasks having {@link State#CREATED} state.
*/
public TaskSet waiting();
/**
* Returns a set of running tasks.
*
* @return the set of tasks having {@link State#STARTED} state.
*/
public TaskSet running();
/**
* Adds a task listener to this category of tasks.
*
* @param taskListener the task listener to add.
*/
public void addTaskListener(TaskListener<Object, Object> taskListener);
/**
* Adds a task listener to this category of tasks.
* <p/>
* Task listener can be sticky or not.
* If sticky listener is removed it will receive callbacks from
* all tasks started before removal. Non-sticky listener won't
* receive any callbacks after removal.
* <p/>
* Listeners are sticky by default.
*
* @param taskListener the task listener to add.
* @param sticky {@code true} if task listener should be sticky otherwise {@code false}.
*/
public void addTaskListener(TaskListener<Object, Object> taskListener, boolean sticky);
/**
* Removes a task listener from this category of tasks.
*
* @param taskListener the task listener to remove.
*/
public void removeTaskListener(TaskListener<Object, Object> taskListener);
/**
* Starts an execution of task.
*
* @param task the task to execute.
* @param <Output> type of output.
* @return task handler.
*/
public <Output> TaskHandler<Void, Output> execute(Task<Void, Output> task);
/**
* Starts an execution of task.
*
* @param task the task to execute.
* @param in an input value.
* @param <Input> type of input.
* @param <Output> type of output.
* @return task handler.
*/
public <Input, Output> TaskHandler<Input, Output> execute(Task<Input, Output> task, Input in);
/**
* Starts an execution of task.
*
* @param task the task to execute.
* @param in1 a first input value.
* @param in2 a second input value.
* @param <Input1> type of first input.
* @param <Input2> type of second input.
* @param <Output> type of output.
* @return task handler.
*/
public <Input1, Input2, Output> TaskHandler<C2<Input1, Input2>, Output> execute(Task<C2<Input1, Input2>, Output> task, Input1 in1, Input2 in2);
/**
* Starts an execution of task.
*
* @param task the task to execute.
* @param in1 a first input value.
* @param in2 a second input value.
* @param in3 a third input value.
* @param <Input1> type of first input.
* @param <Input2> type of second input.
* @param <Input3> type of third input.
* @param <Output> type of output.
* @return task handler.
*/
public <Input1, Input2, Input3, Output> TaskHandler<C3<Input1, Input2, Input3>, Output> execute(Task<C3<Input1, Input2, Input3>, Output> task, Input1 in1, Input2 in2, Input3 in3);
/**
* Starts an execution of task.
*
* @param task the task to execute.
* @param in1 a first input value.
* @param in2 a second input value.
* @param in3 a third input value.
* @param in4 a fourth input value.
* @param <Input1> type of first input.
* @param <Input2> type of second input.
* @param <Input3> type of third input.
* @param <Input4> type of fourth input.
* @param <Output> type of output.
* @return task handler.
*/
public <Input1, Input2, Input3, Input4, Output> TaskHandler<C4<Input1, Input2, Input3, Input4>, Output> execute(Task<C4<Input1, Input2, Input3, Input4>, Output> task, Input1 in1, Input2 in2, Input3 in3, Input4 in4);
/**
* Starts an execution of task.
*
* @param task the task to execute.
* @param in input values.
* @param <Input> type of input values.
* @param <Output> type of output.
* @return task handler.
*/
public <Input, Output> TaskHandler<CN<Input>, Output> execute(Task<CN<Input>, Output> task, Collection<Input> in);
/**
* Starts an execution of task.
*
* @param task the task to execute.
* @param in input values.
* @param <Input> type of input values.
* @param <Output> type of output.
* @return task handler.
*/
@SuppressWarnings("unchecked")
public <Input, Output> TaskHandler<CN<Input>, Output> execute(Task<CN<Input>, Output> task, Input... in);
/**
 * Returns an execution builder for the provided no-input task.
 *
 * @param task the task to execute.
 * @param <Output> type of output.
 * @return an {@link ExecutionBuilder} object.
 */
public <Output> ExecutionBuilder<Void, Output> prepare(Task<Void, Output> task);
/**
 * Returns an execution builder for the provided single-input task.
 *
 * @param task the task to execute.
 * @param in an input value.
 * @param <Input> type of input.
 * @param <Output> type of output.
 * @return an {@link ExecutionBuilder} object.
 */
public <Input, Output> ExecutionBuilder<Input, Output> prepare(Task<Input, Output> task, Input in);
/**
 * Returns an execution builder for the provided two-input task.
 *
 * @param task the task to execute.
 * @param in1 a first input value.
 * @param in2 a second input value.
 * @param <Input1> type of first input.
 * @param <Input2> type of second input.
 * @param <Output> type of output.
 * @return an {@link ExecutionBuilder} object.
 */
public <Input1, Input2, Output> ExecutionBuilder<C2<Input1, Input2>, Output> prepare(Task<C2<Input1, Input2>, Output> task, Input1 in1, Input2 in2);
/**
 * Returns an execution builder for the provided three-input task.
 *
 * @param task the task to execute.
 * @param in1 a first input value.
 * @param in2 a second input value.
 * @param in3 a third input value.
 * @param <Input1> type of first input.
 * @param <Input2> type of second input.
 * @param <Input3> type of third input.
 * @param <Output> type of output.
 * @return an {@link ExecutionBuilder} object.
 */
public <Input1, Input2, Input3, Output> ExecutionBuilder<C3<Input1, Input2, Input3>, Output> prepare(Task<C3<Input1, Input2, Input3>, Output> task, Input1 in1, Input2 in2, Input3 in3);
/**
 * Returns an execution builder for the provided four-input task.
 *
 * @param task the task to execute.
 * @param in1 a first input value.
 * @param in2 a second input value.
 * @param in3 a third input value.
 * @param in4 a fourth input value.
 * @param <Input1> type of first input.
 * @param <Input2> type of second input.
 * @param <Input3> type of third input.
 * @param <Input4> type of fourth input.
 * @param <Output> type of output.
 * @return an {@link ExecutionBuilder} object.
 */
public <Input1, Input2, Input3, Input4, Output> ExecutionBuilder<C4<Input1, Input2, Input3, Input4>, Output> prepare(Task<C4<Input1, Input2, Input3, Input4>, Output> task, Input1 in1, Input2 in2, Input3 in3, Input4 in4);
/**
 * Returns an execution builder for the provided task, taking a collection of
 * input values.
 *
 * @param task the task to execute.
 * @param in input values.
 * @param <Input> type of input values.
 * @param <Output> type of output.
 * @return an {@link ExecutionBuilder} object.
 */
public <Input, Output> ExecutionBuilder<CN<Input>, Output> prepare(Task<CN<Input>, Output> task, Collection<Input> in);
/**
 * Returns an execution builder for the provided task, taking a variable number
 * of input values.
 *
 * <p>{@code @SuppressWarnings("unchecked")} silences the generic-varargs
 * (heap pollution) warning on this declaration; presumably implementations
 * only read from the varargs array — verify before storing into it.
 *
 * @param task the task to execute.
 * @param in input values.
 * @param <Input> type of input values.
 * @param <Output> type of output.
 * @return an {@link ExecutionBuilder} object.
 */
@SuppressWarnings("unchecked")
public <Input, Output> ExecutionBuilder<CN<Input>, Output> prepare(Task<CN<Input>, Output> task, Input... in);
}
| {
"content_hash": "433fcb7293cc8eb2d582b5b121d6ec58",
"timestamp": "",
"source": "github",
"line_count": 257,
"max_line_length": 224,
"avg_line_length": 34.929961089494164,
"alnum_prop": 0.6153503397571571,
"repo_name": "igeldev/batrak",
"id": "4a450555a700923469c7b52def15987b3892d278",
"size": "9574",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "batrak-core/src/main/java/igel/batrak/TaskCategory.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "2109"
},
{
"name": "Java",
"bytes": "176253"
}
],
"symlink_target": ""
} |
// Seed a local LokiJS database file with the initial data used by the app.
var loki = require("lokijs");
var db = new loki('db.json');

// Popular search terms shown on the landing page, each tagged with a
// Bootstrap contextual style used for its badge colour.
db.addCollection('top').insert([
    {term:'JavaScript' , style :'warning'},
    {term:'Angular 2' , style :'danger'},
    {term:'NodeJS' , style :'success'},
    {term:'ReactJS' , style :'warning'}
]);

// Empty collection that accumulates user searches at runtime.
db.addCollection('searches');

// saveDatabase is asynchronous; the original fire-and-forget call silently
// dropped failures. Report them so a bad seed run is visible.
db.saveDatabase(function (err) {
    if (err) {
        console.error('Failed to save db.json:', err);
    }
});
"content_hash": "917781a7050a3bc94ed4fcb85951afae",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 40,
"avg_line_length": 23.384615384615383,
"alnum_prop": 0.6348684210526315,
"repo_name": "hawkesnc/TestPrograms",
"id": "0ab605daf0246230e54045dfae35bba0ad409036",
"size": "304",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "node/Project/app/database.js",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "542"
},
{
"name": "HTML",
"bytes": "9201"
},
{
"name": "JavaScript",
"bytes": "13740"
}
],
"symlink_target": ""
} |
package com.creativetrends.app.simplicity.adapters;
import android.content.Context;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.BaseAdapter;
import android.widget.Filter;
import android.widget.Filterable;
import androidx.recyclerview.widget.RecyclerView;
import java.util.ArrayList;
public class RecyclerBaseAdapter <VH extends RecyclerView.ViewHolder> extends BaseAdapter implements Filterable {
private final RecyclerView.Adapter<VH> mAdapter;
private final ArrayList<String> mItems = new ArrayList<>();
private final LayoutInflater mInflater;
private String mQueryText;
public RecyclerBaseAdapter(RecyclerView.Adapter<VH> adapter, Context context) {
mAdapter = adapter;
mInflater = LayoutInflater.from(context);
}
@Override
public int getItemViewType(int position) {
return mAdapter.getItemViewType(position);
}
@Override
public int getCount() {
return mAdapter.getItemCount();
}
@Override
public Object getItem(int position) {
// not supported
return null;
}
@Override
public long getItemId(int position) {
return mAdapter.getItemId(position);
}
@SuppressWarnings("unchecked")
@Override
public View getView(int position, View convertView, ViewGroup parent) {
VH holder;
if (convertView == null) {
holder = mAdapter.createViewHolder(parent, getItemViewType(position));
convertView = holder.itemView;
convertView.setTag(holder);
} else {
holder = (VH) convertView.getTag();
}
mAdapter.bindViewHolder(holder, position);
return holder.itemView;
}
@Override
public Filter getFilter() {
// TODO: return a real filter
return null;
}
} | {
"content_hash": "31f7a9621566d2dd5ecbb33367331268",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 113,
"avg_line_length": 27.676470588235293,
"alnum_prop": 0.6843783209351754,
"repo_name": "creativetrendsapps/SimplicityBrowser",
"id": "8acaab3a82bb5fe8a8a3f87dbcd63d277ff8bf42",
"size": "1882",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/src/main/java/com/creativetrends/app/simplicity/adapters/RecyclerBaseAdapter.java",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "240"
},
{
"name": "HTML",
"bytes": "1812"
},
{
"name": "Java",
"bytes": "559532"
}
],
"symlink_target": ""
} |
#ifndef EMS_CMD_H
#define EMS_CMD_H
#include <time.h>
#include "ems.h"
/*
 * romfile couples a ROM image's parsed header with the file it was loaded
 * from and that file's change time.
 */
struct romfile {
    struct header header;
    char *path;
    time_t ctime;
};

/*
 * Interrupt (SIGINT) helpers: install a handler, poll whether an interrupt
 * was received, and restore the previous disposition.
 * Empty parameter lists were K&R-style "unspecified arguments"; declare
 * them as (void) so the compiler can check calls.
 */
int checkint(void);
void catchint(void);
void restoreint(void);

/*
 * Cartridge commands. NOTE(review): the meaning of the int/char** arguments
 * is not visible from this header; see the corresponding definitions.
 */
void cmd_title(int);
void cmd_delete(int, int, int, char**);
void cmd_format(int, int);
void cmd_restore(int, int, char*, int);
void cmd_dump(int, int, char*, int);
void cmd_write(int, int, int, int, char**);
void cmd_read(int, int, int, char**);
#endif /* EMS_CMD_H */
| {
"content_hash": "accd2f4c798489d2a2021447aa5653f8",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 43,
"avg_line_length": 19.875,
"alnum_prop": 0.649895178197065,
"repo_name": "mikeryan/ems-flasher",
"id": "5e26f745097b822041ff7d21bf697a9c4d28b6ac",
"size": "477",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cmd.h",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "5015"
},
{
"name": "C",
"bytes": "148476"
},
{
"name": "Makefile",
"bytes": "1366"
},
{
"name": "Roff",
"bytes": "4580"
},
{
"name": "Shell",
"bytes": "11840"
}
],
"symlink_target": ""
} |
// @file data.hpp
// @brief Basic data structures
// @author Andrea Vedaldi
#ifndef __vl_data_hpp__
#define __vl_data_hpp__
#include <cstddef>
#include <string>
#define STRINGIZE(x) STRINGIZE_HELPER(x)
#define STRINGIZE_HELPER(x) #x
#define FILELINE STRINGIZE(__FILE__) ":" STRINGIZE(__LINE__)
namespace vl {
// Index type used for tensor dimensions throughout the library.
typedef int index_t ;

// Where memory lives: host (CPU) or device (GPU).
enum Device { CPU = 0, GPU } ;

// Element types a buffer may hold.
enum Type {
  vlTypeChar,
  vlTypeFloat,
  vlTypeDouble
} ;

// Library-wide error codes; translate with getErrorMessage().
enum Error {
  vlSuccess = 0,
  vlErrorUnsupported,
  vlErrorCuda,
  vlErrorCudnn,
  vlErrorCublas,
  vlErrorOutOfMemory,
  vlErrorOutOfGPUMemeory, // NOTE(review): misspelling kept -- renaming would break callers.
  vlErrorUnknown
} ;

// Returns a human-readable message for the given error code.
const char * getErrorMessage(Error error) ;

// Forward declaration; defined alongside the CUDA-specific code.
class CudaHelper ;
/* -----------------------------------------------------------------
* Helpers
* -------------------------------------------------------------- */
// Integer division of a by b, rounded towards +infinity (ceiling division).
// Assumes b > 0 and a >= 0; note a + b - 1 could overflow for large a, and
// the formula differs from a ceiling for negative a -- TODO confirm callers
// only pass non-negative sizes.
inline int divideUpwards(int a, int b)
{
  return (a + b - 1) / b ;
}
namespace impl {
  // A lazily-(re)allocated chunk of CPU or GPU memory used internally by
  // Context for scratch space and cached constants.
  class Buffer
  {
  public:
    Buffer() ;
    // Makes the buffer hold `size` elements of `dataType` on `deviceType`;
    // presumably reallocates only when it must grow -- see the definition.
    vl::Error init(Device deviceType, Type dataType, size_t size) ;
    void * getMemory() ;
    // Number of reallocations performed so far (diagnostic counter).
    int getNumReallocations() const ;
    void clear() ;
    // Drops GPU-side resources (named after Context::invalidateGpu; verify
    // exact semantics in the definition).
    void invalidateGpu() ;
  protected:
    Device deviceType ;   // where `memory` lives
    Type dataType ;       // element type of `memory`
    size_t size ;         // current capacity, in elements
    void * memory ;
    int numReallocations ;
  } ;
}
/* -----------------------------------------------------------------
* Context
* -------------------------------------------------------------- */
// Shared per-session state: scratch workspace, a cached all-ones buffer
// (one per device), CUDA handles, and last-error bookkeeping.
class Context
{
public:
  Context() ;
  ~Context() ;

  // Scratch memory of at least `size` bytes on `device`; backed by the
  // per-device `workspace` buffers below.
  void * getWorkspace(Device device, size_t size) ;
  void clearWorkspace(Device device) ;
  // Buffer filled with ones, of `size` elements of `type`, on `device`.
  void * getAllOnes(Device device, Type type, size_t size) ;
  void clearAllOnes(Device device) ;
  CudaHelper& getCudaHelper() ;

  void clear() ; // do a reset
  void invalidateGpu() ; // drop CUDA memory and handles

  // Error plumbing: setError records a new error; passError forwards one
  // (see definitions for the exact difference between the two).
  vl::Error passError(vl::Error error, char const * message = NULL) ;
  vl::Error setError(vl::Error error, char const * message = NULL) ;
  void resetLastError() ;
  vl::Error getLastError() const ;
  std::string const& getLastErrorMessage() const ;

private:
  impl::Buffer workspace[2] ; // indexed by Device (CPU/GPU)
  impl::Buffer allOnes[2] ;   // indexed by Device (CPU/GPU)
  Error lastError ;
  std::string lastErrorMessage ;
  CudaHelper * cudaHelper ;
} ;
/* -----------------------------------------------------------------
* TensorGeometry
* -------------------------------------------------------------- */
// The shape of a 4D tensor: height x width x depth (channels) x size
// (batch). Carries no data; see Tensor below for geometry plus memory.
class TensorGeometry
{
public:
  TensorGeometry() ;
  TensorGeometry(index_t height, index_t width, index_t depth, index_t size) ;
  index_t getHeight() const ;
  index_t getWidth() const ;
  index_t getDepth() const ;
  index_t getSize() const ;
  // Product of the four dimensions.
  index_t getNumElements() const ;
  bool isEmpty() const ;
protected:
  index_t height ;
  index_t width ;
  index_t depth ;
  index_t size ;
} ;
// Two geometries are equal iff all four dimensions match.
inline bool operator == (TensorGeometry const & a, TensorGeometry const & b)
{
  // Use logical && rather than bitwise &: on these bool operands the result
  // is identical, but && is the intended idiom and short-circuits.
  return
    (a.getHeight() == b.getHeight()) &&
    (a.getWidth() == b.getWidth()) &&
    (a.getDepth() == b.getDepth()) &&
    (a.getSize() == b.getSize()) ;
}

inline bool operator != (TensorGeometry const & a, TensorGeometry const & b)
{
  return ! (a == b) ;
}
/* -----------------------------------------------------------------
* Tensor
* -------------------------------------------------------------- */
// A TensorGeometry bound to a block of memory on a particular device.
// The class does not own the memory; it is a non-owning view.
class Tensor : public TensorGeometry
{
public:
  Tensor() ;
  Tensor(float * memory, size_t memorySize, Device memoryType,
         TensorGeometry const & geom) ;
  float * getMemory() ;
  Device getMemoryType() const ;
  TensorGeometry getGeometry() const ;
  // True when the tensor is backed by memory (see isNull); definitions live
  // elsewhere -- verify exact semantics there.
  operator bool() const ;
  bool isNull() const ;
protected:
  float * memory ;      // start of the data (not owned)
  size_t memorySize ;   // size of the backing allocation
  Device memoryType ;   // CPU or GPU
} ;
// Tensors are compatible when they can appear in the same operation: an
// empty or null tensor is compatible with anything, and two concrete
// tensors must live in the same memory space.
inline bool areCompatible(Tensor const & a, Tensor const & b)
{
  if (a.isEmpty() || a.isNull()) { return true ; }
  if (b.isEmpty() || b.isNull()) { return true ; }
  return a.getMemoryType() == b.getMemoryType() ;
}
}
#endif
| {
"content_hash": "a3024c87777e4437313143f613c34114",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 80,
"avg_line_length": 23.719298245614034,
"alnum_prop": 0.5290927021696252,
"repo_name": "gcheron/P-CNN",
"id": "b5d4ee88358d1f535101d9f6ed4c32b7e697e4e6",
"size": "4239",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "matconvnet-1.0-beta11/matlab/src/bits/data.hpp",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "21146"
},
{
"name": "C++",
"bytes": "156879"
},
{
"name": "CSS",
"bytes": "2552"
},
{
"name": "Cuda",
"bytes": "153983"
},
{
"name": "JavaScript",
"bytes": "138"
},
{
"name": "Makefile",
"bytes": "9360"
},
{
"name": "Matlab",
"bytes": "194730"
},
{
"name": "Python",
"bytes": "32339"
},
{
"name": "Shell",
"bytes": "18204"
},
{
"name": "TeX",
"bytes": "60325"
}
],
"symlink_target": ""
} |
//! Project version number for KingBaseCategory (CocoaPods umbrella header).
FOUNDATION_EXPORT double KingBaseCategoryVersionNumber;
//! Project version string for KingBaseCategory.
FOUNDATION_EXPORT const unsigned char KingBaseCategoryVersionString[];
| {
"content_hash": "b7e60a0536acdb78ab1480fb70c6f7b1",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 70,
"avg_line_length": 42.666666666666664,
"alnum_prop": 0.8828125,
"repo_name": "hengyangKing/MHPhotoBroswer",
"id": "7537b9db971838f8858e1a7523d60506bca1b01d",
"size": "1281",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Example/Pods/Target Support Files/KingBaseCategory/KingBaseCategory-umbrella.h",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "970"
},
{
"name": "Objective-C",
"bytes": "196736"
},
{
"name": "Ruby",
"bytes": "6541"
}
],
"symlink_target": ""
} |
layout: docwithnav
title: "kubectl port-forward"
---
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
## kubectl port-forward
Forward one or more local ports to a pod.
### Synopsis
Forward one or more local ports to a pod.
```
{% raw %}
kubectl port-forward -p POD_NAME [LOCAL_PORT:]REMOTE_PORT [...[LOCAL_PORT_N:]REMOTE_PORT_N]
{% endraw %}
```
### Examples
```
{% raw %}
// listens on ports 5000 and 6000 locally, forwarding data to/from ports 5000 and 6000 in the pod
$ kubectl port-forward -p mypod 5000 6000
// listens on port 8888 locally, forwarding to 5000 in the pod
$ kubectl port-forward -p mypod 8888:5000
// listens on a random port locally, forwarding to 5000 in the pod
$ kubectl port-forward -p mypod :5000
// listens on a random port locally, forwarding to 5000 in the pod
$ kubectl port-forward -p mypod 0:5000
{% endraw %}
```
### Options
```
{% raw %}
-h, --help=false: help for port-forward
-p, --pod="": Pod name
{% endraw %}
```
### Options inherited from parent commands
```
{% raw %}
--alsologtostderr=false: log to standard error as well as files
--api-version="": The API version to use when talking to the server
--certificate-authority="": Path to a cert. file for the certificate authority.
      --client-certificate="": Path to a client certificate file for TLS.
--client-key="": Path to a client key file for TLS.
--cluster="": The name of the kubeconfig cluster to use
--context="": The name of the kubeconfig context to use
--insecure-skip-tls-verify=false: If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure.
--kubeconfig="": Path to the kubeconfig file to use for CLI requests.
--log-backtrace-at=:0: when logging hits line file:N, emit a stack trace
--log-dir=: If non-empty, write log files in this directory
--log-flush-frequency=5s: Maximum number of seconds between log flushes
--logtostderr=true: log to standard error instead of files
--match-server-version=false: Require server version to match client version
--namespace="": If present, the namespace scope for this CLI request.
--password="": Password for basic authentication to the API server.
-s, --server="": The address and port of the Kubernetes API server
--stderrthreshold=2: logs at or above this threshold go to stderr
--token="": Bearer token for authentication to the API server.
--user="": The name of the kubeconfig user to use
--username="": Username for basic authentication to the API server.
--v=0: log level for V logs
--validate=false: If true, use a schema to validate the input before sending it
--vmodule=: comma-separated list of pattern=N settings for file-filtered logging
{% endraw %}
```
### SEE ALSO
* [kubectl](kubectl.html) - kubectl controls the Kubernetes cluster manager
###### Auto generated by spf13/cobra at 2015-07-21 06:57:19.12265922 +0000 UTC
<!-- BEGIN MUNGE: IS_VERSIONED -->
<!-- TAG IS_VERSIONED -->
<!-- END MUNGE: IS_VERSIONED -->
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()
<!-- END MUNGE: GENERATED_ANALYTICS -->
| {
"content_hash": "97d7cd4f8af054ff07c4858ad4562a18",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 155,
"avg_line_length": 34.44329896907217,
"alnum_prop": 0.6869200838072433,
"repo_name": "caesarxuchao/caesarxuchao.github.io",
"id": "b4f13a831b36baafb768d40069e048853d96b005",
"size": "3345",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "_v1.0/docs/user-guide/kubectl/kubectl_port-forward.md",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "65501"
},
{
"name": "Go",
"bytes": "90066"
},
{
"name": "Groff",
"bytes": "241426"
},
{
"name": "HTML",
"bytes": "6413405"
},
{
"name": "Java",
"bytes": "11302"
},
{
"name": "JavaScript",
"bytes": "48163"
},
{
"name": "Logos",
"bytes": "14236"
},
{
"name": "Makefile",
"bytes": "7935"
},
{
"name": "PHP",
"bytes": "1797"
},
{
"name": "Python",
"bytes": "8646"
},
{
"name": "Ruby",
"bytes": "8044"
},
{
"name": "Shell",
"bytes": "142127"
}
],
"symlink_target": ""
} |
package com.google.sps.servlets;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.*;
import org.junit.Before;
import org.junit.Test;
import org.junit.After;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
import java.io.PrintWriter;
import java.io.StringWriter;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import com.google.appengine.tools.development.testing.LocalServiceTestHelper;
import com.google.appengine.tools.development.testing.LocalBlobstoreServiceTestConfig;
@RunWith(JUnit4.class)
public final class UploadOnBlobstoreServletTest {

  /** Spins up a local, in-memory Blobstore service for each test. */
  private final LocalServiceTestHelper helper =
      new LocalServiceTestHelper(new LocalBlobstoreServiceTestConfig());

  private UploadOnBlobstoreServlet servlet;
  private HttpServletRequest request;
  private HttpServletResponse response;
  private StringWriter responseBody;

  @Before
  public void setUp() throws Exception {
    helper.setUp();
    servlet = new UploadOnBlobstoreServlet();

    request = mock(HttpServletRequest.class);
    response = mock(HttpServletResponse.class);

    // Capture everything the servlet writes to the response body.
    responseBody = new StringWriter();
    when(response.getWriter()).thenReturn(new PrintWriter(responseBody));
  }

  @After
  public void tearDown() throws Exception {
    helper.tearDown();
  }

  @Test
  public void getBlobstoreUploadUrl() throws Exception {
    // The generated upload URL varies between local runs, so only the
    // stable prefix of the Blobstore endpoint is asserted.
    String blobstoreHeaderUrl = "http://localhost:8080/_ah/upload/";

    servlet.doGet(request, response);

    String body = responseBody.toString();
    assertTrue(body.contains(blobstoreHeaderUrl));
  }
}
| {
"content_hash": "0b3553598e480253917baf113fbb7d3e",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 86,
"avg_line_length": 30.406779661016948,
"alnum_prop": 0.778149386845039,
"repo_name": "google/step197-2020",
"id": "b3e601d06dfb28030abd500772093ff215f59d11",
"size": "1794",
"binary": false,
"copies": "1",
"ref": "refs/heads/Setup",
"path": "src/test/java/com/google/sps/servlets/UploadOnBlobstoreServletTest.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "7336"
},
{
"name": "HTML",
"bytes": "219"
},
{
"name": "Java",
"bytes": "119195"
},
{
"name": "JavaScript",
"bytes": "67779"
},
{
"name": "Shell",
"bytes": "67"
}
],
"symlink_target": ""
} |
<?xml version="1.0" encoding="utf-8"?>
<!-- Root layout for the authentication activity: a vertical container
     holding a single RelativeLayout that is populated programmatically
     at runtime. (The id "container_rellative" is misspelled but referenced
     by code, so it must not be renamed here.) -->
<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
    android:layout_width="match_parent"
    android:layout_height="match_parent"
    android:orientation="vertical">

    <!-- Placeholder container for dynamically added auth views. -->
    <RelativeLayout
        android:id="@+id/container_rellative"
        android:layout_width="match_parent"
        android:layout_height="wrap_content">
    </RelativeLayout>

</LinearLayout>
"content_hash": "6234ab9e6bb443d3854a365a21164910",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 72,
"avg_line_length": 30.357142857142858,
"alnum_prop": 0.6941176470588235,
"repo_name": "WildanGarviandi/Bararaga",
"id": "ea927f76cdf80cdb9976c39fd109a58cedc2c63f",
"size": "425",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/src/main/res/layout/auth_activity.xml",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "99768"
}
],
"symlink_target": ""
} |
require 'helper'
require 'mocha'
require 'json'
# Exercises StravaApi::Segment: fetching full details (#show) and listing
# efforts, with all HTTP traffic stubbed out via mocha.
#
# Uses assert_equal / assert_nil / assert_kind_of rather than bare
# `assert x == y` so failures report expected vs. actual values.
class TestSegment < Test::Unit::TestCase
  def setup
    @s = StravaApi::Base.new

    api_result = JSON.parse segments_index_json
    api_result.stubs(:parsed_response).returns("")
    StravaApi::Base.stubs(:get).with('/segments', {:query => {:name => 'Hawk Hill'}}).returns(api_result)

    @segment = @s.segments("Hawk Hill").first
  end

  def test_show
    api_result = JSON.parse segment_show_json
    api_result.stubs(:parsed_response).returns("")
    StravaApi::Base.stubs(:get).with('/segments/99243', { :query => {} }).returns(api_result)

    # Before #show, only the summary attributes are populated.
    assert_equal 99243, @segment.id
    assert_equal "Hawk Hill Saddle", @segment.name
    assert_nil @segment.elevation_gain
    assert_nil @segment.elevation_high

    result = @segment.show

    assert_kind_of StravaApi::Segment, result
    # #show fills in the detailed elevation attributes in place.
    assert_equal 99243, @segment.id
    assert_equal "Hawk Hill Saddle", @segment.name
    assert_equal 76.553, @segment.elevation_gain
    assert_equal 172.694, @segment.elevation_high
  end

  def test_efforts
    api_result = JSON.parse segment_efforts_index_json
    api_result.stubs(:parsed_response).returns("")
    StravaApi::Base.stubs(:get).with('/segments/99243/efforts', { :query => {} }).returns(api_result)

    efforts = @segment.efforts

    assert_kind_of Array, efforts
    assert_equal 3, efforts.size
    efforts.each do |effort|
      assert_kind_of StravaApi::Effort, effort
    end
  end

  def test_efforts_with_athlete_id
    api_result = JSON.parse segment_efforts_index_by_athlete_id_json
    api_result.stubs(:parsed_response).returns("")
    StravaApi::Base.stubs(:get).with('/segments/99243/efforts', { :query => {'athleteId' => 1377} }).returns(api_result)

    efforts = @segment.efforts(:athlete_id => 1377)

    assert_kind_of Array, efforts
    assert_equal 17, efforts.size
    efforts.each do |effort|
      assert_kind_of StravaApi::Effort, effort
    end
  end
end
"content_hash": "ba03e2096753686935cfd27507f8192b",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 120,
"avg_line_length": 30.571428571428573,
"alnum_prop": 0.6588785046728972,
"repo_name": "stevenchanin/strava-api",
"id": "4279a938dbd0e15ac8f4645d0eeb6d6e07754ad5",
"size": "1926",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_segment.rb",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Ruby",
"bytes": "104786"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import inspect
import io
import time
import unittest
import mock
from pants.java.nailgun_io import NailgunStreamWriter
from pants.java.nailgun_protocol import ChunkType, NailgunProtocol
PATCH_OPTS = dict(autospec=True, spec_set=True)
class TestNailgunStreamWriter(unittest.TestCase):
  """Tests NailgunStreamWriter's stop/start lifecycle and its select loop.

  Uses ``assertEqual`` rather than the deprecated ``assertEquals`` alias.
  """

  def setUp(self):
    # -1 is an intentionally invalid fd: the writer never reads it directly
    # here because select.select and os.read are mocked out in the tests.
    self.in_fd = -1
    self.mock_socket = mock.Mock()
    self.writer = NailgunStreamWriter(
      (self.in_fd,),
      self.mock_socket,
      (ChunkType.STDIN,),
      ChunkType.STDIN_EOF
    )

  def test_stop(self):
    """stop() flips is_stopped, after which run() returns immediately."""
    self.assertFalse(self.writer.is_stopped)
    self.writer.stop()
    self.assertTrue(self.writer.is_stopped)
    self.writer.run()

  def test_startable(self):
    """The writer exposes a Thread-style start() method."""
    self.assertTrue(inspect.ismethod(self.writer.start))

  @mock.patch('select.select')
  def test_run_stop_on_error(self, mock_select):
    """An exceptional fd reported by select terminates the run loop."""
    mock_select.return_value = ([], [], [self.in_fd])

    self.writer.run()

    self.assertFalse(self.writer.is_alive())
    self.assertEqual(mock_select.call_count, 1)

  @mock.patch('os.read')
  @mock.patch('select.select')
  @mock.patch.object(NailgunProtocol, 'write_chunk')
  def test_run_read_write(self, mock_writer, mock_select, mock_read):
    """Readable data is forwarded as STDIN chunks; empty read ends the loop."""
    mock_select.side_effect = [
      ([self.in_fd], [], []),
      ([self.in_fd], [], [])
    ]
    mock_read.side_effect = [
      b'A' * 300,
      b''  # Simulate EOF.
    ]

    # Exercise NailgunStreamWriter.running() and .run() simultaneously,
    # bailing out if the writer thread never terminates.
    inc = 0
    with self.writer.running():
      while self.writer.is_alive():
        time.sleep(0.01)
        inc += 1
        if inc >= 1000:
          raise Exception('waited too long.')

    self.assertFalse(self.writer.is_alive())
    mock_read.assert_called_with(-1, io.DEFAULT_BUFFER_SIZE)
    self.assertEqual(mock_read.call_count, 2)
    mock_writer.assert_has_calls([
      mock.call(mock.ANY, ChunkType.STDIN, b'A' * 300),
      mock.call(mock.ANY, ChunkType.STDIN_EOF)
    ])
| {
"content_hash": "26209b7bc6a64a2023b4237b11e3c635",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 93,
"avg_line_length": 27.813333333333333,
"alnum_prop": 0.6500479386385427,
"repo_name": "UnrememberMe/pants",
"id": "7d7888f702b16bb154ae790282186ae1bf950830",
"size": "2233",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/python/pants_test/java/test_nailgun_io.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "343"
},
{
"name": "C++",
"bytes": "1138"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "1805"
},
{
"name": "HTML",
"bytes": "48321"
},
{
"name": "Java",
"bytes": "490360"
},
{
"name": "JavaScript",
"bytes": "33289"
},
{
"name": "Python",
"bytes": "5767085"
},
{
"name": "Rust",
"bytes": "427157"
},
{
"name": "Scala",
"bytes": "75938"
},
{
"name": "Shell",
"bytes": "75470"
},
{
"name": "Thrift",
"bytes": "2953"
}
],
"symlink_target": ""
} |
/*************************************************************
*
* MathJax/localization/lb/TeX.js
*
* Copyright (c) 2009-2018 The MathJax Consortium
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Registers the Luxembourgish (lb) translation table for the TeX component.
// NOTE: this file is auto-generated (per its header); edits will be lost on
// regeneration, so only comments are added here.
MathJax.Localization.addTranslation("lb", "TeX", {
  version: "2.7.5",
  isLoaded: true,
  // Empty map: lookups fall back to the default (English) strings.
  strings: {}
});

MathJax.Ajax.loadComplete("[MathJax]/localization/lb/TeX.js");
| {
"content_hash": "44f82660d19c9f9efe0142a464307e6a",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 76,
"avg_line_length": 32.17857142857143,
"alnum_prop": 0.6614872364039955,
"repo_name": "GerHobbelt/MathJax",
"id": "9bf359cfe626f759ef8e11cd133752ac2b7231ac",
"size": "1568",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "localization/lb/TeX.js",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "12610385"
},
{
"name": "JavaScript",
"bytes": "50653811"
}
],
"symlink_target": ""
} |
package util
import (
"bufio"
"fmt"
"os"
"strings"
"github.com/lxc/lxd/shared"
)
// LoadModule loads the kernel module with the given name by invoking
// modprobe. Modules already visible under /sys/module are left alone.
func LoadModule(module string) error {
	if !shared.PathExists(fmt.Sprintf("/sys/module/%s", module)) {
		_, err := shared.RunCommand("modprobe", module)
		return err
	}

	return nil
}
// HasFilesystem checks whether a given filesystem is already supported
// by the kernel, by scanning /proc/filesystems. Note that if the
// filesystem is a module, you may need to load it first. Returns false
// when /proc/filesystems cannot be opened.
func HasFilesystem(filesystem string) bool {
	file, err := os.Open("/proc/filesystems")
	if err != nil {
		return false
	}
	defer file.Close()

	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		fields := strings.Fields(strings.TrimSpace(scanner.Text()))
		if len(fields) == 0 {
			// Guard against blank lines: indexing fields[len-1] on an
			// empty slice would panic.
			continue
		}

		entry := fields[len(fields)-1]
		if entry == filesystem {
			return true
		}
	}

	return false
}
// HugepagesPath attempts to locate the mount point of the hugepages
// filesystem by scanning /proc/mounts. It errors when no hugetlbfs mount
// exists, or when several exist and none is the standard /dev/hugepages.
func HugepagesPath() (string, error) {
	// Find the source mount of the path
	file, err := os.Open("/proc/mounts")
	if err != nil {
		return "", err
	}
	defer file.Close()

	scanner := bufio.NewScanner(file)
	matches := []string{}
	for scanner.Scan() {
		cols := strings.Fields(scanner.Text())
		if len(cols) < 3 {
			// A well-formed /proc/mounts line has at least six fields;
			// skip anything shorter rather than panicking on cols[2].
			continue
		}

		if cols[2] == "hugetlbfs" {
			matches = append(matches, cols[1])
		}
	}

	if len(matches) == 0 {
		return "", fmt.Errorf("No hugetlbfs mount found, can't use hugepages")
	}

	if len(matches) > 1 {
		if shared.StringInSlice("/dev/hugepages", matches) {
			return "/dev/hugepages", nil
		}

		return "", fmt.Errorf("More than one hugetlbfs instance found and none at standard /dev/hugepages")
	}

	return matches[0], nil
}
| {
"content_hash": "4625be9fb9a423e280589bb26e340b97",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 101,
"avg_line_length": 21.518987341772153,
"alnum_prop": 0.6776470588235294,
"repo_name": "mjeanson/lxd",
"id": "4019477af7b2000f5b0b57aa09077afb20e0916a",
"size": "1700",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lxd/util/kernel.go",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "798844"
},
{
"name": "Makefile",
"bytes": "2926"
},
{
"name": "Protocol Buffer",
"bytes": "711"
},
{
"name": "Python",
"bytes": "47382"
},
{
"name": "Shell",
"bytes": "65418"
}
],
"symlink_target": ""
} |
package xerrors
import (
"errors"
"fmt"
"reflect"
)
// getError derives an error value from an arbitrary object, if one can be
// found anywhere inside it.
func getError(unknown interface{}) error {
	v := reflect.ValueOf(unknown)
	return findErrorInValue(v)
}
// findErrorInValue tries to generate an error object either from the value
// itself, when it can be converted to an error, or from one of its fields.
// Returns nil when no error can be derived.
func findErrorInValue(v reflect.Value) error {
	if v.Kind() == reflect.Invalid {
		return nil
	}
	if v.CanInterface() {
		// The object can be materialised; hopefully we can cast it to an error.
		if _, ok := v.Interface().(*error); ok {
			return findErrorInValue(v.Elem())
		} else if err, ok := v.Interface().(error); ok {
			if v.Type().String() == "*errors.errorString" {
				return err
			}
			if v.Kind() == reflect.Interface {
				return err
			} else if v.Kind() == reflect.Ptr {
				return findErrorInValue(v.Elem())
			}
			return err
		} else if v.Kind() == reflect.Ptr { // We can't cast it to an error, check the pointer.
			return findErrorInValue(v.Elem())
		} else if v.Kind() == reflect.Struct { // We can't cast it to an error, check the fields.
			return iterateFields(v)
		}
	} else if v.Kind() == reflect.Ptr && v.Type().String() == "*errors.errorString" { // the base case, i.e. errors.New("error")
		// NOTE(review): fmt.Errorf would be the idiomatic equivalent here.
		return errors.New(fmt.Sprintf("%s", v.Elem().Field(0)))
	} else if v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface { // try what the object contains.
		return findErrorInValue(v.Elem())
	} else if v.Kind() == reflect.Struct { // try the fields
		return iterateFields(v)
	}

	// An error could not be derived from this value.
	return nil
}
// FirstCause will find the first (deepest) cause of the error. This assumes
// the errors are cascaded, i.e. an error struct contains another error, and
// so on. Returns nil when no nested error can be found.
func FirstCause(err error) error {
	return findRootError(err)
}
// findRootError recursively iterates over the value's fields (and their
// fields) to extract the deepest nested error. If none is found, nil is
// returned. The error found at the current level is returned only when no
// deeper one exists.
func findRootError(unknown interface{}) error {
	if unknown == nil {
		return nil
	}
	v := reflect.ValueOf(unknown)
	currentLevel := findErrorInValue(v)
	var tempErr error
	if v.Kind() == reflect.Ptr { // check the pointee, then its fields
		e := findErrorInValue(v.Elem())
		if e != nil {
			e = findRootError(e)
			if e != nil {
				tempErr = e
			}
		}
	} else if v.Kind() == reflect.Struct { // check the fields
		e := iterateFields(v)
		if e != nil {
			e = findRootError(e)
			if e != nil {
				tempErr = e
			}
		}
	}
	// Prefer the deeper error over the one found at this level.
	if tempErr != nil {
		return tempErr
	}
	return currentLevel
}
// iterateFields iterates over the struct's fields and tries to derive an
// error object from each, returning the last one found (or nil).
// :warning: may only be called when v.Kind() == reflect.Struct.
func iterateFields(v reflect.Value) error {
	var tempErr error
	for i := 0; i < v.NumField(); i++ {
		f := v.Field(i)
		e := findErrorInValue(f)
		if e != nil {
			tempErr = e
		}
		if f.CanInterface() { // recurse if we can turn the field into an interface{}
			e = findRootError(f.Interface())
			if e != nil {
				tempErr = e
			}
		} else if f.Kind() == reflect.Interface { // try the object the field contains
			f = f.Elem()
			e = findErrorInValue(f)
			if e != nil {
				tempErr = e
			}
		}
	}
	return tempErr
}
| {
"content_hash": "7f2bc39683d0114f5d9469e69dca614c",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 124,
"avg_line_length": 27.07563025210084,
"alnum_prop": 0.654562383612663,
"repo_name": "Comcast/webpa-common",
"id": "71635a177582f8f83fc9368bb4f2c02179bfa191",
"size": "3222",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xerrors/xerrors.go",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "1500712"
},
{
"name": "Shell",
"bytes": "632"
}
],
"symlink_target": ""
} |
<div class="content">
<div id="example_title">
<h1>Escape/Unescape ID</h1>
        Element IDs in the DOM cannot contain certain characters. If you build ids from a variable name (as is done in w2ui
        grid/form, etc.), you should sanitize them first. That is where these two functions come in.
</div>
<div id="example_view"></div>
<div id="example_code"></div>
</div>
<!--CODE-->
<input id="encode" class="w2ui-input" value="id-width-hash#space dot.comma,slash\" style="width: 300px">
<br><br>
<button class="w2ui-btn" onclick="escape()">Escape ID</button>
<button class="w2ui-btn" onclick="unescape()">Unescape ID</button>
<!--CODE-->
<script type="module">
    import { w2utils, query } from '__W2UI_PATH__'

    // Escape the input's current text in place.
    window.escape = function() {
        let input = query('#encode')
        input.val(w2utils.escapeId(input[0].value))
    }

    // Reverse the escaping, restoring the original id text.
    window.unescape = function() {
        let input = query('#encode')
        input.val(w2utils.unescapeId(input[0].value))
    }
</script>
"content_hash": "b815eac421ef98d4e121c1da4ea027af",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 118,
"avg_line_length": 32.4,
"alnum_prop": 0.6491769547325102,
"repo_name": "vitmalina/w2ui",
"id": "3819f7ad0c80eb4ea0c39a7e7c7125c1495486d4",
"size": "972",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "demos/examples/utils/18.html",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "25750"
},
{
"name": "HTML",
"bytes": "184623"
},
{
"name": "Hack",
"bytes": "4647"
},
{
"name": "Java",
"bytes": "213640"
},
{
"name": "JavaScript",
"bytes": "1565985"
},
{
"name": "Less",
"bytes": "255571"
},
{
"name": "PHP",
"bytes": "23924"
},
{
"name": "Python",
"bytes": "182361"
},
{
"name": "Ruby",
"bytes": "5461"
},
{
"name": "Shell",
"bytes": "1567"
}
],
"symlink_target": ""
} |
//
// This file was generated by the JavaTM Architecture for XML Binding(JAXB) Reference Implementation, v2.2.4-2
// See <a href="http://java.sun.com/xml/jaxb">http://java.sun.com/xml/jaxb</a>
// Any modifications to this file will be lost upon recompilation of the source schema.
// Generated on: 2013.11.29 at 12:35:53 PM GMT
//
package org.mule.modules.hybris.model;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlSchemaType;
import javax.xml.bind.annotation.XmlType;
import javax.xml.datatype.XMLGregorianCalendar;
/**
* <p>Java class for b2BCommentDTO complex type.
*
* <p>The following schema fragment specifies the expected content contained within this class.
*
* <pre>
* <complexType name="b2BCommentDTO">
* <complexContent>
* <extension base="{}itemDTO">
* <sequence>
* <element name="code" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/>
* <element name="comment" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/>
* <element name="modifiedDate" type="{http://www.w3.org/2001/XMLSchema}dateTime" minOccurs="0"/>
* <element name="order" type="{}abstractOrderDTO" minOccurs="0"/>
* </sequence>
* </extension>
* </complexContent>
* </complexType>
* </pre>
*
*
*/
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "b2BCommentDTO", propOrder = {
    "code",
    "comment",
    "modifiedDate",
    "order"
})
public class B2BCommentDTO
    extends ItemDTO
{

    protected String code;
    protected String comment;
    @XmlSchemaType(name = "dateTime")
    protected XMLGregorianCalendar modifiedDate;
    protected AbstractOrderDTO order;

    /**
     * Returns the comment code.
     *
     * @return the current value, possibly {@code null}
     */
    public String getCode() {
        return this.code;
    }

    /**
     * Assigns the comment code.
     *
     * @param value the new value, may be {@code null}
     */
    public void setCode(String value) {
        this.code = value;
    }

    /**
     * Returns the comment text.
     *
     * @return the current value, possibly {@code null}
     */
    public String getComment() {
        return this.comment;
    }

    /**
     * Assigns the comment text.
     *
     * @param value the new value, may be {@code null}
     */
    public void setComment(String value) {
        this.comment = value;
    }

    /**
     * Returns the timestamp of the last modification.
     *
     * @return the current value, possibly {@code null}
     */
    public XMLGregorianCalendar getModifiedDate() {
        return this.modifiedDate;
    }

    /**
     * Assigns the timestamp of the last modification.
     *
     * @param value the new value, may be {@code null}
     */
    public void setModifiedDate(XMLGregorianCalendar value) {
        this.modifiedDate = value;
    }

    /**
     * Returns the order this comment is attached to.
     *
     * @return the current value, possibly {@code null}
     */
    public AbstractOrderDTO getOrder() {
        return this.order;
    }

    /**
     * Assigns the order this comment is attached to.
     *
     * @param value the new value, may be {@code null}
     */
    public void setOrder(AbstractOrderDTO value) {
        this.order = value;
    }

}
| {
"content_hash": "13ce8b8b32e9cc3f173d50f60c81f5f2",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 111,
"avg_line_length": 24.333333333333332,
"alnum_prop": 0.5876980929358044,
"repo_name": "ryandcarter/hybris-connector",
"id": "82c2271672271e16ee520ef9569bf2854711b72e",
"size": "3723",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main/java/org/mule/modules/hybris/model/B2BCommentDTO.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "7702461"
}
],
"symlink_target": ""
} |
-- Cache frequently used globals in locals (local access is faster than
-- repeated global-table lookups, and it documents this file's dependencies).
local YwDeclare = YwDeclare
local YwClass = YwClass
local DLog = YwDebug.Log
local DLogWarn = YwDebug.LogWarning
local DLogError = YwDebug.LogError

-- Register new class LgBomb.
local strClassName = "LgBomb"
local LgBomb = YwDeclare(strClassName, YwClass(strClassName))

-- Member variables.
-- All fields start as `false` as an explicit "not yet assigned" marker; they
-- are populated by the C# binding layer or during Awake().

-- The c# class object.
LgBomb.this = false

-- The transform.
LgBomb.transform = false

-- The c# gameObject.
LgBomb.gameObject = false

-- Public.

-- Prefab of explosion effect.
LgBomb.m_cExplosion = false

-- Private.

-- Reference to the player's LayBombs script.
LgBomb.m_cLayBombs = false

-- Reference to the PickupSpawner script.
LgBomb.m_cPickupSpawner = false

-- Reference to the particle system of the explosion effect.
LgBomb.m_cExplosionFx = false
-- Awake method.
-- Engine callback: validates the binding fields (this/transform/gameObject)
-- that the C# side must have injected, then caches scene component lookups.
function LgBomb:Awake()
    --print("LgBomb:Awake")

    -- Check variable.
    if (not self.this) or (not self.transform) or (not self.gameObject) then
        DLogError("Init error in LgBomb!")
        return
    end

    -- NOTE(review): assumes an object tagged "ExplosionFX" and an object named
    -- "pickupManager" exist in the scene — these calls error otherwise; confirm.
    self.m_cExplosionFx = GameObject.FindGameObjectWithTag("ExplosionFX"):GetComponent(ParticleSystem)
    self.m_cPickupSpawner = GameObject.Find("pickupManager"):GetComponent(PickupSpawner)
    -- The player is optional at this point; m_cLayBombs stays `false` when absent.
    if GameObject.FindGameObjectWithTag("Player") then
        self.m_cLayBombs = GameObject.FindGameObjectWithTag("Player"):GetComponent(LayBombs)
    end
end
-- Start method: engine callback fired once the bomb becomes active.
function LgBomb:Start()
    -- A bomb that still has a parent transform is merely attached to something
    -- else; only a root-level bomb was laid by the player and must detonate.
    if self.transform.root ~= self.transform then
        return
    end
    self:BombDetonation()
end
-- Explode.
-- Kills and knocks back every enemy within the bomb radius, triggers the
-- explosion effects and sound, then destroys this bomb object.
function LgBomb:Explode()
    --print("LgBomb:Explode")

    local this = self.this

    -- The player is now free to lay bombs when he has them.
    -- NOTE(review): m_cLayBombs is only assigned in Awake() when a "Player"
    -- object exists; this line errors if it is still `false` — confirm scene setup.
    self.m_cLayBombs.BombLaid = false

    -- Make the pickup spawner start to deliver a new pickup.
    self.m_cPickupSpawner:DeliverPickup()

    -- Find all the colliders on the Enemies layer within the bombRadius.
    local aEnemies = Physics2D.OverlapCircleAll(self.transform.position, this.m_bombRadius, 1 << LayerMask.NameToLayer("Enemies"))

    -- For each collider...
    for _, cEnemy in pairs(aEnemies) do
        -- Check if it has a rigidbody (since there is only one per enemy, on the parent).
        local cRb = cEnemy:GetComponent(Rigidbody2D)
        if cRb and ("Enemy" == cRb.tag) then
            -- Find the Enemy script and set the enemy's health to zero.
            cRb.gameObject:GetComponent(Enemy).m_HP = 0

            -- Find a vector from the bomb to the enemy.
            local vDeltaPos = cRb.transform.position - self.transform.position

            -- Apply a force in this direction with a magnitude of bombForce.
            local vForce = vDeltaPos.normalized * this.m_bombForce
            cRb:AddForce(vForce)
        end
    end

    -- Set the explosion effect's position to the bomb's position and play the particle system.
    self.m_cExplosionFx.transform.position = self.transform.position
    self.m_cExplosionFx:Play()

    -- Instantiate the explosion prefab.
    GameObject.Instantiate(this.m_explosion, self.transform.position, Quaternion.identity)

    -- Play the explosion sound effect.
    AudioSource.PlayClipAtPoint(this.m_boom, self.transform.position)

    -- Destroy the bomb.
    GameObject.Destroy(self.gameObject)
end
-- LgBomb detonation.
-- Starts the fuse: plays the fuse sound, waits m_fuseTime seconds inside a
-- coroutine, then explodes — unless the bomb was destroyed in the meantime.
function LgBomb:BombDetonation()
    --print("LgBomb:BombDetonation")

    -- Check the validation.
    if Slua.IsNull(self.gameObject) then
        return
    end

    -- LgBomb detonation.
    local cCo = coroutine.create(function ()
        -- Play the fuse seconds.
        AudioSource.PlayClipAtPoint(self.this.m_fuse, self.transform.position)

        -- Wait for 2 seconds.
        -- (Actually waits self.this.m_fuseTime seconds as configured in C#.)
        Yield(WaitForSeconds(self.this.m_fuseTime))

        -- Check the validation.
        -- The bomb may have been destroyed while the fuse was burning.
        if Slua.IsNull(self.gameObject) then
            return
        end

        -- Explode the bomb.
        self:Explode()
    end)
    coroutine.resume(cCo)
end
-- Return this class.
return LgBomb
| {
"content_hash": "8c30598905989808ef55a41b40bbdd53",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 130,
"avg_line_length": 28.602836879432623,
"alnum_prop": 0.691792710141334,
"repo_name": "zhutaorun/2DPlatformer-SLua",
"id": "0d093d55a94f8e3188972a2f60b1a63fb9b056ab",
"size": "4270",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Assets/StreamingAssets/Lua/Logic/LgBomb.lua",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "11975"
},
{
"name": "C#",
"bytes": "2768216"
},
{
"name": "Lua",
"bytes": "161777"
}
],
"symlink_target": ""
} |
package agent
import (
"crypto/rand"
"errors"
"net"
"github.com/AdRoll/hologram/log"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/agent"
)
var (
	// Not sure if this needs a mutex around it. Probably not, because it only gets written once by one thing.
	socketAddress string
	// NOTE(review): successfulKey is not referenced anywhere in this file —
	// confirm it is used elsewhere before removing.
	successfulKey *agent.Key
	// providedSSHKey is the fallback signer parsed from a key supplied on the CLI.
	providedSSHKey ssh.Signer

	errNoKeys = errors.New("No keys available in ssh-agent")
	errSSHKey = errors.New("Could not use the provided SSH key.")
)
// SSHSetAgentSock records the ssh-agent socket path and, when a raw private
// key is supplied, parses it so it can serve as the fallback signer.
func SSHSetAgentSock(socketAddressFromCli string, sshKeyFromCli []byte) {
	socketAddress = socketAddressFromCli
	if sshKeyFromCli == nil {
		return
	}
	if sshKey, keyErr := ssh.ParsePrivateKey(sshKeyFromCli); keyErr == nil {
		providedSSHKey = sshKey
	} else {
		log.Errorf("Could not parse SSH key given by the CLI. %s", keyErr)
	}
}
// SSHSign signs the provided challenge using a key from the ssh-agent keyring. The key is chosen by enumerating all
// keys, then skipping the requested number of keys.
//
// When no agent socket is configured, the key provided on the command line is
// used instead (only for skip == 0, so that fallback is attempted exactly once).
// A (nil, nil) return indicates the keyring has been exhausted.
func SSHSign(challenge []byte, skip int) (*ssh.Signature, error) {
	var signer ssh.Signer

	if socketAddress == "" {
		// Do not infinitely loop trying to use our provided SSH key.
		if skip > 0 {
			return nil, errSSHKey
		}
		log.Debug("Falling back on provided SSH key.")
		if providedSSHKey == nil {
			return nil, errSSHKey
		}
		signer = providedSSHKey
	} else {
		c, err := net.Dial("unix", socketAddress)
		if err != nil {
			return nil, err
		}
		// Close the agent connection once signing is done; previously one
		// unix-socket connection was leaked per call.
		defer c.Close()

		// Named keyring to avoid shadowing the imported agent package.
		keyring := agent.NewClient(c)

		keys, err := keyring.List()
		if err != nil {
			return nil, err
		}
		if len(keys) == 0 {
			return nil, errNoKeys
		}
		if skip >= len(keys) {
			// indicate that we've tried everything and exhausted the keyring
			return nil, nil
		}

		signers, getSignersErr := keyring.Signers()
		if getSignersErr != nil {
			return nil, getSignersErr
		}
		signer = signers[skip]
	}

	return signer.Sign(rand.Reader, challenge)
}
| {
"content_hash": "bfd1d28b00f1ab4da083ba9dd95292b0",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 116,
"avg_line_length": 23.317073170731707,
"alnum_prop": 0.6846234309623431,
"repo_name": "AdRoll/hologram",
"id": "e3ab2eac6b7e060754806c4196ec25b7393a1194",
"size": "2498",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "agent/ssh.go",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2302"
},
{
"name": "Go",
"bytes": "148289"
},
{
"name": "Shell",
"bytes": "13604"
}
],
"symlink_target": ""
} |
package com.oracle.xmlns.apps.crmcommon.salesparties.salespartiesservice.types;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlType;
/**
* <p>Java class for anonymous complex type.
*
* <p>The following schema fragment specifies the expected content contained within this class.
*
* <pre>
* <complexType>
* <complexContent>
* <restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
* <sequence>
* </sequence>
* </restriction>
* </complexContent>
* </complexType>
* </pre>
*
*
*/
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "")
@XmlRootElement(name = "createPartyUsageResponse")
public class CreatePartyUsageResponse {

    // Intentionally empty: per the schema fragment above, the
    // createPartyUsageResponse element carries no payload; receiving it
    // simply acknowledges the operation.

}
| {
"content_hash": "3398f481d98b05c075eb07b260573ea3",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 95,
"avg_line_length": 25.91176470588235,
"alnum_prop": 0.6912599318955732,
"repo_name": "dushmis/Oracle-Cloud",
"id": "cf6d20432043b2a518bcd7c297588e616021f2a2",
"size": "881",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "PaaS_SaaS_Accelerator_RESTFulFacade/FusionProxy_SalesPartyService/src/com/oracle/xmlns/apps/crmcommon/salesparties/salespartiesservice/types/CreatePartyUsageResponse.java",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "810"
},
{
"name": "C#",
"bytes": "2922162"
},
{
"name": "CSS",
"bytes": "405502"
},
{
"name": "HTML",
"bytes": "2487"
},
{
"name": "Java",
"bytes": "33061859"
},
{
"name": "JavaScript",
"bytes": "43011"
},
{
"name": "PLSQL",
"bytes": "48918"
}
],
"symlink_target": ""
} |
import time
import re
import keyword
import builtins
from tkinter import *
from idlelib.Delegator import Delegator
from idlelib.configHandler import idleConf
DEBUG = False
def any(name, alternates):
    """Return a named-group regex pattern matching any of *alternates*.

    NOTE: intentionally shadows the builtin ``any`` inside this module
    (long-standing IDLE quirk, kept for compatibility).
    """
    return "(?P<{}>{})".format(name, "|".join(alternates))
def make_pat():
    """Build the master colorizing regex: KEYWORD|BUILTIN|COMMENT|STRING|SYNC.

    Each alternative is a named group so a match can be mapped back to the
    text tag that should be applied by the colorizer.
    """
    kw = r"\b" + any("KEYWORD", keyword.kwlist) + r"\b"
    builtinlist = [str(name) for name in dir(builtins)
                   if not name.startswith('_') and \
                   name not in keyword.kwlist]
    # self.file = open("file") :
    # 1st 'file' colorized normal, 2nd as builtin, 3rd as string
    builtin = r"([^.'\"\\#]\b|^)" + any("BUILTIN", builtinlist) + r"\b"
    comment = any("COMMENT", [r"#[^\n]*"])
    # Accept every legal (and legacy) string-prefix combination.
    stringprefix = r"(\br|u|ur|R|U|UR|Ur|uR|b|B|br|Br|bR|BR|rb|rB|Rb|RB)?"
    sqstring = stringprefix + r"'[^'\\\n]*(\\.[^'\\\n]*)*'?"
    dqstring = stringprefix + r'"[^"\\\n]*(\\.[^"\\\n]*)*"?'
    sq3string = stringprefix + r"'''[^'\\]*((\\.|'(?!''))[^'\\]*)*(''')?"
    dq3string = stringprefix + r'"""[^"\\]*((\\.|"(?!""))[^"\\]*)*(""")?'
    string = any("STRING", [sq3string, dq3string, sqstring, dqstring])
    # SYNC matches a newline; the incremental recolorizer uses it as a
    # resynchronization marker between passes.
    return kw + "|" + builtin + "|" + comment + "|" + string +\
           "|" + any("SYNC", [r"\n"])
# Compiled master pattern (re.S so string literals may span lines) and the
# pattern used to pick out the identifier following "def"/"class".
prog = re.compile(make_pat(), re.S)
idprog = re.compile(r"\s+(\w+)", re.S)
class ColorDelegator(Delegator):
    """Delegator that applies syntax-highlight tags to a Text widget.

    Insert/delete operations are intercepted, the touched range is marked
    with a "TODO" tag, and an after() callback incrementally recolorizes
    marked regions in the background.
    """

    def __init__(self):
        Delegator.__init__(self)
        # Module-level compiled patterns, cached per instance.
        self.prog = prog
        self.idprog = idprog
        self.LoadTagDefs()

    def setdelegate(self, delegate):
        """Attach to (or detach from) the underlying widget delegate."""
        if self.delegate is not None:
            self.unbind("<<toggle-auto-coloring>>")
        Delegator.setdelegate(self, delegate)
        if delegate is not None:
            self.config_colors()
            self.bind("<<toggle-auto-coloring>>", self.toggle_colorize_event)
            # Colorize the whole buffer on attach.
            self.notify_range("1.0", "end")
        else:
            # No delegate - stop any colorizing
            self.stop_colorizing = True
            self.allow_colorizing = False

    def config_colors(self):
        """Apply the loaded tag definitions to the widget."""
        for tag, cnf in self.tagdefs.items():
            if cnf:
                self.tag_configure(tag, **cnf)
        # Keep the selection tag on top of the syntax tags.
        self.tag_raise('sel')

    def LoadTagDefs(self):
        """Populate self.tagdefs from the current IDLE highlight theme."""
        theme = idleConf.GetOption('main','Theme','name')
        self.tagdefs = {
            "COMMENT": idleConf.GetHighlight(theme, "comment"),
            "KEYWORD": idleConf.GetHighlight(theme, "keyword"),
            "BUILTIN": idleConf.GetHighlight(theme, "builtin"),
            "STRING": idleConf.GetHighlight(theme, "string"),
            "DEFINITION": idleConf.GetHighlight(theme, "definition"),
            "SYNC": {'background':None,'foreground':None},
            "TODO": {'background':None,'foreground':None},
            "ERROR": idleConf.GetHighlight(theme, "error"),
            # The following is used by ReplaceDialog:
            "hit": idleConf.GetHighlight(theme, "hit"),
            }

        if DEBUG: print('tagdefs',self.tagdefs)

    def insert(self, index, chars, tags=None):
        # Intercept inserts and schedule recolorizing of the inserted span.
        index = self.index(index)
        self.delegate.insert(index, chars, tags)
        self.notify_range(index, index + "+%dc" % len(chars))

    def delete(self, index1, index2=None):
        # Intercept deletes and schedule recolorizing from the deletion point.
        index1 = self.index(index1)
        self.delegate.delete(index1, index2)
        self.notify_range(index1)

    # Scheduling state: pending after() id, user toggle, and an
    # "in recolorize" flag, respectively.
    after_id = None
    allow_colorizing = True
    colorizing = False

    def notify_range(self, index1, index2=None):
        """Mark [index1, index2) dirty and schedule a recolorize pass."""
        self.tag_add("TODO", index1, index2)
        if self.after_id:
            if DEBUG: print("colorizing already scheduled")
            return
        if self.colorizing:
            self.stop_colorizing = True
            if DEBUG: print("stop colorizing")
        if self.allow_colorizing:
            if DEBUG: print("schedule colorizing")
            self.after_id = self.after(1, self.recolorize)

    close_when_done = None # Window to be closed when done colorizing

    def close(self, close_when_done=None):
        """Cancel pending work; optionally destroy a window once idle."""
        if self.after_id:
            after_id = self.after_id
            self.after_id = None
            if DEBUG: print("cancel scheduled recolorizer")
            self.after_cancel(after_id)
        self.allow_colorizing = False
        self.stop_colorizing = True
        if close_when_done:
            if not self.colorizing:
                close_when_done.destroy()
            else:
                # Defer destruction until recolorize() finishes.
                self.close_when_done = close_when_done

    def toggle_colorize_event(self, event):
        """Event handler: toggle automatic colorizing on or off."""
        if self.after_id:
            after_id = self.after_id
            self.after_id = None
            if DEBUG: print("cancel scheduled recolorizer")
            self.after_cancel(after_id)
        if self.allow_colorizing and self.colorizing:
            if DEBUG: print("stop colorizing")
            self.stop_colorizing = True
        self.allow_colorizing = not self.allow_colorizing
        if self.allow_colorizing and not self.colorizing:
            self.after_id = self.after(1, self.recolorize)
        if DEBUG:
            print("auto colorizing turned",\
                  self.allow_colorizing and "on" or "off")
        # "break" stops further Tk event processing.
        return "break"

    def recolorize(self):
        """after() callback: run one recolorize pass, rescheduling if dirty."""
        self.after_id = None
        if not self.delegate:
            if DEBUG: print("no delegate")
            return
        if not self.allow_colorizing:
            if DEBUG: print("auto colorizing is off")
            return
        if self.colorizing:
            if DEBUG: print("already colorizing")
            return
        try:
            self.stop_colorizing = False
            self.colorizing = True
            if DEBUG: print("colorizing...")
            t0 = time.perf_counter()
            self.recolorize_main()
            t1 = time.perf_counter()
            if DEBUG: print("%.3f seconds" % (t1-t0))
        finally:
            self.colorizing = False
        # Still-dirty ranges (e.g. pass was interrupted): go again.
        if self.allow_colorizing and self.tag_nextrange("TODO", "1.0"):
            if DEBUG: print("reschedule colorizing")
            self.after_id = self.after(1, self.recolorize)
        if self.close_when_done:
            top = self.close_when_done
            self.close_when_done = None
            top.destroy()

    def recolorize_main(self):
        """Core pass: retag every "TODO" range, resyncing on "SYNC" marks.

        NOTE: ``next`` deliberately shadows the builtin here; it holds a
        Text index ("line.col" string), not an iterator.
        """
        next = "1.0"
        while True:
            item = self.tag_nextrange("TODO", next)
            if not item:
                break
            head, tail = item
            self.tag_remove("SYNC", head, tail)
            item = self.tag_prevrange("SYNC", head)
            if item:
                # Resume from the last known-good (synced) position.
                head = item[1]
            else:
                head = "1.0"

            chars = ""
            next = head
            lines_to_get = 1
            ok = False
            while not ok:
                mark = next
                # Grab exponentially more lines per iteration (capped at 100).
                next = self.index(mark + "+%d lines linestart" %
                                         lines_to_get)
                lines_to_get = min(lines_to_get * 2, 100)
                ok = "SYNC" in self.tag_names(next + "-1c")
                line = self.get(mark, next)
                ##print head, "get", mark, next, "->", repr(line)
                if not line:
                    return
                for tag in self.tagdefs:
                    self.tag_remove(tag, mark, next)
                chars = chars + line
                m = self.prog.search(chars)
                while m:
                    for key, value in m.groupdict().items():
                        if value:
                            a, b = m.span(key)
                            self.tag_add(key,
                                         head + "+%dc" % a,
                                         head + "+%dc" % b)
                            if value in ("def", "class"):
                                # Also tag the identifier being defined.
                                m1 = self.idprog.match(chars, b)
                                if m1:
                                    a, b = m1.span(1)
                                    self.tag_add("DEFINITION",
                                                 head + "+%dc" % a,
                                                 head + "+%dc" % b)
                    m = self.prog.search(chars, m.end())
                if "SYNC" in self.tag_names(next + "-1c"):
                    head = next
                    chars = ""
                else:
                    ok = False
                if not ok:
                    # We're in an inconsistent state, and the call to
                    # update may tell us to stop.  It may also change
                    # the correct value for "next" (since this is a
                    # line.col string, not a true mark).  So leave a
                    # crumb telling the next invocation to resume here
                    # in case update tells us to leave.
                    self.tag_add("TODO", next)
                self.update()
                if self.stop_colorizing:
                    if DEBUG: print("colorizing stopped")
                    return

    def removecolors(self):
        """Strip every syntax-highlight tag from the whole buffer."""
        for tag in self.tagdefs:
            self.tag_remove(tag, "1.0", "end")
def _color_delegator(parent):
    """Manual htest: open a Text widget with the colorizer installed."""
    from idlelib.Percolator import Percolator
    root = Tk()
    root.title("Test ColorDelegator")
    # Position the test window just below the parent window.
    width, height, x, y = (int(part) for part in re.split('[x+]', parent.geometry()))
    root.geometry("+%d+%d" % (x, y + 150))
    sample = "if somename: x = 'abc' # comment\nprint"
    widget = Text(root, background="white")
    widget.insert("insert", sample)
    widget.pack(expand=1, fill="both")
    percolator = Percolator(widget)
    percolator.insertfilter(ColorDelegator())
    root.mainloop()
if __name__ == "__main__":
    # Run the human-verified test (htest) when executed as a script.
    from idlelib.idle_test.htest import run
    run(_color_delegator)
| {
"content_hash": "9bfecd37d5406d6582bff8409391d125",
"timestamp": "",
"source": "github",
"line_count": 254,
"max_line_length": 77,
"avg_line_length": 38.10629921259842,
"alnum_prop": 0.5058373798946172,
"repo_name": "OptimusGitEtna/RestSymf",
"id": "22bb22f82bc4c46bdb63da91acc6b1854cf26a78",
"size": "9679",
"binary": false,
"copies": "59",
"ref": "refs/heads/master",
"path": "Python-3.4.2/Lib/idlelib/ColorDelegator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "594205"
},
{
"name": "C",
"bytes": "15348597"
},
{
"name": "C++",
"bytes": "65109"
},
{
"name": "CSS",
"bytes": "12039"
},
{
"name": "Common Lisp",
"bytes": "24481"
},
{
"name": "JavaScript",
"bytes": "10597"
},
{
"name": "Makefile",
"bytes": "9444"
},
{
"name": "Objective-C",
"bytes": "1390141"
},
{
"name": "PHP",
"bytes": "93070"
},
{
"name": "PowerShell",
"bytes": "1420"
},
{
"name": "Prolog",
"bytes": "557"
},
{
"name": "Python",
"bytes": "24018306"
},
{
"name": "Shell",
"bytes": "440753"
},
{
"name": "TeX",
"bytes": "323102"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
package org.kaaproject.kaa.server.control.service.loadmgmt;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.fail;
import org.junit.Test;
import org.kaaproject.kaa.server.common.zk.gen.LoadInfo;
import org.kaaproject.kaa.server.control.service.loadmgmt.dynamicmgmt.OperationsServerLoadHistory;
import java.util.Random;
/**
* @author Andrey Panasenko
*/
public class OperationsServerLoadHistoryTest {

    /** History retention window in milliseconds used by every test. */
    private static final long MAX_HISTORY_TIME_LIVE = 3000;

    /** Shared RNG for arbitrary endpoint counts (constants should be final). */
    private static final Random rnd = new Random();

    /**
     * Test method for {@link org.kaaproject.kaa.server.control.service.loadmgmt.dynamicmgmt.OperationsServerLoadHistory#OperationsServerLoadHistory(long)}.
     */
    @Test
    public void testOperationsServerLoadHistory() {
        OperationsServerLoadHistory hist = new OperationsServerLoadHistory(MAX_HISTORY_TIME_LIVE);
        assertNotNull(hist);
    }

    /**
     * Test method for {@link org.kaaproject.kaa.server.control.service.loadmgmt.dynamicmgmt.OperationsServerLoadHistory#addOpsServerLoad(LoadInfo)}.
     * Fills the history slower than the retention window and verifies that
     * expired samples are pruned.
     */
    @Test
    public void testAddOpsServerLoad() {
        OperationsServerLoadHistory hist = new OperationsServerLoadHistory(MAX_HISTORY_TIME_LIVE);
        hist.addOpsServerLoad(new LoadInfo(2, 1.0));
        fillOutHistory(hist, 1000, 5);
        assertNotNull(hist.getHistory());
        if (hist.getHistory().size() >= 5) {
            fail("testAddOpsServerLoad, removeOld history failed, size should be no more than 4, but " + hist.getHistory().size());
        }
    }

    /**
     * Adds {@code number} random load samples to the history, sleeping
     * {@code period} milliseconds before each one.
     *
     * @param hist   history under test
     * @param period delay in milliseconds between samples
     * @param number how many samples to add
     */
    private void fillOutHistory(OperationsServerLoadHistory hist, long period, int number) {
        for (int i = 0; i < number; i++) {
            try {
                Thread.sleep(period);
                hist.addOpsServerLoad(new LoadInfo(rnd.nextInt(1000), 1.0));
            } catch (InterruptedException e) {
                // Restore the interrupt flag before failing so callers can observe it.
                Thread.currentThread().interrupt();
                fail(e.toString());
            }
        }
    }

    /**
     * Test method for {@link org.kaaproject.kaa.server.control.service.loadmgmt.dynamicmgmt.OperationsServerLoadHistory#getHistory()}.
     */
    @Test
    public void testGetHistory() {
        OperationsServerLoadHistory hist = new OperationsServerLoadHistory(MAX_HISTORY_TIME_LIVE);
        hist.addOpsServerLoad(new LoadInfo(2, 1.0));
        assertNotNull(hist.getHistory());
        assertEquals(1, hist.getHistory().size());
        assertEquals(2, hist.getHistory().get(0).getLoadInfo().getEndpointCount().intValue());
    }

    /**
     * Test method for {@link org.kaaproject.kaa.server.control.service.loadmgmt.dynamicmgmt.OperationsServerLoadHistory#getMaxHistoryTimeLive()}.
     */
    @Test
    public void testGetMaxHistoryTimeLive() {
        OperationsServerLoadHistory hist = new OperationsServerLoadHistory(MAX_HISTORY_TIME_LIVE);
        assertNotNull(hist);
        assertEquals(MAX_HISTORY_TIME_LIVE, hist.getMaxHistoryTimeLive());
    }
}
| {
"content_hash": "e39272d140abd01e1abddc968c652c26",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 154,
"avg_line_length": 33.49411764705882,
"alnum_prop": 0.7298911134527573,
"repo_name": "sashadidukh/kaa",
"id": "1a601ca9fc5a0ca60fbda829f99007c5db8277dc",
"size": "3453",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "server/node/src/test/java/org/kaaproject/kaa/server/control/service/loadmgmt/OperationsServerLoadHistoryTest.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5641"
},
{
"name": "C",
"bytes": "1333836"
},
{
"name": "C++",
"bytes": "1097116"
},
{
"name": "CMake",
"bytes": "99396"
},
{
"name": "CSS",
"bytes": "78522"
},
{
"name": "GCC Machine Description",
"bytes": "2041"
},
{
"name": "HTML",
"bytes": "20318"
},
{
"name": "Java",
"bytes": "9749624"
},
{
"name": "JavaScript",
"bytes": "14069"
},
{
"name": "Makefile",
"bytes": "5531"
},
{
"name": "Nix",
"bytes": "20497"
},
{
"name": "Objective-C",
"bytes": "1271558"
},
{
"name": "Python",
"bytes": "140178"
},
{
"name": "Ruby",
"bytes": "11176"
},
{
"name": "Shell",
"bytes": "121002"
},
{
"name": "Thrift",
"bytes": "10826"
},
{
"name": "XSLT",
"bytes": "4062"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.