Dataset fields: id (int32, 0 to 165k) | repo (string, 7-58 chars) | path (string, 12-218 chars) | func_name (string, 3-140 chars) | original_string (string, 73-34.1k chars) | language (1 class: java) | code (string, 73-34.1k chars) | code_tokens (list) | docstring (string, 3-16k chars) | docstring_tokens (list) | sha (string, 40 chars) | url (string, 105-339 chars)

Each entry below gives the row's metadata, the function code, its docstring, and the source link.
id: 25,700 | repo: apache/incubator-gobblin | func_name: CsvToJsonConverter.convertRecord | language: java
path: gobblin-core/src/main/java/org/apache/gobblin/converter/csv/CsvToJsonConverter.java

```java
@Override
public Iterable<JsonObject> convertRecord(JsonArray outputSchema, String inputRecord, WorkUnitState workUnit)
    throws DataConversionException {
  try {
    String strDelimiter = workUnit.getProp(ConfigurationKeys.CONVERTER_CSV_TO_JSON_DELIMITER);
    if (Strings.isNullOrEmpty(strDelimiter)) {
      throw new IllegalArgumentException("Delimiter cannot be empty");
    }
    InputStreamCSVReader reader = new InputStreamCSVReader(inputRecord, strDelimiter.charAt(0),
        workUnit.getProp(ConfigurationKeys.CONVERTER_CSV_TO_JSON_ENCLOSEDCHAR,
            ConfigurationKeys.DEFAULT_CONVERTER_CSV_TO_JSON_ENCLOSEDCHAR).charAt(0));
    List<String> recordSplit;
    recordSplit = Lists.newArrayList(reader.splitRecord());
    JsonObject outputRecord = new JsonObject();
    for (int i = 0; i < outputSchema.size(); i++) {
      if (i < recordSplit.size()) {
        if (recordSplit.get(i) == null) {
          outputRecord.add(outputSchema.get(i).getAsJsonObject().get("columnName").getAsString(), JsonNull.INSTANCE);
        } else if (recordSplit.get(i).isEmpty() || recordSplit.get(i).toLowerCase().equals(NULL)) {
          outputRecord.add(outputSchema.get(i).getAsJsonObject().get("columnName").getAsString(), JsonNull.INSTANCE);
        } else {
          outputRecord.addProperty(outputSchema.get(i).getAsJsonObject().get("columnName").getAsString(), recordSplit.get(i));
        }
      } else {
        outputRecord.add(outputSchema.get(i).getAsJsonObject().get("columnName").getAsString(), JsonNull.INSTANCE);
      }
    }
    return new SingleRecordIterable<>(outputRecord);
  } catch (Exception e) {
    throw new DataConversionException(e);
  }
}
```

docstring: Takes in a record with format String and splits the data based on SOURCE_SCHEMA_DELIMITER. Uses the inputSchema and the split record to convert the record to a JsonObject.
@return a JsonObject representing the record
@throws DataConversionException

sha: f029b4c0fea0fe4aa62f36dda2512344ff708bae | url: https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-core/src/main/java/org/apache/gobblin/converter/csv/CsvToJsonConverter.java#L60-L92
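
Usage sketch for convertRecord above: a minimal, hypothetical driver that pushes one CSV line through the converter. The two-column schema, the delimiter value, and the sample record are illustrative assumptions; the converter class, property key, and WorkUnitState come from the entry itself.

```java
import com.google.gson.JsonArray;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import org.apache.gobblin.configuration.ConfigurationKeys;
import org.apache.gobblin.configuration.WorkUnitState;
import org.apache.gobblin.converter.csv.CsvToJsonConverter;

public class CsvToJsonSketch {
  public static void main(String[] args) throws Exception {
    // Schema: one object per column, keyed by "columnName" as convertRecord expects.
    JsonArray outputSchema = new JsonParser()
        .parse("[{\"columnName\":\"id\"},{\"columnName\":\"name\"}]").getAsJsonArray();

    WorkUnitState state = new WorkUnitState();
    state.setProp(ConfigurationKeys.CONVERTER_CSV_TO_JSON_DELIMITER, ",");

    CsvToJsonConverter converter = new CsvToJsonConverter();
    for (JsonObject record : converter.convertRecord(outputSchema, "7,alice", state)) {
      System.out.println(record); // {"id":"7","name":"alice"}
    }
  }
}
```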
id: 25,701 | repo: apache/incubator-gobblin | func_name: HiveRegistrationUnitComparator.compareAll | language: java
path: gobblin-hive-registration/src/main/java/org/apache/gobblin/hive/HiveRegistrationUnitComparator.java

```java
@SuppressWarnings("unchecked")
public T compareAll() {
  this.compareInputFormat().compareOutputFormat().compareIsCompressed().compareIsStoredAsSubDirs().compareNumBuckets()
      .compareBucketCols().compareRawLocation().compareParameters();
  return (T) this;
}
```

docstring: Compare all parameters.

sha: f029b4c0fea0fe4aa62f36dda2512344ff708bae | url: https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive/HiveRegistrationUnitComparator.java#L158-L163
id: 25,702 | repo: apache/incubator-gobblin | func_name: Counters.inc | language: java
path: gobblin-metrics-libs/gobblin-metrics-base/src/main/java/org/apache/gobblin/metrics/Counters.java

```java
public void inc(E e, long n) {
  if (counters != null && counters.containsKey(e)) {
    counters.get(e).inc(n);
  }
}
```

docstring: Increment the counter associated with enum value passed.
@param e Counter to increment.
@param n the value to increment

sha: f029b4c0fea0fe4aa62f36dda2512344ff708bae | url: https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-metrics-libs/gobblin-metrics-base/src/main/java/org/apache/gobblin/metrics/Counters.java#L62-L66
id: 25,703 | repo: apache/incubator-gobblin | func_name: Counters.getCount | language: java
path: gobblin-metrics-libs/gobblin-metrics-base/src/main/java/org/apache/gobblin/metrics/Counters.java

```java
public long getCount(E e) {
  if (counters.containsKey(e)) {
    return counters.get(e).getCount();
  } else {
    return 0l;
  }
}
```

docstring: Get count for counter associated with enum value passed.
@param e Counter to query.
@return the count for this counter.

sha: f029b4c0fea0fe4aa62f36dda2512344ff708bae | url: https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-metrics-libs/gobblin-metrics-base/src/main/java/org/apache/gobblin/metrics/Counters.java#L73-L79
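
The two Counters entries above wrap Codahale counters keyed by an enum. The class's initialization API is not shown in these rows, so here is a self-contained sketch of the same enum-keyed counter pattern built directly on Dropwizard Metrics; the enum and its values are illustrative.

```java
import java.util.EnumMap;
import java.util.Map;
import com.codahale.metrics.Counter;
import com.codahale.metrics.MetricRegistry;

public class CountersSketch {
  enum Event { RECORDS_READ, RECORDS_FAILED }

  public static void main(String[] args) {
    MetricRegistry registry = new MetricRegistry();
    // One counter per enum value, mirroring the map the Counters class guards.
    Map<Event, Counter> counters = new EnumMap<>(Event.class);
    for (Event e : Event.values()) {
      counters.put(e, registry.counter(e.name()));
    }

    counters.get(Event.RECORDS_READ).inc(42L);
    System.out.println(counters.get(Event.RECORDS_READ).getCount());   // 42
    System.out.println(counters.get(Event.RECORDS_FAILED).getCount()); // 0
  }
}
```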
id: 25,704 | repo: apache/incubator-gobblin | func_name: HiveRegister.addOrAlterPartition | language: java
path: gobblin-hive-registration/src/main/java/org/apache/gobblin/hive/HiveRegister.java

```java
public void addOrAlterPartition(HiveTable table, HivePartition partition) throws IOException {
  if (!addPartitionIfNotExists(table, partition)) {
    alterPartition(table, partition);
  }
}
```

docstring: Add a partition to a table if not exists, or alter a partition if exists.
@param table the {@link HiveTable} to which the partition belongs.
@param partition a {@link HivePartition} to which the existing partition should be updated.
@throws IOException

sha: f029b4c0fea0fe4aa62f36dda2512344ff708bae | url: https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive/HiveRegister.java#L285-L289
id: 25,705 | repo: apache/incubator-gobblin | func_name: AvroFileExtractor.getSchema | language: java
path: gobblin-core/src/main/java/org/apache/gobblin/source/extractor/hadoop/AvroFileExtractor.java

```java
@Override
public Schema getSchema() {
  if (this.workUnit.contains(ConfigurationKeys.SOURCE_SCHEMA)) {
    return new Schema.Parser().parse(this.workUnit.getProp(ConfigurationKeys.SOURCE_SCHEMA));
  }
  AvroFsHelper hfsHelper = (AvroFsHelper) this.fsHelper;
  if (this.filesToPull.isEmpty()) {
    return null;
  }
  try {
    return hfsHelper.getAvroSchema(this.filesToPull.get(0));
  } catch (FileBasedHelperException e) {
    Throwables.propagate(e);
    return null;
  }
}
```

docstring: Assumption is that all files in the input directory have the same schema

sha: f029b4c0fea0fe4aa62f36dda2512344ff708bae | url: https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/hadoop/AvroFileExtractor.java#L56-L72
id: 25,706 | repo: apache/incubator-gobblin | func_name: CompactionTimeRangeVerifier.getMachedLookbackTime | language: java
path: gobblin-compaction/src/main/java/org/apache/gobblin/compaction/verify/CompactionTimeRangeVerifier.java

```java
public static String getMachedLookbackTime(String datasetName, String datasetsAndLookBacks, String sysDefaultLookback) {
  String defaultLookback = sysDefaultLookback;
  for (String entry : Splitter.on(";").trimResults()
      .omitEmptyStrings().splitToList(datasetsAndLookBacks)) {
    List<String> datasetAndLookbackTime = Splitter.on(":").trimResults().omitEmptyStrings().splitToList(entry);
    if (datasetAndLookbackTime.size() == 1) {
      defaultLookback = datasetAndLookbackTime.get(0);
    } else if (datasetAndLookbackTime.size() == 2) {
      String regex = datasetAndLookbackTime.get(0);
      if (Pattern.compile(regex).matcher(datasetName).find()) {
        return datasetAndLookbackTime.get(1);
      }
    } else {
      log.error("Invalid format in {}, {} cannot find its lookback time", datasetsAndLookBacks, datasetName);
    }
  }
  return defaultLookback;
}
```

docstring: Find the correct lookback time for a given dataset.
@param datasetsAndLookBacks Lookback configuration for multiple datasets. Each dataset is identified by a regex pattern, and multiple 'regex:lookback' pairs are joined by semicolons. A default lookback time may appear without a regex prefix; if nothing matches and no such default exists, {@param sysDefaultLookback} is used. Example format: [Regex1]:[T1];[Regex2]:[T2];[DEFAULT_T];[Regex3]:[T3], e.g. Identity.*:1d2h;22h;BizProfile.BizCompany:3h (22h is the default lookback time).
@param sysDefaultLookback Returned when no lookback time is specified for {@param datasetName} and there is no default lookback time inside {@param datasetsAndLookBacks}.
@param datasetName A description of the dataset without time partition information, e.g. 'Identity/MemberAccount' or 'PageViewEvent'.
@return The lookback time matched with the given dataset.

sha: f029b4c0fea0fe4aa62f36dda2512344ff708bae | url: https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/verify/CompactionTimeRangeVerifier.java#L112-L130
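
A worked example of the matching rules the docstring describes, using its own example pattern string; the dataset names are illustrative. Each call is to the static method from the entry above.

```java
// Assumes: import org.apache.gobblin.compaction.verify.CompactionTimeRangeVerifier;
String datasetsAndLookBacks = "Identity.*:1d2h;22h;BizProfile.BizCompany:3h";

// "Identity.*" matches, so its lookback wins over the bare default:
CompactionTimeRangeVerifier.getMachedLookbackTime(
    "Identity/MemberAccount", datasetsAndLookBacks, "12h");  // returns "1d2h"

// No regex matches, so the in-list default "22h" is returned:
CompactionTimeRangeVerifier.getMachedLookbackTime(
    "PageViewEvent", datasetsAndLookBacks, "12h");           // returns "22h"

// No regex matches and no in-list default, so the system default applies:
CompactionTimeRangeVerifier.getMachedLookbackTime(
    "PageViewEvent", "Identity.*:1d2h", "12h");              // returns "12h"
```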
id: 25,707 | repo: apache/incubator-gobblin | func_name: FSFlowTemplateCatalog.validateTemplateURI | language: java
path: gobblin-service/src/main/java/org/apache/gobblin/service/modules/template_catalog/FSFlowTemplateCatalog.java

```java
private boolean validateTemplateURI(URI flowURI) {
  if (!this.sysConfig.hasPath(ServiceConfigKeys.TEMPLATE_CATALOGS_FULLY_QUALIFIED_PATH_KEY)) {
    log.error("Missing config " + ServiceConfigKeys.TEMPLATE_CATALOGS_FULLY_QUALIFIED_PATH_KEY);
    return false;
  }
  if (!flowURI.getScheme().equals(FS_SCHEME)) {
    log.error("Expected scheme " + FS_SCHEME + " got unsupported scheme " + flowURI.getScheme());
    return false;
  }
  return true;
}
```

docstring: Determine whether a URI of a jobTemplate or a FlowTemplate is valid.
@param flowURI The given job/flow template URI.
@return true if the URI is valid.

sha: f029b4c0fea0fe4aa62f36dda2512344ff708bae | url: https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-service/src/main/java/org/apache/gobblin/service/modules/template_catalog/FSFlowTemplateCatalog.java#L176-L188
id: 25,708 | repo: apache/incubator-gobblin | func_name: HiveMaterializerFromEntityQueryGenerator.generatePublishQueries | language: java
path: gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/materializer/HiveMaterializerFromEntityQueryGenerator.java

```java
public QueryBasedHivePublishEntity generatePublishQueries() throws DataConversionException {
  QueryBasedHivePublishEntity publishEntity = new QueryBasedHivePublishEntity();
  List<String> publishQueries = publishEntity.getPublishQueries();
  Map<String, String> publishDirectories = publishEntity.getPublishDirectories();
  List<String> cleanupQueries = publishEntity.getCleanupQueries();
  List<String> cleanupDirectories = publishEntity.getCleanupDirectories();

  String createFinalTableDDL =
      HiveConverterUtils.generateCreateDuplicateTableDDL(outputDatabaseName, stagingTableName, outputTableName,
          outputDataLocation, Optional.of(outputDatabaseName));
  publishQueries.add(createFinalTableDDL);
  log.debug("Create final table DDL:\n" + createFinalTableDDL);

  if (!this.supportTargetPartitioning || partitionsDDLInfo.size() == 0) {
    log.debug("Snapshot directory to move: " + stagingDataLocation + " to: " + outputDataLocation);
    publishDirectories.put(stagingDataLocation, outputDataLocation);
    String dropStagingTableDDL = HiveAvroORCQueryGenerator.generateDropTableDDL(outputDatabaseName, stagingTableName);
    log.debug("Drop staging table DDL: " + dropStagingTableDDL);
    cleanupQueries.add(dropStagingTableDDL);
    log.debug("Staging table directory to delete: " + stagingDataLocation);
    cleanupDirectories.add(stagingDataLocation);
  } else {
    String finalDataPartitionLocation = outputDataLocation + Path.SEPARATOR + stagingDataPartitionDirName;
    Optional<Path> destPartitionLocation =
        HiveConverterUtils.getDestinationPartitionLocation(destinationTableMeta, this.workUnitState,
            conversionEntity.getPartition().get().getName());
    finalDataPartitionLocation = HiveConverterUtils.updatePartitionLocation(finalDataPartitionLocation,
        this.workUnitState, destPartitionLocation);
    log.debug("Partition directory to move: " + stagingDataPartitionLocation + " to: " + finalDataPartitionLocation);
    publishDirectories.put(stagingDataPartitionLocation, finalDataPartitionLocation);

    List<String> dropPartitionsDDL =
        HiveAvroORCQueryGenerator.generateDropPartitionsDDL(outputDatabaseName, outputTableName, partitionsDMLInfo);
    log.debug("Drop partitions if exist in final table: " + dropPartitionsDDL);
    publishQueries.addAll(dropPartitionsDDL);

    List<String> createFinalPartitionDDL =
        HiveAvroORCQueryGenerator.generateCreatePartitionDDL(outputDatabaseName, outputTableName,
            finalDataPartitionLocation, partitionsDMLInfo, Optional.<String>absent());
    log.debug("Create final partition DDL: " + createFinalPartitionDDL);
    publishQueries.addAll(createFinalPartitionDDL);

    String dropStagingTableDDL =
        HiveAvroORCQueryGenerator.generateDropTableDDL(outputDatabaseName, stagingTableName);
    log.debug("Drop staging table DDL: " + dropStagingTableDDL);
    cleanupQueries.add(dropStagingTableDDL);
    log.debug("Staging table directory to delete: " + stagingDataLocation);
    cleanupDirectories.add(stagingDataLocation);

    publishQueries.addAll(HiveAvroORCQueryGenerator.generateDropPartitionsDDL(outputDatabaseName, outputTableName,
        AbstractAvroToOrcConverter.getDropPartitionsDDLInfo(conversionEntity)));
  }
  log.info("Publish partition entity: " + publishEntity);
  return publishEntity;
}
```

docstring: Returns a QueryBasedHivePublishEntity which includes publish level queries and cleanup commands.
@return QueryBasedHivePublishEntity
@throws DataConversionException

sha: f029b4c0fea0fe4aa62f36dda2512344ff708bae | url: https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/materializer/HiveMaterializerFromEntityQueryGenerator.java#L114-L175
id: 25,709 | repo: apache/incubator-gobblin | func_name: ApacheHttpRequestBuilder.buildWriteRequest | language: java
path: gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin/http/ApacheHttpRequestBuilder.java

```java
private ApacheHttpRequest<GenericRecord> buildWriteRequest(BufferedRecord<GenericRecord> record) {
  if (record == null) {
    return null;
  }
  ApacheHttpRequest<GenericRecord> request = new ApacheHttpRequest<>();
  HttpOperation httpOperation = HttpUtils.toHttpOperation(record.getRecord());

  // Set uri
  URI uri = HttpUtils.buildURI(urlTemplate, httpOperation.getKeys(), httpOperation.getQueryParams());
  if (uri == null) {
    return null;
  }

  RequestBuilder builder = RequestBuilder.create(verb.toUpperCase());
  builder.setUri(uri);

  // Set headers
  Map<String, String> headers = httpOperation.getHeaders();
  if (headers != null && headers.size() != 0) {
    for (Map.Entry<String, String> header : headers.entrySet()) {
      builder.setHeader(header.getKey(), header.getValue());
    }
  }

  // Add payload
  int bytesWritten = addPayload(builder, httpOperation.getBody());
  if (bytesWritten == -1) {
    throw new RuntimeException("Fail to write payload into request");
  }

  request.setRawRequest(build(builder));
  request.markRecord(record, bytesWritten);
  return request;
}
```

docstring: Build a write request from a single record

sha: f029b4c0fea0fe4aa62f36dda2512344ff708bae | url: https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-modules/gobblin-http/src/main/java/org/apache/gobblin/http/ApacheHttpRequestBuilder.java#L68-L102
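
For context on the Apache HttpClient API the method above drives, a self-contained sketch of the same RequestBuilder flow: verb, URI, headers, then a payload entity. The endpoint and JSON body are illustrative, and the entity step stands in for the entry's addPayload helper.

```java
import java.net.URI;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.client.methods.RequestBuilder;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;

public class RequestBuilderSketch {
  public static void main(String[] args) {
    String payload = "{\"name\":\"alice\"}";
    HttpUriRequest request = RequestBuilder.create("POST")
        .setUri(URI.create("https://example.org/users"))   // illustrative endpoint
        .setHeader("Content-Type", "application/json")
        .setEntity(new StringEntity(payload, ContentType.APPLICATION_JSON))
        .build();
    System.out.println(request.getRequestLine());
  }
}
```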
id: 25,710 | repo: apache/incubator-gobblin | func_name: EmbeddedGobblinDistcp.update | language: java
path: gobblin-data-management/src/main/java/org/apache/gobblin/runtime/embedded/EmbeddedGobblinDistcp.java

```java
@CliObjectOption(description = "Specifies files should be updated if they're different in the source.")
public EmbeddedGobblinDistcp update() {
  this.setConfiguration(RecursiveCopyableDataset.UPDATE_KEY, Boolean.toString(true));
  return this;
}
```

docstring: Specifies that files in the target should be updated if they have changed in the source. Equivalent to -update option in Hadoop distcp.

sha: f029b4c0fea0fe4aa62f36dda2512344ff708bae | url: https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-data-management/src/main/java/org/apache/gobblin/runtime/embedded/EmbeddedGobblinDistcp.java#L92-L96
id: 25,711 | repo: apache/incubator-gobblin | func_name: EmbeddedGobblinDistcp.delete | language: java
path: gobblin-data-management/src/main/java/org/apache/gobblin/runtime/embedded/EmbeddedGobblinDistcp.java

```java
@CliObjectOption(description = "Delete files in target that don't exist on source.")
public EmbeddedGobblinDistcp delete() {
  this.setConfiguration(RecursiveCopyableDataset.DELETE_KEY, Boolean.toString(true));
  return this;
}
```

docstring: Specifies that files in the target that don't exist in the source should be deleted. Equivalent to -delete option in Hadoop distcp.

sha: f029b4c0fea0fe4aa62f36dda2512344ff708bae | url: https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-data-management/src/main/java/org/apache/gobblin/runtime/embedded/EmbeddedGobblinDistcp.java#L102-L106
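
A hypothetical launcher combining the two fluent options above. The EmbeddedGobblinDistcp(Path, Path) constructor and the inherited run() method are assumptions based on the class and superclass names; they are not shown in these rows.

```java
// Assumes: import org.apache.hadoop.fs.Path;
//          import org.apache.gobblin.runtime.embedded.EmbeddedGobblinDistcp;
EmbeddedGobblinDistcp distcp =
    new EmbeddedGobblinDistcp(new Path("hdfs://src/data"), new Path("hdfs://dst/data"));  // assumed constructor
distcp.update()   // overwrite target files that differ in the source
    .delete()     // remove target files that no longer exist in the source
    .run();       // assumed launcher; may throw checked exceptions
```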
id: 25,712 | repo: apache/incubator-gobblin | func_name: EmbeddedGobblinDistcp.setTemplate | language: java
path: gobblin-data-management/src/main/java/org/apache/gobblin/runtime/embedded/EmbeddedGobblinDistcp.java

```java
@Override
@NotOnCli
public EmbeddedGobblin setTemplate(String templateURI)
    throws URISyntaxException, SpecNotFoundException, JobTemplate.TemplateException {
  return super.setTemplate(templateURI);
}
```

docstring: Remove template from CLI

sha: f029b4c0fea0fe4aa62f36dda2512344ff708bae | url: https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-data-management/src/main/java/org/apache/gobblin/runtime/embedded/EmbeddedGobblinDistcp.java#L126-L131
id: 25,713 | repo: apache/incubator-gobblin | func_name: HiveTask.addFile | language: java
path: gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/task/HiveTask.java

```java
public static void addFile(State state, String file) {
  state.setProp(ADD_FILES, state.getProp(ADD_FILES, "") + "," + file);
}
```

docstring: Add the input file to the Hive session before running the task.

sha: f029b4c0fea0fe4aa62f36dda2512344ff708bae | url: https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/task/HiveTask.java#L73-L75
id: 25,714 | repo: apache/incubator-gobblin | func_name: HiveTask.addJar | language: java
path: gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/task/HiveTask.java

```java
public static void addJar(State state, String jar) {
  state.setProp(ADD_JARS, state.getProp(ADD_JARS, "") + "," + jar);
}
```

docstring: Add the input jar to the Hive session before running the task.

sha: f029b4c0fea0fe4aa62f36dda2512344ff708bae | url: https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/task/HiveTask.java#L80-L82
id: 25,715 | repo: apache/incubator-gobblin | func_name: HiveTask.addSetupQuery | language: java
path: gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/task/HiveTask.java

```java
public static void addSetupQuery(State state, String query) {
  state.setProp(SETUP_QUERIES, state.getProp(SETUP_QUERIES, "") + ";" + query);
}
```

docstring: Run the specified setup query on the Hive session before running the task.

sha: f029b4c0fea0fe4aa62f36dda2512344ff708bae | url: https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/task/HiveTask.java#L87-L89
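
Usage sketch tying the three HiveTask helpers above together: stage a jar and a file, then register a UDF before the task's queries run. The paths, function name, and UDF class are hypothetical.

```java
// Assumes: import org.apache.gobblin.configuration.State;
//          import org.apache.gobblin.data.management.conversion.hive.task.HiveTask;
State state = new State();
HiveTask.addJar(state, "/libs/my-udfs.jar");                     // hypothetical jar path
HiveTask.addFile(state, "/config/lookup.tsv");                   // hypothetical file path
HiveTask.addSetupQuery(state,
    "CREATE TEMPORARY FUNCTION my_udf AS 'com.example.MyUdf'");  // hypothetical UDF
```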
id: 25,716 | repo: apache/incubator-gobblin | func_name: PartitionUtils.getQuotedString | language: java
path: gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/utils/PartitionUtils.java

```java
public static String getQuotedString(String st) {
  Preconditions.checkNotNull(st);
  String quotedString = "";
  if (!st.startsWith(SINGLE_QUOTE)) {
    quotedString += SINGLE_QUOTE;
  }
  quotedString += st;
  if (!st.endsWith(SINGLE_QUOTE)) {
    quotedString += SINGLE_QUOTE;
  }
  return quotedString;
}
```

docstring: Add single quotes to the string, if not present. TestString will be converted to 'TestString'

sha: f029b4c0fea0fe4aa62f36dda2512344ff708bae | url: https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/utils/PartitionUtils.java#L38-L49
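
Behavior sketch for getQuotedString: quotes are added only on the sides that lack them, so already-quoted input passes through unchanged.

```java
PartitionUtils.getQuotedString("TestString");    // returns 'TestString'
PartitionUtils.getQuotedString("'TestString'");  // returns 'TestString' (unchanged)
PartitionUtils.getQuotedString("'TestString");   // returns 'TestString' (closing quote added)
```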
id: 25,717 | repo: apache/incubator-gobblin | func_name: PartitionUtils.isUnixTimeStamp | language: java
path: gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/utils/PartitionUtils.java

```java
public static boolean isUnixTimeStamp(String timeStamp) {
  if (timeStamp.length() != ComplianceConfigurationKeys.TIME_STAMP_LENGTH) {
    return false;
  }
  try {
    Long.parseLong(timeStamp);
    return true;
  } catch (NumberFormatException e) {
    return false;
  }
}
```

docstring: Check if a given string is a valid unixTimeStamp

sha: f029b4c0fea0fe4aa62f36dda2512344ff708bae | url: https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/utils/PartitionUtils.java#L71-L81
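
Behavior sketch for isUnixTimeStamp. The value of ComplianceConfigurationKeys.TIME_STAMP_LENGTH is not shown in this entry; the sketch assumes it is 13 (epoch milliseconds).

```java
PartitionUtils.isUnixTimeStamp("1546300800000"); // true under the assumption: 13 digits, parses as a long
PartitionUtils.isUnixTimeStamp("1546300800");    // false under the assumption: wrong length
PartitionUtils.isUnixTimeStamp("154630080000x"); // false: right length but not numeric
```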
id: 25,718 | repo: apache/incubator-gobblin | func_name: GobblinMetrics.stopMetricsReporting | language: java
path: gobblin-metrics-libs/gobblin-metrics/src/main/java/org/apache/gobblin/metrics/GobblinMetrics.java

```java
public void stopMetricsReporting() {
  LOGGER.info("Metrics reporting will be stopped: GobblinMetrics {}", this.toString());

  if (!this.metricsReportingStarted) {
    LOGGER.warn("Metric reporting has not started yet");
    return;
  }

  // Stop the JMX reporter
  if (this.jmxReporter.isPresent()) {
    this.jmxReporter.get().stop();
  }

  // Trigger and stop reporters that implement org.apache.gobblin.metrics.report.ScheduledReporter
  RootMetricContext.get().stopReporting();

  // Trigger and stop reporters that implement com.codahale.metrics.ScheduledReporter
  for (com.codahale.metrics.ScheduledReporter scheduledReporter : this.codahaleScheduledReporters) {
    scheduledReporter.report();
  }

  try {
    this.codahaleReportersCloser.close();
  } catch (IOException ioe) {
    LOGGER.error("Failed to close metric output stream for job " + this.id, ioe);
  } catch (Exception e) {
    LOGGER.error("Failed to close metric output stream for job {} due to {}", this.id,
        ExceptionUtils.getFullStackTrace(e));
    throw e;
  }

  this.metricsReportingStarted = false;
  // Remove from the cache registry
  GobblinMetrics.remove(id);
  LOGGER.info("Metrics reporting stopped successfully");
}
```

docstring: Stop metric reporting.

sha: f029b4c0fea0fe4aa62f36dda2512344ff708bae | url: https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-metrics-libs/gobblin-metrics/src/main/java/org/apache/gobblin/metrics/GobblinMetrics.java#L435-L470
id: 25,719 | repo: apache/incubator-gobblin | func_name: InMemorySpecExecutor.createDummySpecExecutor | language: java
path: gobblin-runtime/src/main/java/org/apache/gobblin/runtime/spec_executorInstance/InMemorySpecExecutor.java

```java
public static SpecExecutor createDummySpecExecutor(URI uri) {
  Properties properties = new Properties();
  properties.setProperty(ConfigurationKeys.SPECEXECUTOR_INSTANCE_URI_KEY, uri.toString());
  return new InMemorySpecExecutor(ConfigFactory.parseProperties(properties));
}
```

docstring: A creator that creates a SpecExecutor, specifying only a URI for uniqueness.
@param uri

sha: f029b4c0fea0fe4aa62f36dda2512344ff708bae | url: https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-runtime/src/main/java/org/apache/gobblin/runtime/spec_executorInstance/InMemorySpecExecutor.java#L67-L71
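
Usage sketch: building a placeholder executor keyed only by a URI. The URI value and the SpecExecutor import path are assumptions.

```java
// Assumes: import java.net.URI;
//          import org.apache.gobblin.runtime.api.SpecExecutor;  // assumed package
//          import org.apache.gobblin.runtime.spec_executorInstance.InMemorySpecExecutor;
SpecExecutor executor =
    InMemorySpecExecutor.createDummySpecExecutor(URI.create("inmemory://dummy/executor"));
```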
id: 25,720 | repo: apache/incubator-gobblin | func_name: TimeBasedAvroWriterPartitioner.getRecordTimestamp | language: java
path: gobblin-core/src/main/java/org/apache/gobblin/writer/partitioner/TimeBasedAvroWriterPartitioner.java

```java
private static long getRecordTimestamp(Optional<Object> writerPartitionColumnValue) {
  return writerPartitionColumnValue.orNull() instanceof Long ? (Long) writerPartitionColumnValue.get()
      : System.currentTimeMillis();
}
```

docstring: Check if the partition column value is present and is a Long object. Otherwise, use current system time.

sha: f029b4c0fea0fe4aa62f36dda2512344ff708bae | url: https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-core/src/main/java/org/apache/gobblin/writer/partitioner/TimeBasedAvroWriterPartitioner.java#L71-L74
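
A self-contained illustration of the fallback idiom above: orNull() plus instanceof covers both an absent Optional and a present value of the wrong type in one check. The timestamp values are illustrative.

```java
// Assumes: import com.google.common.base.Optional;
Optional<Object> present = Optional.<Object>of(1546300800000L);
Optional<Object> wrongType = Optional.<Object>of("not-a-timestamp");
Optional<Object> absent = Optional.absent();

// Only the first falls through to the cast; the other two use the current time.
long t1 = present.orNull() instanceof Long ? (Long) present.get() : System.currentTimeMillis();
long t2 = wrongType.orNull() instanceof Long ? (Long) wrongType.get() : System.currentTimeMillis();
long t3 = absent.orNull() instanceof Long ? (Long) absent.get() : System.currentTimeMillis();
```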
id: 25,721 | repo: apache/incubator-gobblin | func_name: TimeBasedAvroWriterPartitioner.getWriterPartitionColumnValue | language: java
path: gobblin-core/src/main/java/org/apache/gobblin/writer/partitioner/TimeBasedAvroWriterPartitioner.java

```java
private Optional<Object> getWriterPartitionColumnValue(GenericRecord record) {
  if (!this.partitionColumns.isPresent()) {
    return Optional.absent();
  }
  for (String partitionColumn : this.partitionColumns.get()) {
    Optional<Object> fieldValue = AvroUtils.getFieldValue(record, partitionColumn);
    if (fieldValue.isPresent()) {
      return fieldValue;
    }
  }
  return Optional.absent();
}
```

docstring: Retrieve the value of the partition column field specified by this.partitionColumns

sha: f029b4c0fea0fe4aa62f36dda2512344ff708bae | url: https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-core/src/main/java/org/apache/gobblin/writer/partitioner/TimeBasedAvroWriterPartitioner.java#L79-L91
25,722 | apache/incubator-gobblin | gobblin-utility/src/main/java/org/apache/gobblin/util/callbacks/CallbacksDispatcher.java | CallbacksDispatcher.addWeakListener | public synchronized void addWeakListener(L listener) {
Preconditions.checkNotNull(listener);
_log.info("Adding a weak listener " + listener);
_autoListeners.put(listener, null);
} | java | public synchronized void addWeakListener(L listener) {
Preconditions.checkNotNull(listener);
_log.info("Adding a weak listener " + listener);
_autoListeners.put(listener, null);
} | [
"public",
"synchronized",
"void",
"addWeakListener",
"(",
"L",
"listener",
")",
"{",
"Preconditions",
".",
"checkNotNull",
"(",
"listener",
")",
";",
"_log",
".",
"info",
"(",
"\"Adding a weak listener \"",
"+",
"listener",
")",
";",
"_autoListeners",
".",
"put",
"(",
"listener",
",",
"null",
")",
";",
"}"
] | Only weak references are stored for weak listeners. They will be removed from the dispatcher
automatically, once the listener objects are GCed. Note that weak listeners cannot be removed
explicitly. | [
"Only",
"weak",
"references",
"are",
"stored",
"for",
"weak",
"listeners",
".",
"They",
"will",
"be",
"removed",
"from",
"the",
"dispatcher",
"automatically",
"once",
"the",
"listener",
"objects",
"are",
"GCed",
".",
"Note",
"that",
"weak",
"listeners",
"cannot",
"be",
"removed",
"explicitly",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-utility/src/main/java/org/apache/gobblin/util/callbacks/CallbacksDispatcher.java#L118-L123 |
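The docstring above says weak listeners vanish once GCed, which implies the `_autoListeners` map holds its keys weakly. A standalone sketch of that storage behavior (an illustration built on `WeakHashMap`, not the dispatcher's actual internals):

```java
import java.util.Collections;
import java.util.Map;
import java.util.WeakHashMap;

public class WeakListenerExample {
  // Keys are held weakly: an entry disappears once its key is only weakly reachable.
  private static final Map<Runnable, Void> listeners =
      Collections.synchronizedMap(new WeakHashMap<>());

  public static void main(String[] args) throws InterruptedException {
    Runnable listener = () -> System.out.println("notified");
    listeners.put(listener, null);
    System.out.println("before GC: " + listeners.size()); // 1

    listener = null;     // drop the last strong reference
    System.gc();         // best-effort request; collection is not guaranteed
    Thread.sleep(100);
    System.out.println("after GC: " + listeners.size());  // usually 0
  }
}
```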
25,723 | apache/incubator-gobblin | gobblin-service/src/main/java/org/apache/gobblin/service/modules/spec/JobExecutionPlanDagFactory.java | JobExecutionPlanDagFactory.getDependencies | private static List<String> getDependencies(Config config) {
return config.hasPath(ConfigurationKeys.JOB_DEPENDENCIES) ? Arrays
.asList(config.getString(ConfigurationKeys.JOB_DEPENDENCIES).split(",")) : new ArrayList<>();
} | java | private static List<String> getDependencies(Config config) {
return config.hasPath(ConfigurationKeys.JOB_DEPENDENCIES) ? Arrays
.asList(config.getString(ConfigurationKeys.JOB_DEPENDENCIES).split(",")) : new ArrayList<>();
} | [
"private",
"static",
"List",
"<",
"String",
">",
"getDependencies",
"(",
"Config",
"config",
")",
"{",
"return",
"config",
".",
"hasPath",
"(",
"ConfigurationKeys",
".",
"JOB_DEPENDENCIES",
")",
"?",
"Arrays",
".",
"asList",
"(",
"config",
".",
"getString",
"(",
"ConfigurationKeys",
".",
"JOB_DEPENDENCIES",
")",
".",
"split",
"(",
"\",\"",
")",
")",
":",
"new",
"ArrayList",
"<>",
"(",
")",
";",
"}"
] | Get job dependencies of a given job from its config.
@param config of a job.
@return a list of dependencies of the job. | [
"Get",
"job",
"dependencies",
"of",
"a",
"given",
"job",
"from",
"its",
"config",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-service/src/main/java/org/apache/gobblin/service/modules/spec/JobExecutionPlanDagFactory.java#L90-L93 |
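A standalone sketch of the same lookup, assuming the value of ConfigurationKeys.JOB_DEPENDENCIES is the literal key "job.dependencies" (an assumption; only the constant name appears in the row above):

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

public class JobDependenciesExample {
  // Hypothetical stand-in for ConfigurationKeys.JOB_DEPENDENCIES.
  static final String JOB_DEPENDENCIES = "job.dependencies";

  static List<String> dependencies(Config config) {
    return config.hasPath(JOB_DEPENDENCIES)
        ? Arrays.asList(config.getString(JOB_DEPENDENCIES).split(","))
        : new ArrayList<>();
  }

  public static void main(String[] args) {
    Config config = ConfigFactory.parseString("job.dependencies = \"jobA,jobB\"");
    System.out.println(dependencies(config));                // [jobA, jobB]
    System.out.println(dependencies(ConfigFactory.empty())); // []
  }
}
```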
25,724 | apache/incubator-gobblin | gobblin-api/src/main/java/org/apache/gobblin/util/DecoratorUtils.java | DecoratorUtils.getDecoratorLineage | public static List<Object> getDecoratorLineage(Object obj) {
List<Object> lineage = Lists.newArrayList(obj);
Object currentObject = obj;
while(currentObject instanceof Decorator) {
currentObject = ((Decorator)currentObject).getDecoratedObject();
lineage.add(currentObject);
}
return Lists.reverse(lineage);
} | java | public static List<Object> getDecoratorLineage(Object obj) {
List<Object> lineage = Lists.newArrayList(obj);
Object currentObject = obj;
while(currentObject instanceof Decorator) {
currentObject = ((Decorator)currentObject).getDecoratedObject();
lineage.add(currentObject);
}
return Lists.reverse(lineage);
} | [
"public",
"static",
"List",
"<",
"Object",
">",
"getDecoratorLineage",
"(",
"Object",
"obj",
")",
"{",
"List",
"<",
"Object",
">",
"lineage",
"=",
"Lists",
".",
"newArrayList",
"(",
"obj",
")",
";",
"Object",
"currentObject",
"=",
"obj",
";",
"while",
"(",
"currentObject",
"instanceof",
"Decorator",
")",
"{",
"currentObject",
"=",
"(",
"(",
"Decorator",
")",
"currentObject",
")",
".",
"getDecoratedObject",
"(",
")",
";",
"lineage",
".",
"add",
"(",
"currentObject",
")",
";",
"}",
"return",
"Lists",
".",
"reverse",
"(",
"lineage",
")",
";",
"}"
] | Finds the decorator lineage of the given object.
<p>
If object is not a {@link org.apache.gobblin.util.Decorator}, this method will return a singleton list with just the object.
If object is a {@link org.apache.gobblin.util.Decorator}, it will return a list of the underlying object followed by the
decorator lineage up to the input decorator object.
</p>
@param obj an object.
@return List of the non-decorator underlying object and all decorators on top of it,
starting with underlying object and ending with the input object itself (inclusive). | [
"Finds",
"the",
"decorator",
"lineage",
"of",
"the",
"given",
"object",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-api/src/main/java/org/apache/gobblin/util/DecoratorUtils.java#L56-L65 |
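A usage sketch for the lineage walk above; the Decorator interface is assumed to expose only the getDecoratedObject() accessor exercised in the code:

```java
import java.util.List;

import org.apache.gobblin.util.Decorator;
import org.apache.gobblin.util.DecoratorUtils;

public class DecoratorLineageExample {
  // A trivial decorator over an arbitrary object, just to exercise the walk.
  static class Wrapper implements Decorator {
    private final Object inner;
    Wrapper(Object inner) { this.inner = inner; }
    @Override public Object getDecoratedObject() { return inner; }
  }

  public static void main(String[] args) {
    Object doublyWrapped = new Wrapper(new Wrapper("base"));
    List<Object> lineage = DecoratorUtils.getDecoratorLineage(doublyWrapped);
    System.out.println(lineage.size());  // 3: underlying object plus two decorators
    System.out.println(lineage.get(0));  // base (underlying object comes first)
  }
}
```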
25,725 | apache/incubator-gobblin | gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/MRCompactorJobRunner.java | MRCompactorJobRunner.getCompactionTimestamp | private DateTime getCompactionTimestamp() throws IOException {
DateTimeZone timeZone = DateTimeZone.forID(
this.dataset.jobProps().getProp(MRCompactor.COMPACTION_TIMEZONE, MRCompactor.DEFAULT_COMPACTION_TIMEZONE));
if (!this.recompactFromDestPaths) {
return new DateTime(timeZone);
}
Set<Path> inputPaths = getInputPaths();
long maxTimestamp = Long.MIN_VALUE;
for (FileStatus status : FileListUtils.listFilesRecursively(this.fs, inputPaths)) {
maxTimestamp = Math.max(maxTimestamp, status.getModificationTime());
}
return maxTimestamp == Long.MIN_VALUE ? new DateTime(timeZone) : new DateTime(maxTimestamp, timeZone);
} | java | private DateTime getCompactionTimestamp() throws IOException {
DateTimeZone timeZone = DateTimeZone.forID(
this.dataset.jobProps().getProp(MRCompactor.COMPACTION_TIMEZONE, MRCompactor.DEFAULT_COMPACTION_TIMEZONE));
if (!this.recompactFromDestPaths) {
return new DateTime(timeZone);
}
Set<Path> inputPaths = getInputPaths();
long maxTimestamp = Long.MIN_VALUE;
for (FileStatus status : FileListUtils.listFilesRecursively(this.fs, inputPaths)) {
maxTimestamp = Math.max(maxTimestamp, status.getModificationTime());
}
return maxTimestamp == Long.MIN_VALUE ? new DateTime(timeZone) : new DateTime(maxTimestamp, timeZone);
} | [
"private",
"DateTime",
"getCompactionTimestamp",
"(",
")",
"throws",
"IOException",
"{",
"DateTimeZone",
"timeZone",
"=",
"DateTimeZone",
".",
"forID",
"(",
"this",
".",
"dataset",
".",
"jobProps",
"(",
")",
".",
"getProp",
"(",
"MRCompactor",
".",
"COMPACTION_TIMEZONE",
",",
"MRCompactor",
".",
"DEFAULT_COMPACTION_TIMEZONE",
")",
")",
";",
"if",
"(",
"!",
"this",
".",
"recompactFromDestPaths",
")",
"{",
"return",
"new",
"DateTime",
"(",
"timeZone",
")",
";",
"}",
"Set",
"<",
"Path",
">",
"inputPaths",
"=",
"getInputPaths",
"(",
")",
";",
"long",
"maxTimestamp",
"=",
"Long",
".",
"MIN_VALUE",
";",
"for",
"(",
"FileStatus",
"status",
":",
"FileListUtils",
".",
"listFilesRecursively",
"(",
"this",
".",
"fs",
",",
"inputPaths",
")",
")",
"{",
"maxTimestamp",
"=",
"Math",
".",
"max",
"(",
"maxTimestamp",
",",
"status",
".",
"getModificationTime",
"(",
")",
")",
";",
"}",
"return",
"maxTimestamp",
"==",
"Long",
".",
"MIN_VALUE",
"?",
"new",
"DateTime",
"(",
"timeZone",
")",
":",
"new",
"DateTime",
"(",
"maxTimestamp",
",",
"timeZone",
")",
";",
"}"
] | For regular compactions, compaction timestamp is the time the compaction job starts.
If this is a recompaction from output paths, the compaction timestamp will remain the same as the previously
persisted compaction time. This is because such a recompaction doesn't consume input data, so the next time,
whether a file in the input folder is considered a late file should still be based on the previous compaction
timestamp. | [
"For",
"regular",
"compactions",
"compaction",
"timestamp",
"is",
"the",
"time",
"the",
"compaction",
"job",
"starts",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/MRCompactorJobRunner.java#L372-L386 |
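The core of the method above is a fold over file modification times with Long.MIN_VALUE as the "nothing seen" sentinel, falling back to the current time. A self-contained sketch of that pattern with Joda-Time:

```java
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;

public class MaxTimestampExample {
  // Fold modification times; Long.MIN_VALUE means "no file seen", so fall back to now.
  static DateTime maxOrNow(long[] modificationTimes, DateTimeZone zone) {
    long max = Long.MIN_VALUE;
    for (long t : modificationTimes) {
      max = Math.max(max, t);
    }
    return max == Long.MIN_VALUE ? new DateTime(zone) : new DateTime(max, zone);
  }

  public static void main(String[] args) {
    System.out.println(maxOrNow(new long[] {1000L, 5000L}, DateTimeZone.UTC)); // 1970-01-01T00:00:05.000Z
    System.out.println(maxOrNow(new long[] {}, DateTimeZone.UTC));             // current time
  }
}
```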
25,726 | apache/incubator-gobblin | gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/MRCompactorJobRunner.java | MRCompactorJobRunner.submitSlaEvent | private void submitSlaEvent(Job job) {
try {
CompactionSlaEventHelper
.getEventSubmitterBuilder(this.dataset, Optional.of(job), this.fs)
.eventSubmitter(this.eventSubmitter)
.eventName(CompactionSlaEventHelper.COMPACTION_COMPLETED_EVENT_NAME)
.additionalMetadata(
CompactionSlaEventHelper.LATE_RECORD_COUNT,
Long.toString(this.lateOutputRecordCountProvider.getRecordCount(this.getApplicableFilePaths(this.dataset
.outputLatePath(), this.fs))))
.additionalMetadata(
CompactionSlaEventHelper.REGULAR_RECORD_COUNT,
Long.toString(this.outputRecordCountProvider.getRecordCount(this.getApplicableFilePaths(this.dataset
.outputPath(), this.fs))))
.additionalMetadata(CompactionSlaEventHelper.RECOMPATED_METADATA_NAME,
Boolean.toString(this.dataset.needToRecompact())).build().submit();
} catch (Throwable e) {
LOG.warn("Failed to submit compaction completed event:" + e, e);
}
} | java | private void submitSlaEvent(Job job) {
try {
CompactionSlaEventHelper
.getEventSubmitterBuilder(this.dataset, Optional.of(job), this.fs)
.eventSubmitter(this.eventSubmitter)
.eventName(CompactionSlaEventHelper.COMPACTION_COMPLETED_EVENT_NAME)
.additionalMetadata(
CompactionSlaEventHelper.LATE_RECORD_COUNT,
Long.toString(this.lateOutputRecordCountProvider.getRecordCount(this.getApplicableFilePaths(this.dataset
.outputLatePath(), this.fs))))
.additionalMetadata(
CompactionSlaEventHelper.REGULAR_RECORD_COUNT,
Long.toString(this.outputRecordCountProvider.getRecordCount(this.getApplicableFilePaths(this.dataset
.outputPath(), this.fs))))
.additionalMetadata(CompactionSlaEventHelper.RECOMPATED_METADATA_NAME,
Boolean.toString(this.dataset.needToRecompact())).build().submit();
} catch (Throwable e) {
LOG.warn("Failed to submit compaction completed event:" + e, e);
}
} | [
"private",
"void",
"submitSlaEvent",
"(",
"Job",
"job",
")",
"{",
"try",
"{",
"CompactionSlaEventHelper",
".",
"getEventSubmitterBuilder",
"(",
"this",
".",
"dataset",
",",
"Optional",
".",
"of",
"(",
"job",
")",
",",
"this",
".",
"fs",
")",
".",
"eventSubmitter",
"(",
"this",
".",
"eventSubmitter",
")",
".",
"eventName",
"(",
"CompactionSlaEventHelper",
".",
"COMPACTION_COMPLETED_EVENT_NAME",
")",
".",
"additionalMetadata",
"(",
"CompactionSlaEventHelper",
".",
"LATE_RECORD_COUNT",
",",
"Long",
".",
"toString",
"(",
"this",
".",
"lateOutputRecordCountProvider",
".",
"getRecordCount",
"(",
"this",
".",
"getApplicableFilePaths",
"(",
"this",
".",
"dataset",
".",
"outputLatePath",
"(",
")",
",",
"this",
".",
"fs",
")",
")",
")",
")",
".",
"additionalMetadata",
"(",
"CompactionSlaEventHelper",
".",
"REGULAR_RECORD_COUNT",
",",
"Long",
".",
"toString",
"(",
"this",
".",
"outputRecordCountProvider",
".",
"getRecordCount",
"(",
"this",
".",
"getApplicableFilePaths",
"(",
"this",
".",
"dataset",
".",
"outputPath",
"(",
")",
",",
"this",
".",
"fs",
")",
")",
")",
")",
".",
"additionalMetadata",
"(",
"CompactionSlaEventHelper",
".",
"RECOMPATED_METADATA_NAME",
",",
"Boolean",
".",
"toString",
"(",
"this",
".",
"dataset",
".",
"needToRecompact",
"(",
")",
")",
")",
".",
"build",
"(",
")",
".",
"submit",
"(",
")",
";",
"}",
"catch",
"(",
"Throwable",
"e",
")",
"{",
"LOG",
".",
"warn",
"(",
"\"Failed to submit compaction completed event:\"",
"+",
"e",
",",
"e",
")",
";",
"}",
"}"
] | Submit an event when compaction MR job completes | [
"Submit",
"an",
"event",
"when",
"compaction",
"MR",
"job",
"completes"
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/MRCompactorJobRunner.java#L696-L715 |
25,727 | apache/incubator-gobblin | gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/MRCompactorJobRunner.java | MRCompactorJobRunner.submitRecordsCountsEvent | private void submitRecordsCountsEvent() {
long lateOutputRecordCount = this.datasetHelper.getLateOutputRecordCount();
long outputRecordCount = this.datasetHelper.getOutputRecordCount();
try {
CompactionSlaEventHelper
.getEventSubmitterBuilder(this.dataset, Optional.<Job> absent(), this.fs)
.eventSubmitter(this.eventSubmitter)
.eventName(CompactionSlaEventHelper.COMPACTION_RECORD_COUNT_EVENT)
.additionalMetadata(CompactionSlaEventHelper.DATASET_OUTPUT_PATH, this.dataset.outputPath().toString())
.additionalMetadata(
CompactionSlaEventHelper.LATE_RECORD_COUNT,
Long.toString(lateOutputRecordCount))
.additionalMetadata(
CompactionSlaEventHelper.REGULAR_RECORD_COUNT,
Long.toString(outputRecordCount))
.additionalMetadata(CompactionSlaEventHelper.NEED_RECOMPACT, Boolean.toString(this.dataset.needToRecompact()))
.build().submit();
} catch (Throwable e) {
LOG.warn("Failed to submit late event count:" + e, e);
}
} | java | private void submitRecordsCountsEvent() {
long lateOutputRecordCount = this.datasetHelper.getLateOutputRecordCount();
long outputRecordCount = this.datasetHelper.getOutputRecordCount();
try {
CompactionSlaEventHelper
.getEventSubmitterBuilder(this.dataset, Optional.<Job> absent(), this.fs)
.eventSubmitter(this.eventSubmitter)
.eventName(CompactionSlaEventHelper.COMPACTION_RECORD_COUNT_EVENT)
.additionalMetadata(CompactionSlaEventHelper.DATASET_OUTPUT_PATH, this.dataset.outputPath().toString())
.additionalMetadata(
CompactionSlaEventHelper.LATE_RECORD_COUNT,
Long.toString(lateOutputRecordCount))
.additionalMetadata(
CompactionSlaEventHelper.REGULAR_RECORD_COUNT,
Long.toString(outputRecordCount))
.additionalMetadata(CompactionSlaEventHelper.NEED_RECOMPACT, Boolean.toString(this.dataset.needToRecompact()))
.build().submit();
} catch (Throwable e) {
LOG.warn("Failed to submit late event count:" + e, e);
}
} | [
"private",
"void",
"submitRecordsCountsEvent",
"(",
")",
"{",
"long",
"lateOutputRecordCount",
"=",
"this",
".",
"datasetHelper",
".",
"getLateOutputRecordCount",
"(",
")",
";",
"long",
"outputRecordCount",
"=",
"this",
".",
"datasetHelper",
".",
"getOutputRecordCount",
"(",
")",
";",
"try",
"{",
"CompactionSlaEventHelper",
".",
"getEventSubmitterBuilder",
"(",
"this",
".",
"dataset",
",",
"Optional",
".",
"<",
"Job",
">",
"absent",
"(",
")",
",",
"this",
".",
"fs",
")",
".",
"eventSubmitter",
"(",
"this",
".",
"eventSubmitter",
")",
".",
"eventName",
"(",
"CompactionSlaEventHelper",
".",
"COMPACTION_RECORD_COUNT_EVENT",
")",
".",
"additionalMetadata",
"(",
"CompactionSlaEventHelper",
".",
"DATASET_OUTPUT_PATH",
",",
"this",
".",
"dataset",
".",
"outputPath",
"(",
")",
".",
"toString",
"(",
")",
")",
".",
"additionalMetadata",
"(",
"CompactionSlaEventHelper",
".",
"LATE_RECORD_COUNT",
",",
"Long",
".",
"toString",
"(",
"lateOutputRecordCount",
")",
")",
".",
"additionalMetadata",
"(",
"CompactionSlaEventHelper",
".",
"REGULAR_RECORD_COUNT",
",",
"Long",
".",
"toString",
"(",
"outputRecordCount",
")",
")",
".",
"additionalMetadata",
"(",
"CompactionSlaEventHelper",
".",
"NEED_RECOMPACT",
",",
"Boolean",
".",
"toString",
"(",
"this",
".",
"dataset",
".",
"needToRecompact",
"(",
")",
")",
")",
".",
"build",
"(",
")",
".",
"submit",
"(",
")",
";",
"}",
"catch",
"(",
"Throwable",
"e",
")",
"{",
"LOG",
".",
"warn",
"(",
"\"Failed to submit late event count:\"",
"+",
"e",
",",
"e",
")",
";",
"}",
"}"
] | Submit an event reporting late record counts and non-late record counts. | [
"Submit",
"an",
"event",
"reporting",
"late",
"record",
"counts",
"and",
"non",
"-",
"late",
"record",
"counts",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/MRCompactorJobRunner.java#L720-L741 |
25,728 | apache/incubator-gobblin | gobblin-core/src/main/java/org/apache/gobblin/qualitychecker/row/RowLevelPolicyChecker.java | RowLevelPolicyChecker.processStream | @Override
public RecordStreamWithMetadata<D, S> processStream(RecordStreamWithMetadata<D, S> inputStream, WorkUnitState state) {
Flowable<StreamEntity<D>> filteredStream =
inputStream.getRecordStream().filter(r -> {
if (r instanceof ControlMessage) {
getMessageHandler().handleMessage((ControlMessage) r);
return true;
} else if (r instanceof RecordEnvelope) {
boolean accept = executePolicies(((RecordEnvelope) r).getRecord(), this.results);
if (!accept) {
r.ack();
}
return accept;
} else {
return true;
}
});
filteredStream = filteredStream.doFinally(this::close);
return inputStream.withRecordStream(filteredStream);
} | java | @Override
public RecordStreamWithMetadata<D, S> processStream(RecordStreamWithMetadata<D, S> inputStream, WorkUnitState state) {
Flowable<StreamEntity<D>> filteredStream =
inputStream.getRecordStream().filter(r -> {
if (r instanceof ControlMessage) {
getMessageHandler().handleMessage((ControlMessage) r);
return true;
} else if (r instanceof RecordEnvelope) {
boolean accept = executePolicies(((RecordEnvelope) r).getRecord(), this.results);
if (!accept) {
r.ack();
}
return accept;
} else {
return true;
}
});
filteredStream = filteredStream.doFinally(this::close);
return inputStream.withRecordStream(filteredStream);
} | [
"@",
"Override",
"public",
"RecordStreamWithMetadata",
"<",
"D",
",",
"S",
">",
"processStream",
"(",
"RecordStreamWithMetadata",
"<",
"D",
",",
"S",
">",
"inputStream",
",",
"WorkUnitState",
"state",
")",
"{",
"Flowable",
"<",
"StreamEntity",
"<",
"D",
">>",
"filteredStream",
"=",
"inputStream",
".",
"getRecordStream",
"(",
")",
".",
"filter",
"(",
"r",
"->",
"{",
"if",
"(",
"r",
"instanceof",
"ControlMessage",
")",
"{",
"getMessageHandler",
"(",
")",
".",
"handleMessage",
"(",
"(",
"ControlMessage",
")",
"r",
")",
";",
"return",
"true",
";",
"}",
"else",
"if",
"(",
"r",
"instanceof",
"RecordEnvelope",
")",
"{",
"boolean",
"accept",
"=",
"executePolicies",
"(",
"(",
"(",
"RecordEnvelope",
")",
"r",
")",
".",
"getRecord",
"(",
")",
",",
"this",
".",
"results",
")",
";",
"if",
"(",
"!",
"accept",
")",
"{",
"r",
".",
"ack",
"(",
")",
";",
"}",
"return",
"accept",
";",
"}",
"else",
"{",
"return",
"true",
";",
"}",
"}",
")",
";",
"filteredStream",
"=",
"filteredStream",
".",
"doFinally",
"(",
"this",
"::",
"close",
")",
";",
"return",
"inputStream",
".",
"withRecordStream",
"(",
"filteredStream",
")",
";",
"}"
] | Process the stream and drop any records that fail the quality check. | [
"Process",
"the",
"stream",
"and",
"drop",
"any",
"records",
"that",
"fail",
"the",
"quality",
"check",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-core/src/main/java/org/apache/gobblin/qualitychecker/row/RowLevelPolicyChecker.java#L135-L154 |
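The stream processing above is an RxJava filter with an acknowledgement on drop and a cleanup hook. A stripped-down sketch of the same shape using plain RxJava 2 types:

```java
import io.reactivex.Flowable;

public class StreamFilterExample {
  public static void main(String[] args) {
    Flowable<Integer> filtered = Flowable.range(1, 10)
        .filter(i -> i % 2 == 0)                        // stand-in for the quality check
        .doFinally(() -> System.out.println("closed")); // analogous to the close() hook
    filtered.subscribe(i -> System.out.println("kept " + i));
  }
}
```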
25,729 | apache/incubator-gobblin | gobblin-core-base/src/main/java/org/apache/gobblin/instrumented/qualitychecker/InstrumentedRowLevelPolicyBase.java | InstrumentedRowLevelPolicyBase.afterCheck | public void afterCheck(Result result, long startTimeNanos) {
switch (result) {
case FAILED:
Instrumented.markMeter(this.failedRecordsMeter);
break;
case PASSED:
Instrumented.markMeter(this.passedRecordsMeter);
break;
default:
}
Instrumented.updateTimer(this.policyTimer, System.nanoTime() - startTimeNanos, TimeUnit.NANOSECONDS);
} | java | public void afterCheck(Result result, long startTimeNanos) {
switch (result) {
case FAILED:
Instrumented.markMeter(this.failedRecordsMeter);
break;
case PASSED:
Instrumented.markMeter(this.passedRecordsMeter);
break;
default:
}
Instrumented.updateTimer(this.policyTimer, System.nanoTime() - startTimeNanos, TimeUnit.NANOSECONDS);
} | [
"public",
"void",
"afterCheck",
"(",
"Result",
"result",
",",
"long",
"startTimeNanos",
")",
"{",
"switch",
"(",
"result",
")",
"{",
"case",
"FAILED",
":",
"Instrumented",
".",
"markMeter",
"(",
"this",
".",
"failedRecordsMeter",
")",
";",
"break",
";",
"case",
"PASSED",
":",
"Instrumented",
".",
"markMeter",
"(",
"this",
".",
"passedRecordsMeter",
")",
";",
"break",
";",
"default",
":",
"}",
"Instrumented",
".",
"updateTimer",
"(",
"this",
".",
"policyTimer",
",",
"System",
".",
"nanoTime",
"(",
")",
"-",
"startTimeNanos",
",",
"TimeUnit",
".",
"NANOSECONDS",
")",
";",
"}"
] | Called after check is run.
@param result result from check.
@param startTimeNanos start time of check. | [
"Called",
"after",
"check",
"is",
"run",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-core-base/src/main/java/org/apache/gobblin/instrumented/qualitychecker/InstrumentedRowLevelPolicyBase.java#L144-L156 |
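The metering in afterCheck maps directly onto Dropwizard metrics. A sketch of the same bookkeeping without Gobblin's Instrumented wrappers:

```java
import java.util.concurrent.TimeUnit;

import com.codahale.metrics.Meter;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;

public class PolicyMetricsExample {
  public static void main(String[] args) {
    MetricRegistry registry = new MetricRegistry();
    Meter passed = registry.meter("records.passed");
    Timer policyTimer = registry.timer("policy.time");

    long start = System.nanoTime();
    // ... the policy check would run here ...
    passed.mark();                                                    // count a PASSED result
    policyTimer.update(System.nanoTime() - start, TimeUnit.NANOSECONDS);
    System.out.println("passed=" + passed.getCount());
  }
}
```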
25,730 | apache/incubator-gobblin | gobblin-hive-registration/src/main/java/org/apache/gobblin/hive/orc/HiveOrcSerDeManager.java | HiveOrcSerDeManager.addSerDeProperties | @Override
public void addSerDeProperties(Path path, HiveRegistrationUnit hiveUnit)
throws IOException {
hiveUnit.setSerDeType(this.serDeWrapper.getSerDe().getClass().getName());
hiveUnit.setInputFormat(this.serDeWrapper.getInputFormatClassName());
hiveUnit.setOutputFormat(this.serDeWrapper.getOutputFormatClassName());
addSchemaProperties(path, hiveUnit);
} | java | @Override
public void addSerDeProperties(Path path, HiveRegistrationUnit hiveUnit)
throws IOException {
hiveUnit.setSerDeType(this.serDeWrapper.getSerDe().getClass().getName());
hiveUnit.setInputFormat(this.serDeWrapper.getInputFormatClassName());
hiveUnit.setOutputFormat(this.serDeWrapper.getOutputFormatClassName());
addSchemaProperties(path, hiveUnit);
} | [
"@",
"Override",
"public",
"void",
"addSerDeProperties",
"(",
"Path",
"path",
",",
"HiveRegistrationUnit",
"hiveUnit",
")",
"throws",
"IOException",
"{",
"hiveUnit",
".",
"setSerDeType",
"(",
"this",
".",
"serDeWrapper",
".",
"getSerDe",
"(",
")",
".",
"getClass",
"(",
")",
".",
"getName",
"(",
")",
")",
";",
"hiveUnit",
".",
"setInputFormat",
"(",
"this",
".",
"serDeWrapper",
".",
"getInputFormatClassName",
"(",
")",
")",
";",
"hiveUnit",
".",
"setOutputFormat",
"(",
"this",
".",
"serDeWrapper",
".",
"getOutputFormatClassName",
"(",
")",
")",
";",
"addSchemaProperties",
"(",
"path",
",",
"hiveUnit",
")",
";",
"}"
] | Add ORC SerDe attributes into HiveUnit
@param path
@param hiveUnit
@throws IOException | [
"Add",
"ORC",
"SerDe",
"attributes",
"into",
"HiveUnit"
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive/orc/HiveOrcSerDeManager.java#L128-L136 |
25,731 | apache/incubator-gobblin | gobblin-hive-registration/src/main/java/org/apache/gobblin/hive/orc/HiveOrcSerDeManager.java | HiveOrcSerDeManager.addSchemaPropertiesHelper | protected void addSchemaPropertiesHelper(Path path, HiveRegistrationUnit hiveUnit) throws IOException {
TypeInfo schema = getSchemaFromLatestFile(path, this.fs);
if (schema instanceof StructTypeInfo) {
StructTypeInfo structTypeInfo = (StructTypeInfo) schema;
hiveUnit.setSerDeProp(SCHEMA_LITERAL, schema);
hiveUnit.setSerDeProp(serdeConstants.LIST_COLUMNS,
Joiner.on(",").join(structTypeInfo.getAllStructFieldNames()));
hiveUnit.setSerDeProp(serdeConstants.LIST_COLUMN_TYPES,
Joiner.on(",").join(
structTypeInfo.getAllStructFieldTypeInfos().stream().map(x -> x.getTypeName())
.collect(Collectors.toList())));
} else {
// Hive always uses a struct with a field for each of the top-level columns as the root object type.
// So here we assume that to-be-registered ORC files follow this pattern.
throw new IllegalStateException("A valid ORC schema should be an instance of struct");
}
} | java | protected void addSchemaPropertiesHelper(Path path, HiveRegistrationUnit hiveUnit) throws IOException {
TypeInfo schema = getSchemaFromLatestFile(path, this.fs);
if (schema instanceof StructTypeInfo) {
StructTypeInfo structTypeInfo = (StructTypeInfo) schema;
hiveUnit.setSerDeProp(SCHEMA_LITERAL, schema);
hiveUnit.setSerDeProp(serdeConstants.LIST_COLUMNS,
Joiner.on(",").join(structTypeInfo.getAllStructFieldNames()));
hiveUnit.setSerDeProp(serdeConstants.LIST_COLUMN_TYPES,
Joiner.on(",").join(
structTypeInfo.getAllStructFieldTypeInfos().stream().map(x -> x.getTypeName())
.collect(Collectors.toList())));
} else {
// Hive always uses a struct with a field for each of the top-level columns as the root object type.
// So here we assume that to-be-registered ORC files follow this pattern.
throw new IllegalStateException("A valid ORC schema should be an instance of struct");
}
} | [
"protected",
"void",
"addSchemaPropertiesHelper",
"(",
"Path",
"path",
",",
"HiveRegistrationUnit",
"hiveUnit",
")",
"throws",
"IOException",
"{",
"TypeInfo",
"schema",
"=",
"getSchemaFromLatestFile",
"(",
"path",
",",
"this",
".",
"fs",
")",
";",
"if",
"(",
"schema",
"instanceof",
"StructTypeInfo",
")",
"{",
"StructTypeInfo",
"structTypeInfo",
"=",
"(",
"StructTypeInfo",
")",
"schema",
";",
"hiveUnit",
".",
"setSerDeProp",
"(",
"SCHEMA_LITERAL",
",",
"schema",
")",
";",
"hiveUnit",
".",
"setSerDeProp",
"(",
"serdeConstants",
".",
"LIST_COLUMNS",
",",
"Joiner",
".",
"on",
"(",
"\",\"",
")",
".",
"join",
"(",
"structTypeInfo",
".",
"getAllStructFieldNames",
"(",
")",
")",
")",
";",
"hiveUnit",
".",
"setSerDeProp",
"(",
"serdeConstants",
".",
"LIST_COLUMN_TYPES",
",",
"Joiner",
".",
"on",
"(",
"\",\"",
")",
".",
"join",
"(",
"structTypeInfo",
".",
"getAllStructFieldTypeInfos",
"(",
")",
".",
"stream",
"(",
")",
".",
"map",
"(",
"x",
"->",
"x",
".",
"getTypeName",
"(",
")",
")",
".",
"collect",
"(",
"Collectors",
".",
"toList",
"(",
")",
")",
")",
")",
";",
"}",
"else",
"{",
"// Hive always uses a struct with a field for each of the top-level columns as the root object type.",
"// So for here we assume to-be-registered ORC files follow this pattern.",
"throw",
"new",
"IllegalStateException",
"(",
"\"A valid ORC schema should be an instance of struct\"",
")",
";",
"}",
"}"
] | Extensible if there's other source-of-truth for fetching schema instead of interacting with HDFS.
For the purpose of initializing an {@link org.apache.hadoop.hive.ql.io.orc.OrcSerde} object, it requires:
org.apache.hadoop.hive.serde.serdeConstants#LIST_COLUMNS and
org.apache.hadoop.hive.serde.serdeConstants#LIST_COLUMN_TYPES
Keeping {@link #SCHEMA_LITERAL} is a nice-to-have but not strictly necessary for functionality. | [
"Extensible",
"if",
"there",
"s",
"other",
"source",
"-",
"of",
"-",
"truth",
"for",
"fetching",
"schema",
"instead",
"of",
"interacting",
"with",
"HDFS",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-hive-registration/src/main/java/org/apache/gobblin/hive/orc/HiveOrcSerDeManager.java#L257-L274 |
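The two SerDe values built above can be reproduced from any Hive struct type string. A sketch using Hive's TypeInfoUtils directly:

```java
import java.util.stream.Collectors;

import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

public class OrcSchemaPropsExample {
  public static void main(String[] args) {
    // Parse a struct type string, the shape Hive expects for an ORC root schema.
    TypeInfo schema = TypeInfoUtils.getTypeInfoFromTypeString("struct<id:bigint,name:string>");
    StructTypeInfo struct = (StructTypeInfo) schema;
    // These are the values fed into serdeConstants.LIST_COLUMNS / LIST_COLUMN_TYPES above.
    String columns = String.join(",", struct.getAllStructFieldNames());
    String columnTypes = struct.getAllStructFieldTypeInfos().stream()
        .map(TypeInfo::getTypeName).collect(Collectors.joining(","));
    System.out.println(columns);     // id,name
    System.out.println(columnTypes); // bigint,string
  }
}
```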
25,732 | apache/incubator-gobblin | gobblin-utility/src/main/java/org/apache/gobblin/util/retry/RetryerFactory.java | RetryerFactory.newInstance | public static <T> Retryer<T> newInstance(Config config) {
config = config.withFallback(DEFAULTS);
RetryType type = RetryType.valueOf(config.getString(RETRY_TYPE).toUpperCase());
switch (type) {
case EXPONENTIAL:
return newExponentialRetryer(config);
case FIXED:
return newFixedRetryer(config);
default:
throw new IllegalArgumentException(type + " is not supported");
}
} | java | public static <T> Retryer<T> newInstance(Config config) {
config = config.withFallback(DEFAULTS);
RetryType type = RetryType.valueOf(config.getString(RETRY_TYPE).toUpperCase());
switch (type) {
case EXPONENTIAL:
return newExponentialRetryer(config);
case FIXED:
return newFixedRetryer(config);
default:
throw new IllegalArgumentException(type + " is not supported");
}
} | [
"public",
"static",
"<",
"T",
">",
"Retryer",
"<",
"T",
">",
"newInstance",
"(",
"Config",
"config",
")",
"{",
"config",
"=",
"config",
".",
"withFallback",
"(",
"DEFAULTS",
")",
";",
"RetryType",
"type",
"=",
"RetryType",
".",
"valueOf",
"(",
"config",
".",
"getString",
"(",
"RETRY_TYPE",
")",
".",
"toUpperCase",
"(",
")",
")",
";",
"switch",
"(",
"type",
")",
"{",
"case",
"EXPONENTIAL",
":",
"return",
"newExponentialRetryer",
"(",
"config",
")",
";",
"case",
"FIXED",
":",
"return",
"newFixedRetryer",
"(",
"config",
")",
";",
"default",
":",
"throw",
"new",
"IllegalArgumentException",
"(",
"type",
"+",
"\" is not supported\"",
")",
";",
"}",
"}"
] | Creates new instance of retryer based on the config.
Accepted config keys are defined in RetryerFactory as static member variable.
You can use State along with ConfigBuilder and config prefix to build config.
@param config
@return | [
"Creates",
"new",
"instance",
"of",
"retryer",
"based",
"on",
"the",
"config",
".",
"Accepted",
"config",
"keys",
"are",
"defined",
"in",
"RetryerFactory",
"as",
"static",
"member",
"variable",
".",
"You",
"can",
"use",
"State",
"along",
"with",
"ConfigBuilder",
"and",
"config",
"prefix",
"to",
"build",
"config",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-utility/src/main/java/org/apache/gobblin/util/retry/RetryerFactory.java#L84-L96 |
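A usage sketch for the factory above. It assumes the Retryer type comes from the guava-retrying library (com.github.rholder.retry) and that RETRY_TYPE is a public static key on RetryerFactory; "FIXED" matches the switch in the code and is parsed case-insensitively:

```java
import java.util.Properties;

import com.github.rholder.retry.Retryer;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

import org.apache.gobblin.util.retry.RetryerFactory;

public class RetryerFactoryExample {
  public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    props.setProperty(RetryerFactory.RETRY_TYPE, "FIXED"); // FIXED or EXPONENTIAL
    Config config = ConfigFactory.parseProperties(props);

    Retryer<String> retryer = RetryerFactory.newInstance(config);
    // The callable is retried per the configured policy; here it succeeds on the first try.
    System.out.println(retryer.call(() -> "ok"));
  }
}
```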
25,733 | apache/incubator-gobblin | gobblin-api/src/main/java/org/apache/gobblin/password/PasswordManager.java | PasswordManager.getInstance | public static PasswordManager getInstance(State state) {
try {
return CACHED_INSTANCES
.get(new CachedInstanceKey(state));
} catch (ExecutionException e) {
throw new RuntimeException("Unable to get an instance of PasswordManager", e);
}
} | java | public static PasswordManager getInstance(State state) {
try {
return CACHED_INSTANCES
.get(new CachedInstanceKey(state));
} catch (ExecutionException e) {
throw new RuntimeException("Unable to get an instance of PasswordManager", e);
}
} | [
"public",
"static",
"PasswordManager",
"getInstance",
"(",
"State",
"state",
")",
"{",
"try",
"{",
"return",
"CACHED_INSTANCES",
".",
"get",
"(",
"new",
"CachedInstanceKey",
"(",
"state",
")",
")",
";",
"}",
"catch",
"(",
"ExecutionException",
"e",
")",
"{",
"throw",
"new",
"RuntimeException",
"(",
"\"Unable to get an instance of PasswordManager\"",
",",
"e",
")",
";",
"}",
"}"
] | Get an instance. The location of the master password file is provided via "encrypt.key.loc". | [
"Get",
"an",
"instance",
".",
"The",
"location",
"of",
"the",
"master",
"password",
"file",
"is",
"provided",
"via",
"encrypt",
".",
"key",
".",
"loc",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-api/src/main/java/org/apache/gobblin/password/PasswordManager.java#L159-L166 |
25,734 | apache/incubator-gobblin | gobblin-api/src/main/java/org/apache/gobblin/password/PasswordManager.java | PasswordManager.getInstance | public static PasswordManager getInstance(Path masterPwdLoc) {
State state = new State();
state.setProp(ConfigurationKeys.ENCRYPT_KEY_LOC, masterPwdLoc.toString());
state.setProp(ConfigurationKeys.ENCRYPT_KEY_FS_URI, masterPwdLoc.toUri());
try {
return CACHED_INSTANCES
.get(new CachedInstanceKey(state));
} catch (ExecutionException e) {
throw new RuntimeException("Unable to get an instance of PasswordManager", e);
}
} | java | public static PasswordManager getInstance(Path masterPwdLoc) {
State state = new State();
state.setProp(ConfigurationKeys.ENCRYPT_KEY_LOC, masterPwdLoc.toString());
state.setProp(ConfigurationKeys.ENCRYPT_KEY_FS_URI, masterPwdLoc.toUri());
try {
return CACHED_INSTANCES
.get(new CachedInstanceKey(state));
} catch (ExecutionException e) {
throw new RuntimeException("Unable to get an instance of PasswordManager", e);
}
} | [
"public",
"static",
"PasswordManager",
"getInstance",
"(",
"Path",
"masterPwdLoc",
")",
"{",
"State",
"state",
"=",
"new",
"State",
"(",
")",
";",
"state",
".",
"setProp",
"(",
"ConfigurationKeys",
".",
"ENCRYPT_KEY_LOC",
",",
"masterPwdLoc",
".",
"toString",
"(",
")",
")",
";",
"state",
".",
"setProp",
"(",
"ConfigurationKeys",
".",
"ENCRYPT_KEY_FS_URI",
",",
"masterPwdLoc",
".",
"toUri",
"(",
")",
")",
";",
"try",
"{",
"return",
"CACHED_INSTANCES",
".",
"get",
"(",
"new",
"CachedInstanceKey",
"(",
"state",
")",
")",
";",
"}",
"catch",
"(",
"ExecutionException",
"e",
")",
"{",
"throw",
"new",
"RuntimeException",
"(",
"\"Unable to get an instance of PasswordManager\"",
",",
"e",
")",
";",
"}",
"}"
] | Get an instance. The master password file is given by masterPwdLoc. | [
"Get",
"an",
"instance",
".",
"The",
"master",
"password",
"file",
"is",
"given",
"by",
"masterPwdLoc",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-api/src/main/java/org/apache/gobblin/password/PasswordManager.java#L178-L188 |
25,735 | apache/incubator-gobblin | gobblin-api/src/main/java/org/apache/gobblin/password/PasswordManager.java | PasswordManager.encryptPassword | public String encryptPassword(String plain) {
Preconditions.checkArgument(this.encryptors.size() > 0,
"A master password needs to be provided for encrypting passwords.");
try {
return this.encryptors.get(0).encrypt(plain);
} catch (Exception e) {
throw new RuntimeException("Failed to encrypt password", e);
}
} | java | public String encryptPassword(String plain) {
Preconditions.checkArgument(this.encryptors.size() > 0,
"A master password needs to be provided for encrypting passwords.");
try {
return this.encryptors.get(0).encrypt(plain);
} catch (Exception e) {
throw new RuntimeException("Failed to encrypt password", e);
}
} | [
"public",
"String",
"encryptPassword",
"(",
"String",
"plain",
")",
"{",
"Preconditions",
".",
"checkArgument",
"(",
"this",
".",
"encryptors",
".",
"size",
"(",
")",
">",
"0",
",",
"\"A master password needs to be provided for encrypting passwords.\"",
")",
";",
"try",
"{",
"return",
"this",
".",
"encryptors",
".",
"get",
"(",
"0",
")",
".",
"encrypt",
"(",
"plain",
")",
";",
"}",
"catch",
"(",
"Exception",
"e",
")",
"{",
"throw",
"new",
"RuntimeException",
"(",
"\"Failed to encrypt password\"",
",",
"e",
")",
";",
"}",
"}"
] | Encrypt a password. A master password must have been provided in the constructor.
@param plain A plain password to be encrypted.
@return The encrypted password. | [
"Encrypt",
"a",
"password",
".",
"A",
"master",
"password",
"must",
"have",
"been",
"provided",
"in",
"the",
"constructor",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-api/src/main/java/org/apache/gobblin/password/PasswordManager.java#L200-L209 |
25,736 | apache/incubator-gobblin | gobblin-api/src/main/java/org/apache/gobblin/password/PasswordManager.java | PasswordManager.decryptPassword | public String decryptPassword(String encrypted) {
Preconditions.checkArgument(this.encryptors.size() > 0,
"A master password needs to be provided for decrypting passwords.");
for (TextEncryptor encryptor : encryptors) {
try {
return encryptor.decrypt(encrypted);
} catch (Exception e) {
LOG.warn("Failed attempt to decrypt secret {}", encrypted, e);
}
}
LOG.error("All {} decrypt attempt(s) failed.", encryptors.size());
throw new RuntimeException("Failed to decrypt password ENC(" + encrypted + ")");
} | java | public String decryptPassword(String encrypted) {
Preconditions.checkArgument(this.encryptors.size() > 0,
"A master password needs to be provided for decrypting passwords.");
for (TextEncryptor encryptor : encryptors) {
try {
return encryptor.decrypt(encrypted);
} catch (Exception e) {
LOG.warn("Failed attempt to decrypt secret {}", encrypted, e);
}
}
LOG.error("All {} decrypt attempt(s) failed.", encryptors.size());
throw new RuntimeException("Failed to decrypt password ENC(" + encrypted + ")");
} | [
"public",
"String",
"decryptPassword",
"(",
"String",
"encrypted",
")",
"{",
"Preconditions",
".",
"checkArgument",
"(",
"this",
".",
"encryptors",
".",
"size",
"(",
")",
">",
"0",
",",
"\"A master password needs to be provided for decrypting passwords.\"",
")",
";",
"for",
"(",
"TextEncryptor",
"encryptor",
":",
"encryptors",
")",
"{",
"try",
"{",
"return",
"encryptor",
".",
"decrypt",
"(",
"encrypted",
")",
";",
"}",
"catch",
"(",
"Exception",
"e",
")",
"{",
"LOG",
".",
"warn",
"(",
"\"Failed attempt to decrypt secret {}\"",
",",
"encrypted",
",",
"e",
")",
";",
"}",
"}",
"LOG",
".",
"error",
"(",
"\"All {} decrypt attempt(s) failed.\"",
",",
"encryptors",
".",
"size",
"(",
")",
")",
";",
"throw",
"new",
"RuntimeException",
"(",
"\"Failed to decrypt password ENC(\"",
"+",
"encrypted",
"+",
"\")\"",
")",
";",
"}"
] | Decrypt an encrypted password. A master password file must have been provided in the constructor.
@param encrypted An encrypted password.
@return The decrypted password. | [
"Decrypt",
"an",
"encrypted",
"password",
".",
"A",
"master",
"password",
"file",
"must",
"have",
"been",
"provided",
"in",
"the",
"constructor",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-api/src/main/java/org/apache/gobblin/password/PasswordManager.java#L216-L229 |
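A round-trip sketch for the PasswordManager rows above. The master password file format (a plain file whose content is the master password) is an assumption here; if that assumption is wrong it would surface as a decryption error:

```java
import java.io.File;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;

import org.apache.gobblin.password.PasswordManager;
import org.apache.hadoop.fs.Path;

public class PasswordManagerExample {
  public static void main(String[] args) throws Exception {
    // Assumed format: the file simply holds the master password.
    File masterPwdFile = File.createTempFile("master", ".pwd");
    Files.write(masterPwdFile.toPath(), "someMasterPassword".getBytes(StandardCharsets.UTF_8));

    PasswordManager manager = PasswordManager.getInstance(new Path(masterPwdFile.getAbsolutePath()));
    String encrypted = manager.encryptPassword("secret");
    System.out.println(manager.decryptPassword(encrypted)); // secret
  }
}
```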
25,737 | apache/incubator-gobblin | gobblin-cluster/src/main/java/org/apache/gobblin/cluster/GobblinHelixDistributeJobExecutionLauncher.java | GobblinHelixDistributeJobExecutionLauncher.createJobBuilder | private JobConfig.Builder createJobBuilder (Properties jobProps) {
// Create a single task for job planning
String planningId = getPlanningJobId(jobProps);
Map<String, TaskConfig> taskConfigMap = Maps.newHashMap();
Map<String, String> rawConfigMap = Maps.newHashMap();
for (String key : jobProps.stringPropertyNames()) {
rawConfigMap.put(JOB_PROPS_PREFIX + key, (String)jobProps.get(key));
}
rawConfigMap.put(GobblinClusterConfigurationKeys.TASK_SUCCESS_OPTIONAL_KEY, "true");
// Create a single Job which only contains a single task
taskConfigMap.put(planningId, TaskConfig.Builder.from(rawConfigMap));
JobConfig.Builder jobConfigBuilder = new JobConfig.Builder();
// We want GobblinHelixJobLauncher to run only once.
jobConfigBuilder.setMaxAttemptsPerTask(1);
// Planning job never times out (Helix defaults to a 1h timeout; set a large value, '1 month')
jobConfigBuilder.setTimeoutPerTask(JobConfig.DEFAULT_TIMEOUT_PER_TASK * 24 * 30);
// Planning job should have its own tag support
if (jobProps.containsKey(GobblinClusterConfigurationKeys.HELIX_PLANNING_JOB_TAG_KEY)) {
String jobPlanningTag = jobProps.getProperty(GobblinClusterConfigurationKeys.HELIX_PLANNING_JOB_TAG_KEY);
log.info("PlanningJob {} has tags associated : {}", planningId, jobPlanningTag);
jobConfigBuilder.setInstanceGroupTag(jobPlanningTag);
}
// Planning job should have its own type support
if (jobProps.containsKey(GobblinClusterConfigurationKeys.HELIX_PLANNING_JOB_TYPE_KEY)) {
String jobType = jobProps.getProperty(GobblinClusterConfigurationKeys.HELIX_PLANNING_JOB_TYPE_KEY);
log.info("PlanningJob {} has types associated : {}", planningId, jobType);
jobConfigBuilder.setJobType(jobType);
}
jobConfigBuilder.setNumConcurrentTasksPerInstance(PropertiesUtils.getPropAsInt(jobProps,
GobblinClusterConfigurationKeys.HELIX_CLUSTER_TASK_CONCURRENCY,
GobblinClusterConfigurationKeys.HELIX_CLUSTER_TASK_CONCURRENCY_DEFAULT));
jobConfigBuilder.setFailureThreshold(1);
jobConfigBuilder.addTaskConfigMap(taskConfigMap).setCommand(GobblinTaskRunner.GOBBLIN_JOB_FACTORY_NAME);
return jobConfigBuilder;
} | java | private JobConfig.Builder createJobBuilder (Properties jobProps) {
// Create a single task for job planning
String planningId = getPlanningJobId(jobProps);
Map<String, TaskConfig> taskConfigMap = Maps.newHashMap();
Map<String, String> rawConfigMap = Maps.newHashMap();
for (String key : jobProps.stringPropertyNames()) {
rawConfigMap.put(JOB_PROPS_PREFIX + key, (String)jobProps.get(key));
}
rawConfigMap.put(GobblinClusterConfigurationKeys.TASK_SUCCESS_OPTIONAL_KEY, "true");
// Create a single Job which only contains a single task
taskConfigMap.put(planningId, TaskConfig.Builder.from(rawConfigMap));
JobConfig.Builder jobConfigBuilder = new JobConfig.Builder();
// We want GobblinHelixJobLauncher to run only once.
jobConfigBuilder.setMaxAttemptsPerTask(1);
// Planning job never times out (Helix defaults to a 1h timeout; set a large value, '1 month')
jobConfigBuilder.setTimeoutPerTask(JobConfig.DEFAULT_TIMEOUT_PER_TASK * 24 * 30);
// Planning job should have its own tag support
if (jobProps.containsKey(GobblinClusterConfigurationKeys.HELIX_PLANNING_JOB_TAG_KEY)) {
String jobPlanningTag = jobProps.getProperty(GobblinClusterConfigurationKeys.HELIX_PLANNING_JOB_TAG_KEY);
log.info("PlanningJob {} has tags associated : {}", planningId, jobPlanningTag);
jobConfigBuilder.setInstanceGroupTag(jobPlanningTag);
}
// Planning job should have its own type support
if (jobProps.containsKey(GobblinClusterConfigurationKeys.HELIX_PLANNING_JOB_TYPE_KEY)) {
String jobType = jobProps.getProperty(GobblinClusterConfigurationKeys.HELIX_PLANNING_JOB_TYPE_KEY);
log.info("PlanningJob {} has types associated : {}", planningId, jobType);
jobConfigBuilder.setJobType(jobType);
}
jobConfigBuilder.setNumConcurrentTasksPerInstance(PropertiesUtils.getPropAsInt(jobProps,
GobblinClusterConfigurationKeys.HELIX_CLUSTER_TASK_CONCURRENCY,
GobblinClusterConfigurationKeys.HELIX_CLUSTER_TASK_CONCURRENCY_DEFAULT));
jobConfigBuilder.setFailureThreshold(1);
jobConfigBuilder.addTaskConfigMap(taskConfigMap).setCommand(GobblinTaskRunner.GOBBLIN_JOB_FACTORY_NAME);
return jobConfigBuilder;
} | [
"private",
"JobConfig",
".",
"Builder",
"createJobBuilder",
"(",
"Properties",
"jobProps",
")",
"{",
"// Create a single task for job planning",
"String",
"planningId",
"=",
"getPlanningJobId",
"(",
"jobProps",
")",
";",
"Map",
"<",
"String",
",",
"TaskConfig",
">",
"taskConfigMap",
"=",
"Maps",
".",
"newHashMap",
"(",
")",
";",
"Map",
"<",
"String",
",",
"String",
">",
"rawConfigMap",
"=",
"Maps",
".",
"newHashMap",
"(",
")",
";",
"for",
"(",
"String",
"key",
":",
"jobProps",
".",
"stringPropertyNames",
"(",
")",
")",
"{",
"rawConfigMap",
".",
"put",
"(",
"JOB_PROPS_PREFIX",
"+",
"key",
",",
"(",
"String",
")",
"jobProps",
".",
"get",
"(",
"key",
")",
")",
";",
"}",
"rawConfigMap",
".",
"put",
"(",
"GobblinClusterConfigurationKeys",
".",
"TASK_SUCCESS_OPTIONAL_KEY",
",",
"\"true\"",
")",
";",
"// Create a single Job which only contains a single task",
"taskConfigMap",
".",
"put",
"(",
"planningId",
",",
"TaskConfig",
".",
"Builder",
".",
"from",
"(",
"rawConfigMap",
")",
")",
";",
"JobConfig",
".",
"Builder",
"jobConfigBuilder",
"=",
"new",
"JobConfig",
".",
"Builder",
"(",
")",
";",
"// We want GobblinHelixJobLauncher only run once.",
"jobConfigBuilder",
".",
"setMaxAttemptsPerTask",
"(",
"1",
")",
";",
"// Planning job never timeout (Helix defaults 1h timeout, set a large number '1 month')",
"jobConfigBuilder",
".",
"setTimeoutPerTask",
"(",
"JobConfig",
".",
"DEFAULT_TIMEOUT_PER_TASK",
"*",
"24",
"*",
"30",
")",
";",
"// Planning job should have its own tag support",
"if",
"(",
"jobProps",
".",
"containsKey",
"(",
"GobblinClusterConfigurationKeys",
".",
"HELIX_PLANNING_JOB_TAG_KEY",
")",
")",
"{",
"String",
"jobPlanningTag",
"=",
"jobProps",
".",
"getProperty",
"(",
"GobblinClusterConfigurationKeys",
".",
"HELIX_PLANNING_JOB_TAG_KEY",
")",
";",
"log",
".",
"info",
"(",
"\"PlanningJob {} has tags associated : {}\"",
",",
"planningId",
",",
"jobPlanningTag",
")",
";",
"jobConfigBuilder",
".",
"setInstanceGroupTag",
"(",
"jobPlanningTag",
")",
";",
"}",
"// Planning job should have its own type support",
"if",
"(",
"jobProps",
".",
"containsKey",
"(",
"GobblinClusterConfigurationKeys",
".",
"HELIX_PLANNING_JOB_TYPE_KEY",
")",
")",
"{",
"String",
"jobType",
"=",
"jobProps",
".",
"getProperty",
"(",
"GobblinClusterConfigurationKeys",
".",
"HELIX_PLANNING_JOB_TYPE_KEY",
")",
";",
"log",
".",
"info",
"(",
"\"PlanningJob {} has types associated : {}\"",
",",
"planningId",
",",
"jobType",
")",
";",
"jobConfigBuilder",
".",
"setJobType",
"(",
"jobType",
")",
";",
"}",
"jobConfigBuilder",
".",
"setNumConcurrentTasksPerInstance",
"(",
"PropertiesUtils",
".",
"getPropAsInt",
"(",
"jobProps",
",",
"GobblinClusterConfigurationKeys",
".",
"HELIX_CLUSTER_TASK_CONCURRENCY",
",",
"GobblinClusterConfigurationKeys",
".",
"HELIX_CLUSTER_TASK_CONCURRENCY_DEFAULT",
")",
")",
";",
"jobConfigBuilder",
".",
"setFailureThreshold",
"(",
"1",
")",
";",
"jobConfigBuilder",
".",
"addTaskConfigMap",
"(",
"taskConfigMap",
")",
".",
"setCommand",
"(",
"GobblinTaskRunner",
".",
"GOBBLIN_JOB_FACTORY_NAME",
")",
";",
"return",
"jobConfigBuilder",
";",
"}"
] | Create a job config builder which has a single task that wraps the original jobProps.
The planning job (which runs the original {@link GobblinHelixJobLauncher}) will be
executed on one of the Helix participants.
We rely on the underlying {@link GobblinHelixJobLauncher} to correctly handle the task
execution timeout so that the planning job itself is relieved of the timeout constraint.
In short, the planning job will run once and requires no timeout. | [
"Create",
"a",
"job",
"config",
"builder",
"which",
"has",
"a",
"single",
"task",
"that",
"wraps",
"the",
"original",
"jobProps",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/GobblinHelixDistributeJobExecutionLauncher.java#L202-L244 |
25,738 | apache/incubator-gobblin | gobblin-cluster/src/main/java/org/apache/gobblin/cluster/GobblinHelixDistributeJobExecutionLauncher.java | GobblinHelixDistributeJobExecutionLauncher.submitJobToHelix | private void submitJobToHelix(String jobName, String jobId, JobConfig.Builder jobConfigBuilder) throws Exception {
TaskDriver taskDriver = new TaskDriver(this.planningJobHelixManager);
HelixUtils.submitJobToWorkFlow(jobConfigBuilder,
jobName,
jobId,
taskDriver,
this.planningJobHelixManager,
this.workFlowExpiryTimeSeconds);
this.jobSubmitted = true;
} | java | private void submitJobToHelix(String jobName, String jobId, JobConfig.Builder jobConfigBuilder) throws Exception {
TaskDriver taskDriver = new TaskDriver(this.planningJobHelixManager);
HelixUtils.submitJobToWorkFlow(jobConfigBuilder,
jobName,
jobId,
taskDriver,
this.planningJobHelixManager,
this.workFlowExpiryTimeSeconds);
this.jobSubmitted = true;
} | [
"private",
"void",
"submitJobToHelix",
"(",
"String",
"jobName",
",",
"String",
"jobId",
",",
"JobConfig",
".",
"Builder",
"jobConfigBuilder",
")",
"throws",
"Exception",
"{",
"TaskDriver",
"taskDriver",
"=",
"new",
"TaskDriver",
"(",
"this",
".",
"planningJobHelixManager",
")",
";",
"HelixUtils",
".",
"submitJobToWorkFlow",
"(",
"jobConfigBuilder",
",",
"jobName",
",",
"jobId",
",",
"taskDriver",
",",
"this",
".",
"planningJobHelixManager",
",",
"this",
".",
"workFlowExpiryTimeSeconds",
")",
";",
"this",
".",
"jobSubmitted",
"=",
"true",
";",
"}"
] | Submit a planning job to helix so that it can be launched from a remote node.
@param jobName A planning job name which has prefix {@link GobblinClusterConfigurationKeys#PLANNING_JOB_NAME_PREFIX}.
@param jobId A planning job id created by {@link GobblinHelixDistributeJobExecutionLauncher#getPlanningJobId}.
@param jobConfigBuilder A job config builder which contains a single task. | [
"Submit",
"a",
"planning",
"job",
"to",
"helix",
"so",
"that",
"it",
"can",
"launched",
"from",
"a",
"remote",
"node",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-cluster/src/main/java/org/apache/gobblin/cluster/GobblinHelixDistributeJobExecutionLauncher.java#L252-L261 |
25,739 | apache/incubator-gobblin | gobblin-utility/src/main/java/org/apache/gobblin/util/FileListUtils.java | FileListUtils.listMostNestedPathRecursively | public static List<FileStatus> listMostNestedPathRecursively(FileSystem fs, Path path)
throws IOException {
return listMostNestedPathRecursively(fs, path, NO_OP_PATH_FILTER);
} | java | public static List<FileStatus> listMostNestedPathRecursively(FileSystem fs, Path path)
throws IOException {
return listMostNestedPathRecursively(fs, path, NO_OP_PATH_FILTER);
} | [
"public",
"static",
"List",
"<",
"FileStatus",
">",
"listMostNestedPathRecursively",
"(",
"FileSystem",
"fs",
",",
"Path",
"path",
")",
"throws",
"IOException",
"{",
"return",
"listMostNestedPathRecursively",
"(",
"fs",
",",
"path",
",",
"NO_OP_PATH_FILTER",
")",
";",
"}"
] | Method to list out all files, or directory if no file exists, under a specified path. | [
"Method",
"to",
"list",
"out",
"all",
"files",
"or",
"directory",
"if",
"no",
"file",
"exists",
"under",
"a",
"specified",
"path",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-utility/src/main/java/org/apache/gobblin/util/FileListUtils.java#L166-L169 |
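A usage sketch for the listing utility above, run against the local filesystem:

```java
import org.apache.gobblin.util.FileListUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileListExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    // Files are listed; a directory shows up only when it contains no files.
    for (FileStatus status : FileListUtils.listMostNestedPathRecursively(fs, new Path("/tmp"))) {
      System.out.println(status.getPath());
    }
  }
}
```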
25,740 | apache/incubator-gobblin | gobblin-utility/src/main/java/org/apache/gobblin/util/FileListUtils.java | FileListUtils.getAnyNonHiddenFile | public static FileStatus getAnyNonHiddenFile(FileSystem fs, Path path)
throws IOException {
HiddenFilter hiddenFilter = new HiddenFilter();
FileStatus root = fs.getFileStatus(path);
if (!root.isDirectory()) {
return hiddenFilter.accept(path) ? root : null;
}
// DFS to get the first data file
Stack<FileStatus> folders = new Stack<>();
folders.push(root);
while (!folders.empty()) {
FileStatus curFolder = folders.pop();
try {
for (FileStatus status : fs.listStatus(curFolder.getPath(), hiddenFilter)) {
if (status.isDirectory()) {
folders.push(status);
} else {
return status;
}
}
} catch (FileNotFoundException exc) {
// continue
}
}
return null;
} | java | public static FileStatus getAnyNonHiddenFile(FileSystem fs, Path path)
throws IOException {
HiddenFilter hiddenFilter = new HiddenFilter();
FileStatus root = fs.getFileStatus(path);
if (!root.isDirectory()) {
return hiddenFilter.accept(path) ? root : null;
}
// DFS to get the first data file
Stack<FileStatus> folders = new Stack<>();
folders.push(root);
while (!folders.empty()) {
FileStatus curFolder = folders.pop();
try {
for (FileStatus status : fs.listStatus(curFolder.getPath(), hiddenFilter)) {
if (status.isDirectory()) {
folders.push(status);
} else {
return status;
}
}
} catch (FileNotFoundException exc) {
// continue
}
}
return null;
} | [
"public",
"static",
"FileStatus",
"getAnyNonHiddenFile",
"(",
"FileSystem",
"fs",
",",
"Path",
"path",
")",
"throws",
"IOException",
"{",
"HiddenFilter",
"hiddenFilter",
"=",
"new",
"HiddenFilter",
"(",
")",
";",
"FileStatus",
"root",
"=",
"fs",
".",
"getFileStatus",
"(",
"path",
")",
";",
"if",
"(",
"!",
"root",
".",
"isDirectory",
"(",
")",
")",
"{",
"return",
"hiddenFilter",
".",
"accept",
"(",
"path",
")",
"?",
"root",
":",
"null",
";",
"}",
"// DFS to get the first data file",
"Stack",
"<",
"FileStatus",
">",
"folders",
"=",
"new",
"Stack",
"<>",
"(",
")",
";",
"folders",
".",
"push",
"(",
"root",
")",
";",
"while",
"(",
"!",
"folders",
".",
"empty",
"(",
")",
")",
"{",
"FileStatus",
"curFolder",
"=",
"folders",
".",
"pop",
"(",
")",
";",
"try",
"{",
"for",
"(",
"FileStatus",
"status",
":",
"fs",
".",
"listStatus",
"(",
"curFolder",
".",
"getPath",
"(",
")",
",",
"hiddenFilter",
")",
")",
"{",
"if",
"(",
"status",
".",
"isDirectory",
"(",
")",
")",
"{",
"folders",
".",
"push",
"(",
"status",
")",
";",
"}",
"else",
"{",
"return",
"status",
";",
"}",
"}",
"}",
"catch",
"(",
"FileNotFoundException",
"exc",
")",
"{",
"// continue",
"}",
"}",
"return",
"null",
";",
"}"
] | Get any data file, which is not hidden or a directory, from the given path | [
"Get",
"any",
"data",
"file",
"which",
"is",
"not",
"hidden",
"or",
"a",
"directory",
"from",
"the",
"given",
"path"
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-utility/src/main/java/org/apache/gobblin/util/FileListUtils.java#L237-L265 |
25,741 | apache/incubator-gobblin | gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin/service/FlowStatusResource.java | FlowStatusResource.get | @Override
public FlowStatus get(ComplexResourceKey<FlowStatusId, EmptyRecord> key) {
String flowGroup = key.getKey().getFlowGroup();
String flowName = key.getKey().getFlowName();
long flowExecutionId = key.getKey().getFlowExecutionId();
LOG.info("Get called with flowGroup " + flowGroup + " flowName " + flowName + " flowExecutionId " + flowExecutionId);
org.apache.gobblin.service.monitoring.FlowStatus flowStatus =
_flowStatusGenerator.getFlowStatus(flowName, flowGroup, flowExecutionId);
// this returns null to raise a 404 error if flowStatus is null
return convertFlowStatus(flowStatus);
} | java | @Override
public FlowStatus get(ComplexResourceKey<FlowStatusId, EmptyRecord> key) {
String flowGroup = key.getKey().getFlowGroup();
String flowName = key.getKey().getFlowName();
long flowExecutionId = key.getKey().getFlowExecutionId();
LOG.info("Get called with flowGroup " + flowGroup + " flowName " + flowName + " flowExecutionId " + flowExecutionId);
org.apache.gobblin.service.monitoring.FlowStatus flowStatus =
_flowStatusGenerator.getFlowStatus(flowName, flowGroup, flowExecutionId);
// this returns null to raise a 404 error if flowStatus is null
return convertFlowStatus(flowStatus);
} | [
"@",
"Override",
"public",
"FlowStatus",
"get",
"(",
"ComplexResourceKey",
"<",
"FlowStatusId",
",",
"EmptyRecord",
">",
"key",
")",
"{",
"String",
"flowGroup",
"=",
"key",
".",
"getKey",
"(",
")",
".",
"getFlowGroup",
"(",
")",
";",
"String",
"flowName",
"=",
"key",
".",
"getKey",
"(",
")",
".",
"getFlowName",
"(",
")",
";",
"long",
"flowExecutionId",
"=",
"key",
".",
"getKey",
"(",
")",
".",
"getFlowExecutionId",
"(",
")",
";",
"LOG",
".",
"info",
"(",
"\"Get called with flowGroup \"",
"+",
"flowGroup",
"+",
"\" flowName \"",
"+",
"flowName",
"+",
"\" flowExecutionId \"",
"+",
"flowExecutionId",
")",
";",
"org",
".",
"apache",
".",
"gobblin",
".",
"service",
".",
"monitoring",
".",
"FlowStatus",
"flowStatus",
"=",
"_flowStatusGenerator",
".",
"getFlowStatus",
"(",
"flowName",
",",
"flowGroup",
",",
"flowExecutionId",
")",
";",
"// this returns null to raise a 404 error if flowStatus is null",
"return",
"convertFlowStatus",
"(",
"flowStatus",
")",
";",
"}"
] | Retrieve the FlowStatus with the given key
@param key flow status id key containing group name and flow name
@return {@link FlowStatus} with flow status for the latest execution of the flow | [
"Retrieve",
"the",
"FlowStatus",
"with",
"the",
"given",
"key"
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin/service/FlowStatusResource.java#L61-L74 |
25,742 | apache/incubator-gobblin | gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin/service/FlowStatusResource.java | FlowStatusResource.updatedFlowExecutionStatus | static ExecutionStatus updatedFlowExecutionStatus(ExecutionStatus jobExecutionStatus,
ExecutionStatus currentFlowExecutionStatus) {
// if any job failed or flow has failed then return failed status
if (currentFlowExecutionStatus == ExecutionStatus.FAILED ||
jobExecutionStatus == ExecutionStatus.FAILED) {
return ExecutionStatus.FAILED;
}
// if any job is cancelled or the flow has been cancelled then return cancelled status
if (currentFlowExecutionStatus == ExecutionStatus.CANCELLED ||
jobExecutionStatus == ExecutionStatus.CANCELLED) {
return ExecutionStatus.CANCELLED;
}
if (currentFlowExecutionStatus == ExecutionStatus.RUNNING ||
jobExecutionStatus == ExecutionStatus.RUNNING ||
jobExecutionStatus == ExecutionStatus.ORCHESTRATED ||
jobExecutionStatus == ExecutionStatus.COMPILED) {
return ExecutionStatus.RUNNING;
}
return currentFlowExecutionStatus;
} | java | static ExecutionStatus updatedFlowExecutionStatus(ExecutionStatus jobExecutionStatus,
ExecutionStatus currentFlowExecutionStatus) {
// if any job failed or flow has failed then return failed status
if (currentFlowExecutionStatus == ExecutionStatus.FAILED ||
jobExecutionStatus == ExecutionStatus.FAILED) {
return ExecutionStatus.FAILED;
}
// if any job is cancelled or the flow has been cancelled then return cancelled status
if (currentFlowExecutionStatus == ExecutionStatus.CANCELLED ||
jobExecutionStatus == ExecutionStatus.CANCELLED) {
return ExecutionStatus.CANCELLED;
}
if (currentFlowExecutionStatus == ExecutionStatus.RUNNING ||
jobExecutionStatus == ExecutionStatus.RUNNING ||
jobExecutionStatus == ExecutionStatus.ORCHESTRATED ||
jobExecutionStatus == ExecutionStatus.COMPILED) {
return ExecutionStatus.RUNNING;
}
return currentFlowExecutionStatus;
} | [
"static",
"ExecutionStatus",
"updatedFlowExecutionStatus",
"(",
"ExecutionStatus",
"jobExecutionStatus",
",",
"ExecutionStatus",
"currentFlowExecutionStatus",
")",
"{",
"// if any job failed or flow has failed then return failed status",
"if",
"(",
"currentFlowExecutionStatus",
"==",
"ExecutionStatus",
".",
"FAILED",
"||",
"jobExecutionStatus",
"==",
"ExecutionStatus",
".",
"FAILED",
")",
"{",
"return",
"ExecutionStatus",
".",
"FAILED",
";",
"}",
"// if any job is cancelled or flow has failed then return failed status",
"if",
"(",
"currentFlowExecutionStatus",
"==",
"ExecutionStatus",
".",
"CANCELLED",
"||",
"jobExecutionStatus",
"==",
"ExecutionStatus",
".",
"CANCELLED",
")",
"{",
"return",
"ExecutionStatus",
".",
"CANCELLED",
";",
"}",
"if",
"(",
"currentFlowExecutionStatus",
"==",
"ExecutionStatus",
".",
"RUNNING",
"||",
"jobExecutionStatus",
"==",
"ExecutionStatus",
".",
"RUNNING",
"||",
"jobExecutionStatus",
"==",
"ExecutionStatus",
".",
"ORCHESTRATED",
"||",
"jobExecutionStatus",
"==",
"ExecutionStatus",
".",
"COMPILED",
")",
"{",
"return",
"ExecutionStatus",
".",
"RUNNING",
";",
"}",
"return",
"currentFlowExecutionStatus",
";",
"}"
] | Determines the new flow status based on the current flow status and new job status
@param jobExecutionStatus job status
@param currentFlowExecutionStatus current flow status
@return updated flow status | [
"Determines",
"the",
"new",
"flow",
"status",
"based",
"on",
"the",
"current",
"flow",
"status",
"and",
"new",
"job",
"status"
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-restli/gobblin-flow-config-service/gobblin-flow-config-service-server/src/main/java/org/apache/gobblin/service/FlowStatusResource.java#L171-L194 |
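The function above folds one job status into a running flow status with the precedence FAILED > CANCELLED > RUNNING. A self-contained sketch of that fold over a whole flow follows; the local ExecutionStatus enum and the COMPLETE starting value are assumptions (neither is shown in the row), and only the precedence logic is copied from the function.

```java
import java.util.Arrays;
import java.util.List;

public class FlowStatusFoldSketch {
  // Local stand-in for the ExecutionStatus enum referenced in the row.
  enum ExecutionStatus { COMPILED, ORCHESTRATED, RUNNING, COMPLETE, FAILED, CANCELLED }

  // Same precedence rules as updatedFlowExecutionStatus: FAILED > CANCELLED > RUNNING.
  static ExecutionStatus fold(ExecutionStatus job, ExecutionStatus flow) {
    if (flow == ExecutionStatus.FAILED || job == ExecutionStatus.FAILED) {
      return ExecutionStatus.FAILED;
    }
    if (flow == ExecutionStatus.CANCELLED || job == ExecutionStatus.CANCELLED) {
      return ExecutionStatus.CANCELLED;
    }
    if (flow == ExecutionStatus.RUNNING || job == ExecutionStatus.RUNNING
        || job == ExecutionStatus.ORCHESTRATED || job == ExecutionStatus.COMPILED) {
      return ExecutionStatus.RUNNING;
    }
    return flow;
  }

  public static void main(String[] args) {
    List<ExecutionStatus> jobStatuses =
        Arrays.asList(ExecutionStatus.COMPLETE, ExecutionStatus.RUNNING, ExecutionStatus.FAILED);
    ExecutionStatus flow = ExecutionStatus.COMPLETE; // assumed starting value
    for (ExecutionStatus job : jobStatuses) {
      flow = fold(job, flow);
    }
    System.out.println(flow); // FAILED -- one failed job fails the whole flow
  }
}
```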
25,743 | apache/incubator-gobblin | gobblin-service/src/main/java/org/apache/gobblin/service/modules/flow/LoadBasedFlowEdgeImpl.java | LoadBasedFlowEdgeImpl.calculateEdgeIdentity | public static String calculateEdgeIdentity(ServiceNode sourceNode, ServiceNode targetNode, SpecExecutor specExecutorInstance){
return sourceNode.getNodeName() + "-" + specExecutorInstance.getUri() + "-" + targetNode.getNodeName();
} | java | public static String calculateEdgeIdentity(ServiceNode sourceNode, ServiceNode targetNode, SpecExecutor specExecutorInstance){
return sourceNode.getNodeName() + "-" + specExecutorInstance.getUri() + "-" + targetNode.getNodeName();
} | [
"public",
"static",
"String",
"calculateEdgeIdentity",
"(",
"ServiceNode",
"sourceNode",
",",
"ServiceNode",
"targetNode",
",",
"SpecExecutor",
"specExecutorInstance",
")",
"{",
"return",
"sourceNode",
".",
"getNodeName",
"(",
")",
"+",
"\"-\"",
"+",
"specExecutorInstance",
".",
"getUri",
"(",
")",
"+",
"\"-\"",
"+",
"targetNode",
".",
"getNodeName",
"(",
")",
";",
"}"
] | A naive implementation of edge identity calculation.
@return the edge identity string of the form sourceNodeName-specExecutorUri-targetNodeName | [
"A",
"naive",
"implementation",
"of",
"edge",
"identity",
"calculation",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flow/LoadBasedFlowEdgeImpl.java#L136-L138 |
25,744 | apache/incubator-gobblin | gobblin-config-management/gobblin-config-core/src/main/java/org/apache/gobblin/config/store/zip/IvyConfigStoreFactory.java | IvyConfigStoreFactory.getBaseURI | private URI getBaseURI(URI configKey) throws URISyntaxException {
return new URI(configKey.getScheme(), configKey.getAuthority(), null, configKey.getQuery(), configKey.getFragment());
} | java | private URI getBaseURI(URI configKey) throws URISyntaxException {
return new URI(configKey.getScheme(), configKey.getAuthority(), null, configKey.getQuery(), configKey.getFragment());
} | [
"private",
"URI",
"getBaseURI",
"(",
"URI",
"configKey",
")",
"throws",
"URISyntaxException",
"{",
"return",
"new",
"URI",
"(",
"configKey",
".",
"getScheme",
"(",
")",
",",
"configKey",
".",
"getAuthority",
"(",
")",
",",
"null",
",",
"configKey",
".",
"getQuery",
"(",
")",
",",
"configKey",
".",
"getFragment",
"(",
")",
")",
";",
"}"
] | Base URI for a config store should be root of the zip file, so change path part of URI to be null | [
"Base",
"URI",
"for",
"a",
"config",
"store",
"should",
"be",
"root",
"of",
"the",
"zip",
"file",
"so",
"change",
"path",
"part",
"of",
"URI",
"to",
"be",
"null"
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-config-management/gobblin-config-core/src/main/java/org/apache/gobblin/config/store/zip/IvyConfigStoreFactory.java#L123-L125 |
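The one-liner above rebuilds a URI with a null path so that only scheme, authority, query, and fragment survive. A runnable sketch with a made-up scheme and authority:

```java
import java.net.URI;
import java.net.URISyntaxException;

public class BaseUriSketch {
  public static void main(String[] args) throws URISyntaxException {
    // Made-up scheme/authority for a config key deep inside a zip-backed store.
    URI configKey = new URI("zip", "store.example.org", "/data/databases/myDb", null, null);
    // Rebuild the URI with a null path, exactly as getBaseURI does.
    URI base = new URI(configKey.getScheme(), configKey.getAuthority(), null,
        configKey.getQuery(), configKey.getFragment());
    System.out.println(base); // zip://store.example.org -- the store root
  }
}
```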
25,745 | apache/incubator-gobblin | gobblin-metrics-libs/gobblin-metrics-base/src/main/java/org/apache/gobblin/metrics/event/TimingEvent.java | TimingEvent.stop | public void stop(Map<String, String> additionalMetadata) {
if (this.stopped) {
return;
}
this.stopped = true;
long endTime = System.currentTimeMillis();
long duration = endTime - this.startTime;
Map<String, String> finalMetadata = Maps.newHashMap();
finalMetadata.putAll(additionalMetadata);
finalMetadata.put(EventSubmitter.EVENT_TYPE, METADATA_TIMING_EVENT);
finalMetadata.put(METADATA_START_TIME, Long.toString(this.startTime));
finalMetadata.put(METADATA_END_TIME, Long.toString(endTime));
finalMetadata.put(METADATA_DURATION, Long.toString(duration));
this.submitter.submit(this.name, finalMetadata);
} | java | public void stop(Map<String, String> additionalMetadata) {
if (this.stopped) {
return;
}
this.stopped = true;
long endTime = System.currentTimeMillis();
long duration = endTime - this.startTime;
Map<String, String> finalMetadata = Maps.newHashMap();
finalMetadata.putAll(additionalMetadata);
finalMetadata.put(EventSubmitter.EVENT_TYPE, METADATA_TIMING_EVENT);
finalMetadata.put(METADATA_START_TIME, Long.toString(this.startTime));
finalMetadata.put(METADATA_END_TIME, Long.toString(endTime));
finalMetadata.put(METADATA_DURATION, Long.toString(duration));
this.submitter.submit(this.name, finalMetadata);
} | [
"public",
"void",
"stop",
"(",
"Map",
"<",
"String",
",",
"String",
">",
"additionalMetadata",
")",
"{",
"if",
"(",
"this",
".",
"stopped",
")",
"{",
"return",
";",
"}",
"this",
".",
"stopped",
"=",
"true",
";",
"long",
"endTime",
"=",
"System",
".",
"currentTimeMillis",
"(",
")",
";",
"long",
"duration",
"=",
"endTime",
"-",
"this",
".",
"startTime",
";",
"Map",
"<",
"String",
",",
"String",
">",
"finalMetadata",
"=",
"Maps",
".",
"newHashMap",
"(",
")",
";",
"finalMetadata",
".",
"putAll",
"(",
"additionalMetadata",
")",
";",
"finalMetadata",
".",
"put",
"(",
"EventSubmitter",
".",
"EVENT_TYPE",
",",
"METADATA_TIMING_EVENT",
")",
";",
"finalMetadata",
".",
"put",
"(",
"METADATA_START_TIME",
",",
"Long",
".",
"toString",
"(",
"this",
".",
"startTime",
")",
")",
";",
"finalMetadata",
".",
"put",
"(",
"METADATA_END_TIME",
",",
"Long",
".",
"toString",
"(",
"endTime",
")",
")",
";",
"finalMetadata",
".",
"put",
"(",
"METADATA_DURATION",
",",
"Long",
".",
"toString",
"(",
"duration",
")",
")",
";",
"this",
".",
"submitter",
".",
"submit",
"(",
"this",
".",
"name",
",",
"finalMetadata",
")",
";",
"}"
] | Stop the timer and submit the event, along with the additional metadata specified. If the timer was already stopped
before, this is a no-op.
@param additionalMetadata a {@link Map} of additional metadata that should be submitted along with this event | [
"Stop",
"the",
"timer",
"and",
"submit",
"the",
"event",
"along",
"with",
"the",
"additional",
"metadata",
"specified",
".",
"If",
"the",
"timer",
"was",
"already",
"stopped",
"before",
"this",
"is",
"a",
"no",
"-",
"op",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-metrics-libs/gobblin-metrics-base/src/main/java/org/apache/gobblin/metrics/event/TimingEvent.java#L109-L125 |
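For orientation, here is roughly what a stop() call assembles before handing off to the EventSubmitter. The literal METADATA_* key values are not shown in the row, so the map keys below are stand-ins, and the submitter is replaced by a print:

```java
import java.util.HashMap;
import java.util.Map;

public class TimingEventSketch {
  public static void main(String[] args) throws InterruptedException {
    long startTime = System.currentTimeMillis();
    Thread.sleep(50); // stand-in for the work being timed

    long endTime = System.currentTimeMillis();
    Map<String, String> metadata = new HashMap<>();
    metadata.put("startTime", Long.toString(startTime));                 // METADATA_START_TIME
    metadata.put("endTime", Long.toString(endTime));                     // METADATA_END_TIME
    metadata.put("durationMillis", Long.toString(endTime - startTime));  // METADATA_DURATION
    // A real TimingEvent would hand this map to its EventSubmitter; here we just print it.
    System.out.println(metadata);
  }
}
```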
25,746 | apache/incubator-gobblin | gobblin-core/src/main/java/org/apache/gobblin/recordaccess/RecordAccessorProviderFactory.java | RecordAccessorProviderFactory.getRecordAccessorForObject | public synchronized static RecordAccessor getRecordAccessorForObject(Object obj) {
for (RecordAccessorProvider p: recordAccessorProviders) {
RecordAccessor accessor = p.recordAccessorForObject(obj);
if (accessor != null) {
return accessor;
}
}
throw new IllegalArgumentException("Can't build accessor for object " + obj.toString() + "!");
} | java | public synchronized static RecordAccessor getRecordAccessorForObject(Object obj) {
for (RecordAccessorProvider p: recordAccessorProviders) {
RecordAccessor accessor = p.recordAccessorForObject(obj);
if (accessor != null) {
return accessor;
}
}
throw new IllegalArgumentException("Can't build accessor for object " + obj.toString() + "!");
} | [
"public",
"synchronized",
"static",
"RecordAccessor",
"getRecordAccessorForObject",
"(",
"Object",
"obj",
")",
"{",
"for",
"(",
"RecordAccessorProvider",
"p",
":",
"recordAccessorProviders",
")",
"{",
"RecordAccessor",
"accessor",
"=",
"p",
".",
"recordAccessorForObject",
"(",
"obj",
")",
";",
"if",
"(",
"accessor",
"!=",
"null",
")",
"{",
"return",
"accessor",
";",
"}",
"}",
"throw",
"new",
"IllegalArgumentException",
"(",
"\"Can't build accessor for object \"",
"+",
"obj",
".",
"toString",
"(",
")",
"+",
"\"!\"",
")",
";",
"}"
] | Get a RecordAccessor for a given object. Throws IllegalArgumentException if none
can be built. | [
"Get",
"a",
"RecordAccessor",
"for",
"a",
"given",
"object",
".",
"Throws",
"IllegalArgumentException",
"if",
"none",
"can",
"be",
"built",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-core/src/main/java/org/apache/gobblin/recordaccess/RecordAccessorProviderFactory.java#L36-L45 |
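The lookup above is a classic provider chain: ask each provider in turn, take the first non-null accessor, fail if none matches. A compact sketch with simplified stand-in interfaces (the real RecordAccessor/RecordAccessorProvider types are richer):

```java
import java.util.Arrays;
import java.util.List;
import java.util.Map;

public class ProviderChainSketch {
  // Simplified stand-ins for RecordAccessor / RecordAccessorProvider.
  interface Accessor { String describe(); }
  interface Provider { Accessor accessorForObject(Object o); } // null means "not mine"

  public static void main(String[] args) {
    List<Provider> providers = Arrays.asList(
        o -> o instanceof Map ? () -> "map accessor" : null,
        o -> o instanceof String ? () -> "string accessor" : null);

    Object record = "a string record";
    // First provider that recognizes the object wins, as in getRecordAccessorForObject.
    Accessor accessor = providers.stream()
        .map(p -> p.accessorForObject(record))
        .filter(a -> a != null)
        .findFirst()
        .orElseThrow(() -> new IllegalArgumentException("Can't build accessor for " + record));
    System.out.println(accessor.describe()); // string accessor
  }
}
```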
25,747 | apache/incubator-gobblin | gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/MRCompactorJobPropCreator.java | MRCompactorJobPropCreator.getNewDataInFolder | private Set<Path> getNewDataInFolder(Path inputFolder, Path outputFolder) throws IOException {
Set<Path> newFiles = Sets.newHashSet();
if (!this.fs.exists(inputFolder) || !this.fs.exists(outputFolder)) {
return newFiles;
}
DateTime lastCompactionTime = new DateTime(MRCompactor.readCompactionTimestamp(this.fs, outputFolder));
for (FileStatus fstat : FileListUtils.listFilesRecursively(this.fs, inputFolder)) {
DateTime fileModificationTime = new DateTime(fstat.getModificationTime());
if (fileModificationTime.isAfter(lastCompactionTime)) {
LOG.info ("[" + fileModificationTime.getMillis() + "] " + fstat.getPath() + " is after " + lastCompactionTime.getMillis());
newFiles.add(fstat.getPath());
}
}
if (!newFiles.isEmpty()) {
LOG.info(String.format("Found %d new files within folder %s which are more recent than the previous "
+ "compaction start time of %s.", newFiles.size(), inputFolder, lastCompactionTime));
}
return newFiles;
} | java | private Set<Path> getNewDataInFolder(Path inputFolder, Path outputFolder) throws IOException {
Set<Path> newFiles = Sets.newHashSet();
if (!this.fs.exists(inputFolder) || !this.fs.exists(outputFolder)) {
return newFiles;
}
DateTime lastCompactionTime = new DateTime(MRCompactor.readCompactionTimestamp(this.fs, outputFolder));
for (FileStatus fstat : FileListUtils.listFilesRecursively(this.fs, inputFolder)) {
DateTime fileModificationTime = new DateTime(fstat.getModificationTime());
if (fileModificationTime.isAfter(lastCompactionTime)) {
LOG.info ("[" + fileModificationTime.getMillis() + "] " + fstat.getPath() + " is after " + lastCompactionTime.getMillis());
newFiles.add(fstat.getPath());
}
}
if (!newFiles.isEmpty()) {
LOG.info(String.format("Found %d new files within folder %s which are more recent than the previous "
+ "compaction start time of %s.", newFiles.size(), inputFolder, lastCompactionTime));
}
return newFiles;
} | [
"private",
"Set",
"<",
"Path",
">",
"getNewDataInFolder",
"(",
"Path",
"inputFolder",
",",
"Path",
"outputFolder",
")",
"throws",
"IOException",
"{",
"Set",
"<",
"Path",
">",
"newFiles",
"=",
"Sets",
".",
"newHashSet",
"(",
")",
";",
"if",
"(",
"!",
"this",
".",
"fs",
".",
"exists",
"(",
"inputFolder",
")",
"||",
"!",
"this",
".",
"fs",
".",
"exists",
"(",
"outputFolder",
")",
")",
"{",
"return",
"newFiles",
";",
"}",
"DateTime",
"lastCompactionTime",
"=",
"new",
"DateTime",
"(",
"MRCompactor",
".",
"readCompactionTimestamp",
"(",
"this",
".",
"fs",
",",
"outputFolder",
")",
")",
";",
"for",
"(",
"FileStatus",
"fstat",
":",
"FileListUtils",
".",
"listFilesRecursively",
"(",
"this",
".",
"fs",
",",
"inputFolder",
")",
")",
"{",
"DateTime",
"fileModificationTime",
"=",
"new",
"DateTime",
"(",
"fstat",
".",
"getModificationTime",
"(",
")",
")",
";",
"if",
"(",
"fileModificationTime",
".",
"isAfter",
"(",
"lastCompactionTime",
")",
")",
"{",
"LOG",
".",
"info",
"(",
"\"[\"",
"+",
"fileModificationTime",
".",
"getMillis",
"(",
")",
"+",
"\"] \"",
"+",
"fstat",
".",
"getPath",
"(",
")",
"+",
"\" is after \"",
"+",
"lastCompactionTime",
".",
"getMillis",
"(",
")",
")",
";",
"newFiles",
".",
"add",
"(",
"fstat",
".",
"getPath",
"(",
")",
")",
";",
"}",
"}",
"if",
"(",
"!",
"newFiles",
".",
"isEmpty",
"(",
")",
")",
"{",
"LOG",
".",
"info",
"(",
"String",
".",
"format",
"(",
"\"Found %d new files within folder %s which are more recent than the previous \"",
"+",
"\"compaction start time of %s.\"",
",",
"newFiles",
".",
"size",
"(",
")",
",",
"inputFolder",
",",
"lastCompactionTime",
")",
")",
";",
"}",
"return",
"newFiles",
";",
"}"
] | Check if inputFolder contains any files which have modification times which are more
recent than the last compaction time as stored within outputFolder; return any files
which do. An empty set will be returned if all files are older than the last compaction time. | [
"Check",
"if",
"inputFolder",
"contains",
"any",
"files",
"which",
"have",
"modification",
"times",
"which",
"are",
"more",
"recent",
"than",
"the",
"last",
"compaction",
"time",
"as",
"stored",
"within",
"outputFolder",
";",
"return",
"any",
"files",
"which",
"do",
".",
"An",
"empty",
"list",
"will",
"be",
"returned",
"if",
"all",
"files",
"are",
"older",
"than",
"the",
"last",
"compaction",
"time",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/MRCompactorJobPropCreator.java#L314-L335 |
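The same watermark idea, minus the Hadoop FileSystem and Joda-Time dependencies of the method above, can be sketched against java.nio: walk a folder and keep the files modified after a recorded "last compaction" instant.

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.time.Instant;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class NewerFilesSketch {
  // Local-filesystem analogue of getNewDataInFolder: collect files whose
  // modification time is after a recorded "last compaction" instant.
  static List<Path> newerThan(Path folder, Instant lastCompaction) throws IOException {
    List<Path> newer = new ArrayList<>();
    try (Stream<Path> walk = Files.walk(folder)) {
      for (Path p : walk.filter(Files::isRegularFile).collect(Collectors.toList())) {
        if (Files.getLastModifiedTime(p).toInstant().isAfter(lastCompaction)) {
          newer.add(p);
        }
      }
    }
    return newer;
  }

  public static void main(String[] args) throws IOException {
    Path dir = Files.createTempDirectory("input");
    Files.createFile(dir.resolve("part-0.avro"));
    // With the watermark in the past, the freshly created file is reported as new.
    System.out.println(newerThan(dir, Instant.EPOCH));
  }
}
```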
25,748 | apache/incubator-gobblin | gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/task/HiveConverterUtils.java | HiveConverterUtils.generateStagingCTASStatement | public static String generateStagingCTASStatement(HiveDatasetFinder.DbAndTable outputDbAndTable,
String sourceQuery, StorageFormat storageFormat, String outputTableLocation) {
Preconditions.checkArgument(!Strings.isNullOrEmpty(outputDbAndTable.getDb()) &&
!Strings.isNullOrEmpty(outputDbAndTable.getTable()), "Invalid output db and table " + outputDbAndTable);
return String.format("CREATE TEMPORARY TABLE `%s`.`%s` STORED AS %s LOCATION '%s' AS %s", outputDbAndTable.getDb(),
outputDbAndTable.getTable(), storageFormat.getHiveName(), outputTableLocation, sourceQuery);
} | java | public static String generateStagingCTASStatement(HiveDatasetFinder.DbAndTable outputDbAndTable,
String sourceQuery, StorageFormat storageFormat, String outputTableLocation) {
Preconditions.checkArgument(!Strings.isNullOrEmpty(outputDbAndTable.getDb()) &&
!Strings.isNullOrEmpty(outputDbAndTable.getTable()), "Invalid output db and table " + outputDbAndTable);
return String.format("CREATE TEMPORARY TABLE `%s`.`%s` STORED AS %s LOCATION '%s' AS %s", outputDbAndTable.getDb(),
outputDbAndTable.getTable(), storageFormat.getHiveName(), outputTableLocation, sourceQuery);
} | [
"public",
"static",
"String",
"generateStagingCTASStatement",
"(",
"HiveDatasetFinder",
".",
"DbAndTable",
"outputDbAndTable",
",",
"String",
"sourceQuery",
",",
"StorageFormat",
"storageFormat",
",",
"String",
"outputTableLocation",
")",
"{",
"Preconditions",
".",
"checkArgument",
"(",
"!",
"Strings",
".",
"isNullOrEmpty",
"(",
"outputDbAndTable",
".",
"getDb",
"(",
")",
")",
"&&",
"!",
"Strings",
".",
"isNullOrEmpty",
"(",
"outputDbAndTable",
".",
"getTable",
"(",
")",
")",
",",
"\"Invalid output db and table \"",
"+",
"outputDbAndTable",
")",
";",
"return",
"String",
".",
"format",
"(",
"\"CREATE TEMPORARY TABLE `%s`.`%s` STORED AS %s LOCATION '%s' AS %s\"",
",",
"outputDbAndTable",
".",
"getDb",
"(",
")",
",",
"outputDbAndTable",
".",
"getTable",
"(",
")",
",",
"storageFormat",
".",
"getHiveName",
"(",
")",
",",
"outputTableLocation",
",",
"sourceQuery",
")",
";",
"}"
] | Generates a CTAS statement to dump the results of a query into a new table.
@param outputDbAndTable output db and table where contents should be written.
@param sourceQuery query to materialize.
@param storageFormat format of output table.
@param outputTableLocation location where files of output table should be written. | [
"Generates",
"a",
"CTAS",
"statement",
"to",
"dump",
"the",
"results",
"of",
"a",
"query",
"into",
"a",
"new",
"table",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/task/HiveConverterUtils.java#L183-L190 |
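Plugging hypothetical values into the format string copied from the method shows the kind of statement it emits:

```java
public class CtasSketch {
  public static void main(String[] args) {
    // Hypothetical inputs; the format string is copied from generateStagingCTASStatement.
    String db = "staging_db", table = "events_staging";
    String storageFormat = "ORC", location = "/tmp/staging/events";
    String sourceQuery = "SELECT * FROM `prod_db`.`events`";
    String ctas = String.format(
        "CREATE TEMPORARY TABLE `%s`.`%s` STORED AS %s LOCATION '%s' AS %s",
        db, table, storageFormat, location, sourceQuery);
    System.out.println(ctas);
    // CREATE TEMPORARY TABLE `staging_db`.`events_staging` STORED AS ORC
    //   LOCATION '/tmp/staging/events' AS SELECT * FROM `prod_db`.`events`
  }
}
```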
25,749 | apache/incubator-gobblin | gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/task/HiveConverterUtils.java | HiveConverterUtils.generateTableCopy | public static String generateTableCopy(
String inputTblName,
String outputTblName,
String inputDbName,
String outputDbName,
Optional<Map<String, String>> optionalPartitionDMLInfo) {
Preconditions.checkArgument(StringUtils.isNotBlank(inputTblName));
Preconditions.checkArgument(StringUtils.isNotBlank(outputTblName));
Preconditions.checkArgument(StringUtils.isNotBlank(inputDbName));
Preconditions.checkArgument(StringUtils.isNotBlank(outputDbName));
StringBuilder dmlQuery = new StringBuilder();
// Insert query
dmlQuery.append(String.format("INSERT OVERWRITE TABLE `%s`.`%s` %n", outputDbName, outputTblName));
if (optionalPartitionDMLInfo.isPresent() && optionalPartitionDMLInfo.get().size() > 0) {
// Partition details
dmlQuery.append(partitionKeyValues(optionalPartitionDMLInfo));
}
dmlQuery.append(String.format("SELECT * FROM `%s`.`%s`", inputDbName, inputTblName));
if (optionalPartitionDMLInfo.isPresent()) {
if (optionalPartitionDMLInfo.get().size() > 0) {
dmlQuery.append(" WHERE ");
String partitionsAndValues = optionalPartitionDMLInfo.get().entrySet().stream()
.map(e -> "`" + e.getKey() + "`='" + e.getValue() + "'")
.collect(joining(" AND "));
dmlQuery.append(partitionsAndValues);
}
}
return dmlQuery.toString();
} | java | public static String generateTableCopy(
String inputTblName,
String outputTblName,
String inputDbName,
String outputDbName,
Optional<Map<String, String>> optionalPartitionDMLInfo) {
Preconditions.checkArgument(StringUtils.isNotBlank(inputTblName));
Preconditions.checkArgument(StringUtils.isNotBlank(outputTblName));
Preconditions.checkArgument(StringUtils.isNotBlank(inputDbName));
Preconditions.checkArgument(StringUtils.isNotBlank(outputDbName));
StringBuilder dmlQuery = new StringBuilder();
// Insert query
dmlQuery.append(String.format("INSERT OVERWRITE TABLE `%s`.`%s` %n", outputDbName, outputTblName));
if (optionalPartitionDMLInfo.isPresent() && optionalPartitionDMLInfo.get().size() > 0) {
// Partition details
dmlQuery.append(partitionKeyValues(optionalPartitionDMLInfo));
}
dmlQuery.append(String.format("SELECT * FROM `%s`.`%s`", inputDbName, inputTblName));
if (optionalPartitionDMLInfo.isPresent()) {
if (optionalPartitionDMLInfo.get().size() > 0) {
dmlQuery.append(" WHERE ");
String partitionsAndValues = optionalPartitionDMLInfo.get().entrySet().stream()
.map(e -> "`" + e.getKey() + "`='" + e.getValue() + "'")
.collect(joining(" AND "));
dmlQuery.append(partitionsAndValues);
}
}
return dmlQuery.toString();
} | [
"public",
"static",
"String",
"generateTableCopy",
"(",
"String",
"inputTblName",
",",
"String",
"outputTblName",
",",
"String",
"inputDbName",
",",
"String",
"outputDbName",
",",
"Optional",
"<",
"Map",
"<",
"String",
",",
"String",
">",
">",
"optionalPartitionDMLInfo",
")",
"{",
"Preconditions",
".",
"checkArgument",
"(",
"StringUtils",
".",
"isNotBlank",
"(",
"inputTblName",
")",
")",
";",
"Preconditions",
".",
"checkArgument",
"(",
"StringUtils",
".",
"isNotBlank",
"(",
"outputTblName",
")",
")",
";",
"Preconditions",
".",
"checkArgument",
"(",
"StringUtils",
".",
"isNotBlank",
"(",
"inputDbName",
")",
")",
";",
"Preconditions",
".",
"checkArgument",
"(",
"StringUtils",
".",
"isNotBlank",
"(",
"outputDbName",
")",
")",
";",
"StringBuilder",
"dmlQuery",
"=",
"new",
"StringBuilder",
"(",
")",
";",
"// Insert query",
"dmlQuery",
".",
"append",
"(",
"String",
".",
"format",
"(",
"\"INSERT OVERWRITE TABLE `%s`.`%s` %n\"",
",",
"outputDbName",
",",
"outputTblName",
")",
")",
";",
"if",
"(",
"optionalPartitionDMLInfo",
".",
"isPresent",
"(",
")",
"&&",
"optionalPartitionDMLInfo",
".",
"get",
"(",
")",
".",
"size",
"(",
")",
">",
"0",
")",
"{",
"// Partition details",
"dmlQuery",
".",
"append",
"(",
"partitionKeyValues",
"(",
"optionalPartitionDMLInfo",
")",
")",
";",
"}",
"dmlQuery",
".",
"append",
"(",
"String",
".",
"format",
"(",
"\"SELECT * FROM `%s`.`%s`\"",
",",
"inputDbName",
",",
"inputTblName",
")",
")",
";",
"if",
"(",
"optionalPartitionDMLInfo",
".",
"isPresent",
"(",
")",
")",
"{",
"if",
"(",
"optionalPartitionDMLInfo",
".",
"get",
"(",
")",
".",
"size",
"(",
")",
">",
"0",
")",
"{",
"dmlQuery",
".",
"append",
"(",
"\" WHERE \"",
")",
";",
"String",
"partitionsAndValues",
"=",
"optionalPartitionDMLInfo",
".",
"get",
"(",
")",
".",
"entrySet",
"(",
")",
".",
"stream",
"(",
")",
".",
"map",
"(",
"e",
"->",
"\"`\"",
"+",
"e",
".",
"getKey",
"(",
")",
"+",
"\"`='\"",
"+",
"e",
".",
"getValue",
"(",
")",
"+",
"\"'\"",
")",
".",
"collect",
"(",
"joining",
"(",
"\" AND \"",
")",
")",
";",
"dmlQuery",
".",
"append",
"(",
"partitionsAndValues",
")",
";",
"}",
"}",
"return",
"dmlQuery",
".",
"toString",
"(",
")",
";",
"}"
] | Fills data from input table into output table.
@param inputTblName input hive table name
@param outputTblName output hive table name
@param inputDbName input hive database name
@param outputDbName output hive database name
@param optionalPartitionDMLInfo input hive table's partition's name and value
@return Hive query string | [
"Fills",
"data",
"from",
"input",
"table",
"into",
"output",
"table",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/task/HiveConverterUtils.java#L201-L234 |
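With a hypothetical single-partition copy, the pieces shown in the method assemble into the DML below; the PARTITION clause produced by the unshown partitionKeyValues helper is omitted here, since its body is not in the row.

```java
import java.util.LinkedHashMap;
import java.util.Map;
import static java.util.stream.Collectors.joining;

public class TableCopySketch {
  public static void main(String[] args) {
    // Hypothetical single-partition copy between made-up databases/tables.
    Map<String, String> partition = new LinkedHashMap<>();
    partition.put("datepartition", "2019-01-01");

    StringBuilder dml = new StringBuilder();
    dml.append(String.format("INSERT OVERWRITE TABLE `%s`.`%s` %n", "dst_db", "dst_tbl"));
    dml.append(String.format("SELECT * FROM `%s`.`%s`", "src_db", "src_tbl"));
    dml.append(" WHERE ");
    dml.append(partition.entrySet().stream()
        .map(e -> "`" + e.getKey() + "`='" + e.getValue() + "'")
        .collect(joining(" AND ")));
    System.out.println(dml);
    // INSERT OVERWRITE TABLE `dst_db`.`dst_tbl`
    // SELECT * FROM `src_db`.`src_tbl` WHERE `datepartition`='2019-01-01'
  }
}
```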
25,750 | apache/incubator-gobblin | gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/task/HiveConverterUtils.java | HiveConverterUtils.populatePartitionInfo | public static void populatePartitionInfo(HiveProcessingEntity conversionEntity, Map<String, String> partitionsDDLInfo,
Map<String, String> partitionsDMLInfo) {
String partitionsInfoString = null;
String partitionsTypeString = null;
if (conversionEntity.getPartition().isPresent()) {
partitionsInfoString = conversionEntity.getPartition().get().getName();
partitionsTypeString = conversionEntity.getPartition().get().getSchema().getProperty("partition_columns.types");
}
if (StringUtils.isNotBlank(partitionsInfoString) || StringUtils.isNotBlank(partitionsTypeString)) {
if (StringUtils.isBlank(partitionsInfoString) || StringUtils.isBlank(partitionsTypeString)) {
throw new IllegalArgumentException("Both partitions info and partitions must be present, if one is specified");
}
List<String> pInfo = Splitter.on(HIVE_PARTITIONS_INFO).omitEmptyStrings().trimResults().splitToList(partitionsInfoString);
List<String> pType = Splitter.on(HIVE_PARTITIONS_TYPE).omitEmptyStrings().trimResults().splitToList(partitionsTypeString);
log.debug("PartitionsInfoString: " + partitionsInfoString);
log.debug("PartitionsTypeString: " + partitionsTypeString);
if (pInfo.size() != pType.size()) {
throw new IllegalArgumentException("partitions info and partitions type list should of same size");
}
for (int i = 0; i < pInfo.size(); i++) {
List<String> partitionInfoParts = Splitter.on("=").omitEmptyStrings().trimResults().splitToList(pInfo.get(i));
String partitionType = pType.get(i);
if (partitionInfoParts.size() != 2) {
throw new IllegalArgumentException(
String.format("Partition details should be of the format partitionName=partitionValue. Recieved: %s", pInfo.get(i)));
}
partitionsDDLInfo.put(partitionInfoParts.get(0), partitionType);
partitionsDMLInfo.put(partitionInfoParts.get(0), partitionInfoParts.get(1));
}
}
} | java | public static void populatePartitionInfo(HiveProcessingEntity conversionEntity, Map<String, String> partitionsDDLInfo,
Map<String, String> partitionsDMLInfo) {
String partitionsInfoString = null;
String partitionsTypeString = null;
if (conversionEntity.getPartition().isPresent()) {
partitionsInfoString = conversionEntity.getPartition().get().getName();
partitionsTypeString = conversionEntity.getPartition().get().getSchema().getProperty("partition_columns.types");
}
if (StringUtils.isNotBlank(partitionsInfoString) || StringUtils.isNotBlank(partitionsTypeString)) {
if (StringUtils.isBlank(partitionsInfoString) || StringUtils.isBlank(partitionsTypeString)) {
throw new IllegalArgumentException("Both partitions info and partitions must be present, if one is specified");
}
List<String> pInfo = Splitter.on(HIVE_PARTITIONS_INFO).omitEmptyStrings().trimResults().splitToList(partitionsInfoString);
List<String> pType = Splitter.on(HIVE_PARTITIONS_TYPE).omitEmptyStrings().trimResults().splitToList(partitionsTypeString);
log.debug("PartitionsInfoString: " + partitionsInfoString);
log.debug("PartitionsTypeString: " + partitionsTypeString);
if (pInfo.size() != pType.size()) {
throw new IllegalArgumentException("partitions info and partitions type list should of same size");
}
for (int i = 0; i < pInfo.size(); i++) {
List<String> partitionInfoParts = Splitter.on("=").omitEmptyStrings().trimResults().splitToList(pInfo.get(i));
String partitionType = pType.get(i);
if (partitionInfoParts.size() != 2) {
throw new IllegalArgumentException(
String.format("Partition details should be of the format partitionName=partitionValue. Recieved: %s", pInfo.get(i)));
}
partitionsDDLInfo.put(partitionInfoParts.get(0), partitionType);
partitionsDMLInfo.put(partitionInfoParts.get(0), partitionInfoParts.get(1));
}
}
} | [
"public",
"static",
"void",
"populatePartitionInfo",
"(",
"HiveProcessingEntity",
"conversionEntity",
",",
"Map",
"<",
"String",
",",
"String",
">",
"partitionsDDLInfo",
",",
"Map",
"<",
"String",
",",
"String",
">",
"partitionsDMLInfo",
")",
"{",
"String",
"partitionsInfoString",
"=",
"null",
";",
"String",
"partitionsTypeString",
"=",
"null",
";",
"if",
"(",
"conversionEntity",
".",
"getPartition",
"(",
")",
".",
"isPresent",
"(",
")",
")",
"{",
"partitionsInfoString",
"=",
"conversionEntity",
".",
"getPartition",
"(",
")",
".",
"get",
"(",
")",
".",
"getName",
"(",
")",
";",
"partitionsTypeString",
"=",
"conversionEntity",
".",
"getPartition",
"(",
")",
".",
"get",
"(",
")",
".",
"getSchema",
"(",
")",
".",
"getProperty",
"(",
"\"partition_columns.types\"",
")",
";",
"}",
"if",
"(",
"StringUtils",
".",
"isNotBlank",
"(",
"partitionsInfoString",
")",
"||",
"StringUtils",
".",
"isNotBlank",
"(",
"partitionsTypeString",
")",
")",
"{",
"if",
"(",
"StringUtils",
".",
"isBlank",
"(",
"partitionsInfoString",
")",
"||",
"StringUtils",
".",
"isBlank",
"(",
"partitionsTypeString",
")",
")",
"{",
"throw",
"new",
"IllegalArgumentException",
"(",
"\"Both partitions info and partitions must be present, if one is specified\"",
")",
";",
"}",
"List",
"<",
"String",
">",
"pInfo",
"=",
"Splitter",
".",
"on",
"(",
"HIVE_PARTITIONS_INFO",
")",
".",
"omitEmptyStrings",
"(",
")",
".",
"trimResults",
"(",
")",
".",
"splitToList",
"(",
"partitionsInfoString",
")",
";",
"List",
"<",
"String",
">",
"pType",
"=",
"Splitter",
".",
"on",
"(",
"HIVE_PARTITIONS_TYPE",
")",
".",
"omitEmptyStrings",
"(",
")",
".",
"trimResults",
"(",
")",
".",
"splitToList",
"(",
"partitionsTypeString",
")",
";",
"log",
".",
"debug",
"(",
"\"PartitionsInfoString: \"",
"+",
"partitionsInfoString",
")",
";",
"log",
".",
"debug",
"(",
"\"PartitionsTypeString: \"",
"+",
"partitionsTypeString",
")",
";",
"if",
"(",
"pInfo",
".",
"size",
"(",
")",
"!=",
"pType",
".",
"size",
"(",
")",
")",
"{",
"throw",
"new",
"IllegalArgumentException",
"(",
"\"partitions info and partitions type list should of same size\"",
")",
";",
"}",
"for",
"(",
"int",
"i",
"=",
"0",
";",
"i",
"<",
"pInfo",
".",
"size",
"(",
")",
";",
"i",
"++",
")",
"{",
"List",
"<",
"String",
">",
"partitionInfoParts",
"=",
"Splitter",
".",
"on",
"(",
"\"=\"",
")",
".",
"omitEmptyStrings",
"(",
")",
".",
"trimResults",
"(",
")",
".",
"splitToList",
"(",
"pInfo",
".",
"get",
"(",
"i",
")",
")",
";",
"String",
"partitionType",
"=",
"pType",
".",
"get",
"(",
"i",
")",
";",
"if",
"(",
"partitionInfoParts",
".",
"size",
"(",
")",
"!=",
"2",
")",
"{",
"throw",
"new",
"IllegalArgumentException",
"(",
"String",
".",
"format",
"(",
"\"Partition details should be of the format partitionName=partitionValue. Recieved: %s\"",
",",
"pInfo",
".",
"get",
"(",
"i",
")",
")",
")",
";",
"}",
"partitionsDDLInfo",
".",
"put",
"(",
"partitionInfoParts",
".",
"get",
"(",
"0",
")",
",",
"partitionType",
")",
";",
"partitionsDMLInfo",
".",
"put",
"(",
"partitionInfoParts",
".",
"get",
"(",
"0",
")",
",",
"partitionInfoParts",
".",
"get",
"(",
"1",
")",
")",
";",
"}",
"}",
"}"
] | It fills partitionsDDLInfo and partitionsDMLInfo with the partition information
@param conversionEntity conversion entity to read the partition information from
@param partitionsDDLInfo partition type information, to be filled by this method
@param partitionsDMLInfo partition key-value pair, to be filled by this method | [
"It",
"fills",
"partitionsDDLInfo",
"and",
"partitionsDMLInfo",
"with",
"the",
"partition",
"information"
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/task/HiveConverterUtils.java#L251-L285 |
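A runnable sketch of the parsing step, using plain String.split in place of the Guava Splitters; the HIVE_PARTITIONS_INFO and HIVE_PARTITIONS_TYPE separator values are not shown in the row, so '/' and ':' (the usual Hive conventions for partition names and partition_columns.types) are assumed here.

```java
import java.util.HashMap;
import java.util.Map;

public class PartitionParseSketch {
  public static void main(String[] args) {
    // Hypothetical inputs shaped like Partition.getName() and partition_columns.types.
    String partitionName = "datepartition=2019-01-01/size=large";
    String partitionTypes = "string:string";

    String[] pInfo = partitionName.split("/");
    String[] pType = partitionTypes.split(":");
    if (pInfo.length != pType.length) {
      throw new IllegalArgumentException("partitions info and partitions type lists should be of the same size");
    }

    Map<String, String> ddlInfo = new HashMap<>(); // column -> hive type
    Map<String, String> dmlInfo = new HashMap<>(); // column -> value
    for (int i = 0; i < pInfo.length; i++) {
      String[] parts = pInfo[i].split("=");
      ddlInfo.put(parts[0], pType[i]);
      dmlInfo.put(parts[0], parts[1]);
    }
    System.out.println(ddlInfo); // {datepartition=string, size=string}
    System.out.println(dmlInfo); // {datepartition=2019-01-01, size=large}
  }
}
```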
25,751 | apache/incubator-gobblin | gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/task/HiveConverterUtils.java | HiveConverterUtils.createStagingDirectory | public static void createStagingDirectory(FileSystem fs, String destination, HiveProcessingEntity conversionEntity,
WorkUnitState workUnit) {
/*
* Create staging data location with the same permissions as source data location
*
* Note that hive can also automatically create the non-existing directories but it does not
* seem to create it with the desired permissions.
* According to hive docs permissions for newly created directories/files can be controlled using uMask like,
*
* SET hive.warehouse.subdir.inherit.perms=false;
* SET fs.permissions.umask-mode=022;
* Upon testing, this did not work
*/
Path destinationPath = new Path(destination);
try {
FsPermission permission;
String group = null;
if (conversionEntity.getTable().getDataLocation() != null) {
FileStatus sourceDataFileStatus = fs.getFileStatus(conversionEntity.getTable().getDataLocation());
permission = sourceDataFileStatus.getPermission();
group = sourceDataFileStatus.getGroup();
} else {
permission = FsPermission.getDefault();
}
if (!fs.mkdirs(destinationPath, permission)) {
throw new RuntimeException(String.format("Failed to create path %s with permissions %s",
destinationPath, permission));
} else {
fs.setPermission(destinationPath, permission);
// Set the same group as source
if (group != null && !workUnit.getPropAsBoolean(HIVE_DATASET_DESTINATION_SKIP_SETGROUP, DEFAULT_HIVE_DATASET_DESTINATION_SKIP_SETGROUP)) {
fs.setOwner(destinationPath, null, group);
}
log.info(String.format("Created %s with permissions %s and group %s", destinationPath, permission, group));
}
} catch (IOException e) {
Throwables.propagate(e);
}
} | java | public static void createStagingDirectory(FileSystem fs, String destination, HiveProcessingEntity conversionEntity,
WorkUnitState workUnit) {
/*
* Create staging data location with the same permissions as source data location
*
* Note that hive can also automatically create the non-existing directories but it does not
* seem to create it with the desired permissions.
* According to hive docs permissions for newly created directories/files can be controlled using uMask like,
*
* SET hive.warehouse.subdir.inherit.perms=false;
* SET fs.permissions.umask-mode=022;
* Upon testing, this did not work
*/
Path destinationPath = new Path(destination);
try {
FsPermission permission;
String group = null;
if (conversionEntity.getTable().getDataLocation() != null) {
FileStatus sourceDataFileStatus = fs.getFileStatus(conversionEntity.getTable().getDataLocation());
permission = sourceDataFileStatus.getPermission();
group = sourceDataFileStatus.getGroup();
} else {
permission = FsPermission.getDefault();
}
if (!fs.mkdirs(destinationPath, permission)) {
throw new RuntimeException(String.format("Failed to create path %s with permissions %s",
destinationPath, permission));
} else {
fs.setPermission(destinationPath, permission);
// Set the same group as source
if (group != null && !workUnit.getPropAsBoolean(HIVE_DATASET_DESTINATION_SKIP_SETGROUP, DEFAULT_HIVE_DATASET_DESTINATION_SKIP_SETGROUP)) {
fs.setOwner(destinationPath, null, group);
}
log.info(String.format("Created %s with permissions %s and group %s", destinationPath, permission, group));
}
} catch (IOException e) {
Throwables.propagate(e);
}
} | [
"public",
"static",
"void",
"createStagingDirectory",
"(",
"FileSystem",
"fs",
",",
"String",
"destination",
",",
"HiveProcessingEntity",
"conversionEntity",
",",
"WorkUnitState",
"workUnit",
")",
"{",
"/*\n * Create staging data location with the same permissions as source data location\n *\n * Note that hive can also automatically create the non-existing directories but it does not\n * seem to create it with the desired permissions.\n * According to hive docs permissions for newly created directories/files can be controlled using uMask like,\n *\n * SET hive.warehouse.subdir.inherit.perms=false;\n * SET fs.permissions.umask-mode=022;\n * Upon testing, this did not work\n */",
"Path",
"destinationPath",
"=",
"new",
"Path",
"(",
"destination",
")",
";",
"try",
"{",
"FsPermission",
"permission",
";",
"String",
"group",
"=",
"null",
";",
"if",
"(",
"conversionEntity",
".",
"getTable",
"(",
")",
".",
"getDataLocation",
"(",
")",
"!=",
"null",
")",
"{",
"FileStatus",
"sourceDataFileStatus",
"=",
"fs",
".",
"getFileStatus",
"(",
"conversionEntity",
".",
"getTable",
"(",
")",
".",
"getDataLocation",
"(",
")",
")",
";",
"permission",
"=",
"sourceDataFileStatus",
".",
"getPermission",
"(",
")",
";",
"group",
"=",
"sourceDataFileStatus",
".",
"getGroup",
"(",
")",
";",
"}",
"else",
"{",
"permission",
"=",
"FsPermission",
".",
"getDefault",
"(",
")",
";",
"}",
"if",
"(",
"!",
"fs",
".",
"mkdirs",
"(",
"destinationPath",
",",
"permission",
")",
")",
"{",
"throw",
"new",
"RuntimeException",
"(",
"String",
".",
"format",
"(",
"\"Failed to create path %s with permissions %s\"",
",",
"destinationPath",
",",
"permission",
")",
")",
";",
"}",
"else",
"{",
"fs",
".",
"setPermission",
"(",
"destinationPath",
",",
"permission",
")",
";",
"// Set the same group as source",
"if",
"(",
"group",
"!=",
"null",
"&&",
"!",
"workUnit",
".",
"getPropAsBoolean",
"(",
"HIVE_DATASET_DESTINATION_SKIP_SETGROUP",
",",
"DEFAULT_HIVE_DATASET_DESTINATION_SKIP_SETGROUP",
")",
")",
"{",
"fs",
".",
"setOwner",
"(",
"destinationPath",
",",
"null",
",",
"group",
")",
";",
"}",
"log",
".",
"info",
"(",
"String",
".",
"format",
"(",
"\"Created %s with permissions %s and group %s\"",
",",
"destinationPath",
",",
"permission",
",",
"group",
")",
")",
";",
"}",
"}",
"catch",
"(",
"IOException",
"e",
")",
"{",
"Throwables",
".",
"propagate",
"(",
"e",
")",
";",
"}",
"}"
] | Creates a staging directory with the permission as in source directory.
@param fs filesystem object
@param destination staging directory location
@param conversionEntity conversion entity used to get source directory permissions
@param workUnit workunit | [
"Creates",
"a",
"staging",
"directory",
"with",
"the",
"permission",
"as",
"in",
"source",
"directory",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/task/HiveConverterUtils.java#L294-L333 |
25,752 | apache/incubator-gobblin | gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/task/HiveConverterUtils.java | HiveConverterUtils.getDestinationTableMeta | public static Pair<Optional<Table>, Optional<List<Partition>>> getDestinationTableMeta(String dbName,
String tableName, Properties props) {
Optional<Table> table = Optional.<Table>absent();
Optional<List<Partition>> partitions = Optional.<List<Partition>>absent();
try {
HiveMetastoreClientPool pool = HiveMetastoreClientPool.get(props,
Optional.fromNullable(props.getProperty(HiveDatasetFinder.HIVE_METASTORE_URI_KEY)));
try (AutoReturnableObject<IMetaStoreClient> client = pool.getClient()) {
table = Optional.of(client.get().getTable(dbName, tableName));
if (table.isPresent()) {
org.apache.hadoop.hive.ql.metadata.Table qlTable = new org.apache.hadoop.hive.ql.metadata.Table(table.get());
if (HiveUtils.isPartitioned(qlTable)) {
partitions = Optional.of(HiveUtils.getPartitions(client.get(), qlTable, Optional.<String>absent()));
}
}
}
} catch (NoSuchObjectException e) {
return ImmutablePair.of(table, partitions);
} catch (IOException | TException e) {
throw new RuntimeException("Could not fetch destination table metadata", e);
}
return ImmutablePair.of(table, partitions);
} | java | public static Pair<Optional<Table>, Optional<List<Partition>>> getDestinationTableMeta(String dbName,
String tableName, Properties props) {
Optional<Table> table = Optional.<Table>absent();
Optional<List<Partition>> partitions = Optional.<List<Partition>>absent();
try {
HiveMetastoreClientPool pool = HiveMetastoreClientPool.get(props,
Optional.fromNullable(props.getProperty(HiveDatasetFinder.HIVE_METASTORE_URI_KEY)));
try (AutoReturnableObject<IMetaStoreClient> client = pool.getClient()) {
table = Optional.of(client.get().getTable(dbName, tableName));
if (table.isPresent()) {
org.apache.hadoop.hive.ql.metadata.Table qlTable = new org.apache.hadoop.hive.ql.metadata.Table(table.get());
if (HiveUtils.isPartitioned(qlTable)) {
partitions = Optional.of(HiveUtils.getPartitions(client.get(), qlTable, Optional.<String>absent()));
}
}
}
} catch (NoSuchObjectException e) {
return ImmutablePair.of(table, partitions);
} catch (IOException | TException e) {
throw new RuntimeException("Could not fetch destination table metadata", e);
}
return ImmutablePair.of(table, partitions);
} | [
"public",
"static",
"Pair",
"<",
"Optional",
"<",
"Table",
">",
",",
"Optional",
"<",
"List",
"<",
"Partition",
">",
">",
">",
"getDestinationTableMeta",
"(",
"String",
"dbName",
",",
"String",
"tableName",
",",
"Properties",
"props",
")",
"{",
"Optional",
"<",
"Table",
">",
"table",
"=",
"Optional",
".",
"<",
"Table",
">",
"absent",
"(",
")",
";",
"Optional",
"<",
"List",
"<",
"Partition",
">",
">",
"partitions",
"=",
"Optional",
".",
"<",
"List",
"<",
"Partition",
">",
">",
"absent",
"(",
")",
";",
"try",
"{",
"HiveMetastoreClientPool",
"pool",
"=",
"HiveMetastoreClientPool",
".",
"get",
"(",
"props",
",",
"Optional",
".",
"fromNullable",
"(",
"props",
".",
"getProperty",
"(",
"HiveDatasetFinder",
".",
"HIVE_METASTORE_URI_KEY",
")",
")",
")",
";",
"try",
"(",
"AutoReturnableObject",
"<",
"IMetaStoreClient",
">",
"client",
"=",
"pool",
".",
"getClient",
"(",
")",
")",
"{",
"table",
"=",
"Optional",
".",
"of",
"(",
"client",
".",
"get",
"(",
")",
".",
"getTable",
"(",
"dbName",
",",
"tableName",
")",
")",
";",
"if",
"(",
"table",
".",
"isPresent",
"(",
")",
")",
"{",
"org",
".",
"apache",
".",
"hadoop",
".",
"hive",
".",
"ql",
".",
"metadata",
".",
"Table",
"qlTable",
"=",
"new",
"org",
".",
"apache",
".",
"hadoop",
".",
"hive",
".",
"ql",
".",
"metadata",
".",
"Table",
"(",
"table",
".",
"get",
"(",
")",
")",
";",
"if",
"(",
"HiveUtils",
".",
"isPartitioned",
"(",
"qlTable",
")",
")",
"{",
"partitions",
"=",
"Optional",
".",
"of",
"(",
"HiveUtils",
".",
"getPartitions",
"(",
"client",
".",
"get",
"(",
")",
",",
"qlTable",
",",
"Optional",
".",
"<",
"String",
">",
"absent",
"(",
")",
")",
")",
";",
"}",
"}",
"}",
"}",
"catch",
"(",
"NoSuchObjectException",
"e",
")",
"{",
"return",
"ImmutablePair",
".",
"of",
"(",
"table",
",",
"partitions",
")",
";",
"}",
"catch",
"(",
"IOException",
"|",
"TException",
"e",
")",
"{",
"throw",
"new",
"RuntimeException",
"(",
"\"Could not fetch destination table metadata\"",
",",
"e",
")",
";",
"}",
"return",
"ImmutablePair",
".",
"of",
"(",
"table",
",",
"partitions",
")",
";",
"}"
] | Returns a pair of Hive table and its partitions
@param dbName db name
@param tableName table name
@param props properties
@return a pair of Hive table and its partitions
@throws DataConversionException | [
"Returns",
"a",
"pair",
"of",
"Hive",
"table",
"and",
"its",
"partitions"
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/conversion/hive/task/HiveConverterUtils.java#L425-L450 |
25,753 | apache/incubator-gobblin | gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/writer/commands/TeradataBufferedInserter.java | TeradataBufferedInserter.getColumnPosSqlTypes | private Map<Integer, Integer> getColumnPosSqlTypes() {
try {
final Map<Integer, Integer> columnPosSqlTypes = Maps.newHashMap();
ParameterMetaData pMetaData = this.insertPstmtForFixedBatch.getParameterMetaData();
for (int i = 1; i <= pMetaData.getParameterCount(); i++) {
columnPosSqlTypes.put(i, pMetaData.getParameterType(i));
}
return columnPosSqlTypes;
} catch (SQLException e) {
throw new RuntimeException("Cannot retrieve columns types for batch insert", e);
}
} | java | private Map<Integer, Integer> getColumnPosSqlTypes() {
try {
final Map<Integer, Integer> columnPosSqlTypes = Maps.newHashMap();
ParameterMetaData pMetaData = this.insertPstmtForFixedBatch.getParameterMetaData();
for (int i = 1; i <= pMetaData.getParameterCount(); i++) {
columnPosSqlTypes.put(i, pMetaData.getParameterType(i));
}
return columnPosSqlTypes;
} catch (SQLException e) {
throw new RuntimeException("Cannot retrieve columns types for batch insert", e);
}
} | [
"private",
"Map",
"<",
"Integer",
",",
"Integer",
">",
"getColumnPosSqlTypes",
"(",
")",
"{",
"try",
"{",
"final",
"Map",
"<",
"Integer",
",",
"Integer",
">",
"columnPosSqlTypes",
"=",
"Maps",
".",
"newHashMap",
"(",
")",
";",
"ParameterMetaData",
"pMetaData",
"=",
"this",
".",
"insertPstmtForFixedBatch",
".",
"getParameterMetaData",
"(",
")",
";",
"for",
"(",
"int",
"i",
"=",
"1",
";",
"i",
"<=",
"pMetaData",
".",
"getParameterCount",
"(",
")",
";",
"i",
"++",
")",
"{",
"columnPosSqlTypes",
".",
"put",
"(",
"i",
",",
"pMetaData",
".",
"getParameterType",
"(",
"i",
")",
")",
";",
"}",
"return",
"columnPosSqlTypes",
";",
"}",
"catch",
"(",
"SQLException",
"e",
")",
"{",
"throw",
"new",
"RuntimeException",
"(",
"\"Cannot retrieve columns types for batch insert\"",
",",
"e",
")",
";",
"}",
"}"
] | Creates a mapping between column positions and their data types
@return A map from each column position (1-based) to its SQL type code | [
"Creates",
"a",
"mapping",
"between",
"column",
"positions",
"and",
"their",
"data",
"types"
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-modules/gobblin-sql/src/main/java/org/apache/gobblin/writer/commands/TeradataBufferedInserter.java#L108-L119 |
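The same position-to-type mapping can be exercised against any JDBC driver; the sketch below assumes an in-memory H2 database on the classpath purely for demonstration.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ParameterMetaData;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.Map;

public class ParamTypesSketch {
  // Same idea as getColumnPosSqlTypes: map 1-based parameter positions to java.sql.Types codes.
  static Map<Integer, Integer> paramTypes(PreparedStatement ps) throws SQLException {
    Map<Integer, Integer> types = new HashMap<>();
    ParameterMetaData md = ps.getParameterMetaData();
    for (int i = 1; i <= md.getParameterCount(); i++) {
      types.put(i, md.getParameterType(i));
    }
    return types;
  }

  public static void main(String[] args) throws SQLException {
    // Assumes an in-memory H2 database on the classpath; any JDBC driver would do.
    try (Connection c = DriverManager.getConnection("jdbc:h2:mem:demo")) {
      c.createStatement().execute("CREATE TABLE t (name VARCHAR(32), qty INT)");
      try (PreparedStatement ps = c.prepareStatement("INSERT INTO t VALUES (?, ?)")) {
        System.out.println(paramTypes(ps)); // e.g. {1=12, 2=4} (Types.VARCHAR, Types.INTEGER)
      }
    }
  }
}
```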
25,754 | apache/incubator-gobblin | gobblin-core/src/main/java/org/apache/gobblin/writer/http/SalesforceRestWriter.java | SalesforceRestWriter.processBatchRequestResponse | private void processBatchRequestResponse(CloseableHttpResponse response) throws IOException,
UnexpectedResponseException {
String entityStr = EntityUtils.toString(response.getEntity());
int statusCode = response.getStatusLine().getStatusCode();
if (statusCode >= 400) {
throw new RuntimeException("Failed due to " + entityStr + " (Detail: "
+ ToStringBuilder.reflectionToString(response, ToStringStyle.SHORT_PREFIX_STYLE) + " )");
}
JsonObject jsonBody = new JsonParser().parse(entityStr).getAsJsonObject();
if (!jsonBody.get("hasErrors").getAsBoolean()) {
return;
}
JsonArray results = jsonBody.get("results").getAsJsonArray();
for (JsonElement jsonElem : results) {
JsonObject json = jsonElem.getAsJsonObject();
int subStatusCode = json.get("statusCode").getAsInt();
if (subStatusCode < 400) {
continue;
} else if (subStatusCode == 400
&& Operation.INSERT_ONLY_NOT_EXIST.equals(operation)) {
JsonElement resultJsonElem = json.get("result");
Preconditions.checkNotNull(resultJsonElem, "Error response should contain result property");
JsonObject resultJsonObject = resultJsonElem.getAsJsonArray().get(0).getAsJsonObject();
if (isDuplicate(resultJsonObject, subStatusCode)) {
continue;
}
}
throw new RuntimeException("Failed due to " + jsonBody + " (Detail: "
+ ToStringBuilder.reflectionToString(response, ToStringStyle.SHORT_PREFIX_STYLE) + " )");
}
} | java | private void processBatchRequestResponse(CloseableHttpResponse response) throws IOException,
UnexpectedResponseException {
String entityStr = EntityUtils.toString(response.getEntity());
int statusCode = response.getStatusLine().getStatusCode();
if (statusCode >= 400) {
throw new RuntimeException("Failed due to " + entityStr + " (Detail: "
+ ToStringBuilder.reflectionToString(response, ToStringStyle.SHORT_PREFIX_STYLE) + " )");
}
JsonObject jsonBody = new JsonParser().parse(entityStr).getAsJsonObject();
if (!jsonBody.get("hasErrors").getAsBoolean()) {
return;
}
JsonArray results = jsonBody.get("results").getAsJsonArray();
for (JsonElement jsonElem : results) {
JsonObject json = jsonElem.getAsJsonObject();
int subStatusCode = json.get("statusCode").getAsInt();
if (subStatusCode < 400) {
continue;
} else if (subStatusCode == 400
&& Operation.INSERT_ONLY_NOT_EXIST.equals(operation)) {
JsonElement resultJsonElem = json.get("result");
Preconditions.checkNotNull(resultJsonElem, "Error response should contain result property");
JsonObject resultJsonObject = resultJsonElem.getAsJsonArray().get(0).getAsJsonObject();
if (isDuplicate(resultJsonObject, subStatusCode)) {
continue;
}
}
throw new RuntimeException("Failed due to " + jsonBody + " (Detail: "
+ ToStringBuilder.reflectionToString(response, ToStringStyle.SHORT_PREFIX_STYLE) + " )");
}
} | [
"private",
"void",
"processBatchRequestResponse",
"(",
"CloseableHttpResponse",
"response",
")",
"throws",
"IOException",
",",
"UnexpectedResponseException",
"{",
"String",
"entityStr",
"=",
"EntityUtils",
".",
"toString",
"(",
"response",
".",
"getEntity",
"(",
")",
")",
";",
"int",
"statusCode",
"=",
"response",
".",
"getStatusLine",
"(",
")",
".",
"getStatusCode",
"(",
")",
";",
"if",
"(",
"statusCode",
">=",
"400",
")",
"{",
"throw",
"new",
"RuntimeException",
"(",
"\"Failed due to \"",
"+",
"entityStr",
"+",
"\" (Detail: \"",
"+",
"ToStringBuilder",
".",
"reflectionToString",
"(",
"response",
",",
"ToStringStyle",
".",
"SHORT_PREFIX_STYLE",
")",
"+",
"\" )\"",
")",
";",
"}",
"JsonObject",
"jsonBody",
"=",
"new",
"JsonParser",
"(",
")",
".",
"parse",
"(",
"entityStr",
")",
".",
"getAsJsonObject",
"(",
")",
";",
"if",
"(",
"!",
"jsonBody",
".",
"get",
"(",
"\"hasErrors\"",
")",
".",
"getAsBoolean",
"(",
")",
")",
"{",
"return",
";",
"}",
"JsonArray",
"results",
"=",
"jsonBody",
".",
"get",
"(",
"\"results\"",
")",
".",
"getAsJsonArray",
"(",
")",
";",
"for",
"(",
"JsonElement",
"jsonElem",
":",
"results",
")",
"{",
"JsonObject",
"json",
"=",
"jsonElem",
".",
"getAsJsonObject",
"(",
")",
";",
"int",
"subStatusCode",
"=",
"json",
".",
"get",
"(",
"\"statusCode\"",
")",
".",
"getAsInt",
"(",
")",
";",
"if",
"(",
"subStatusCode",
"<",
"400",
")",
"{",
"continue",
";",
"}",
"else",
"if",
"(",
"subStatusCode",
"==",
"400",
"&&",
"Operation",
".",
"INSERT_ONLY_NOT_EXIST",
".",
"equals",
"(",
"operation",
")",
")",
"{",
"JsonElement",
"resultJsonElem",
"=",
"json",
".",
"get",
"(",
"\"result\"",
")",
";",
"Preconditions",
".",
"checkNotNull",
"(",
"resultJsonElem",
",",
"\"Error response should contain result property\"",
")",
";",
"JsonObject",
"resultJsonObject",
"=",
"resultJsonElem",
".",
"getAsJsonArray",
"(",
")",
".",
"get",
"(",
"0",
")",
".",
"getAsJsonObject",
"(",
")",
";",
"if",
"(",
"isDuplicate",
"(",
"resultJsonObject",
",",
"subStatusCode",
")",
")",
"{",
"continue",
";",
"}",
"}",
"throw",
"new",
"RuntimeException",
"(",
"\"Failed due to \"",
"+",
"jsonBody",
"+",
"\" (Detail: \"",
"+",
"ToStringBuilder",
".",
"reflectionToString",
"(",
"response",
",",
"ToStringStyle",
".",
"SHORT_PREFIX_STYLE",
")",
"+",
"\" )\"",
")",
";",
"}",
"}"
] | Check results from the batch response; if any of the results is a failure, throw an exception.
@param response
@throws IOException
@throws UnexpectedResponseException | [
"Check",
"results",
"from",
"batch",
"response",
"if",
"any",
"of",
"the",
"results",
"is",
"failure",
"throw",
"exception",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-core/src/main/java/org/apache/gobblin/writer/http/SalesforceRestWriter.java#L326-L358 |
25,755 | apache/incubator-gobblin | gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka/serialize/MD5Digest.java | MD5Digest.fromString | public static MD5Digest fromString(String md5String) {
byte[] bytes;
try {
bytes = Hex.decodeHex(md5String.toCharArray());
return new MD5Digest(md5String, bytes);
} catch (DecoderException e) {
throw new IllegalArgumentException("Unable to convert md5string", e);
}
} | java | public static MD5Digest fromString(String md5String) {
byte[] bytes;
try {
bytes = Hex.decodeHex(md5String.toCharArray());
return new MD5Digest(md5String, bytes);
} catch (DecoderException e) {
throw new IllegalArgumentException("Unable to convert md5string", e);
}
} | [
"public",
"static",
"MD5Digest",
"fromString",
"(",
"String",
"md5String",
")",
"{",
"byte",
"[",
"]",
"bytes",
";",
"try",
"{",
"bytes",
"=",
"Hex",
".",
"decodeHex",
"(",
"md5String",
".",
"toCharArray",
"(",
")",
")",
";",
"return",
"new",
"MD5Digest",
"(",
"md5String",
",",
"bytes",
")",
";",
"}",
"catch",
"(",
"DecoderException",
"e",
")",
"{",
"throw",
"new",
"IllegalArgumentException",
"(",
"\"Unable to convert md5string\"",
",",
"e",
")",
";",
"}",
"}"
] | Static method to get an MD5Digest from a human-readable string representation
@param md5String
@return a filled out MD5Digest | [
"Static",
"method",
"to",
"get",
"an",
"MD5Digest",
"from",
"a",
"human",
"-",
"readable",
"string",
"representation"
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka/serialize/MD5Digest.java#L57-L65 |
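A round-trip sketch of the hex encoding fromString relies on, using the same commons-codec Hex utility plus a real MD5 from java.security:

```java
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import org.apache.commons.codec.DecoderException;
import org.apache.commons.codec.binary.Hex;

public class Md5HexSketch {
  public static void main(String[] args) throws NoSuchAlgorithmException, DecoderException {
    // Compute a real MD5, then round-trip it through the hex form fromString expects.
    byte[] digest = MessageDigest.getInstance("MD5").digest("hello".getBytes(StandardCharsets.UTF_8));
    String hex = Hex.encodeHexString(digest);        // 32 hex chars for the 16 digest bytes
    byte[] back = Hex.decodeHex(hex.toCharArray());  // what fromString does internally
    System.out.println(hex + " round-trips: " + MessageDigest.isEqual(digest, back));
  }
}
```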
25,756 | apache/incubator-gobblin | gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka/serialize/MD5Digest.java | MD5Digest.fromBytes | public static MD5Digest fromBytes(byte[] md5Bytes)
{
Preconditions.checkArgument(md5Bytes.length == MD5_BYTES_LENGTH,
"md5 bytes must be " + MD5_BYTES_LENGTH + " bytes in length, found " + md5Bytes.length + " bytes.");
String md5String = Hex.encodeHexString(md5Bytes);
return new MD5Digest(md5String, md5Bytes);
} | java | public static MD5Digest fromBytes(byte[] md5Bytes)
{
Preconditions.checkArgument(md5Bytes.length == MD5_BYTES_LENGTH,
"md5 bytes must be " + MD5_BYTES_LENGTH + " bytes in length, found " + md5Bytes.length + " bytes.");
String md5String = Hex.encodeHexString(md5Bytes);
return new MD5Digest(md5String, md5Bytes);
} | [
"public",
"static",
"MD5Digest",
"fromBytes",
"(",
"byte",
"[",
"]",
"md5Bytes",
")",
"{",
"Preconditions",
".",
"checkArgument",
"(",
"md5Bytes",
".",
"length",
"==",
"MD5_BYTES_LENGTH",
",",
"\"md5 bytes must be \"",
"+",
"MD5_BYTES_LENGTH",
"+",
"\" bytes in length, found \"",
"+",
"md5Bytes",
".",
"length",
"+",
"\" bytes.\"",
")",
";",
"String",
"md5String",
"=",
"Hex",
".",
"encodeHexString",
"(",
"md5Bytes",
")",
";",
"return",
"new",
"MD5Digest",
"(",
"md5String",
",",
"md5Bytes",
")",
";",
"}"
] | Static method to get an MD5Digest from a binary byte representation
@param md5Bytes
@return a filled out MD5Digest | [
"Static",
"method",
"to",
"get",
"an",
"MD5Digest",
"from",
"a",
"binary",
"byte",
"representation"
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka/serialize/MD5Digest.java#L72-L78 |
25,757 | apache/incubator-gobblin | gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka/serialize/MD5Digest.java | MD5Digest.fromBytes | public static MD5Digest fromBytes(byte[] md5Bytes, int offset) {
byte[] md5BytesCopy = Arrays.copyOfRange(md5Bytes, offset, offset + MD5_BYTES_LENGTH);
//TODO: Replace this with a version that encodes without needing a copy.
String md5String = Hex.encodeHexString(md5BytesCopy);
return new MD5Digest(md5String, md5BytesCopy);
} | java | public static MD5Digest fromBytes(byte[] md5Bytes, int offset) {
byte[] md5BytesCopy = Arrays.copyOfRange(md5Bytes, offset, offset + MD5_BYTES_LENGTH);
//TODO: Replace this with a version that encodes without needing a copy.
String md5String = Hex.encodeHexString(md5BytesCopy);
return new MD5Digest(md5String, md5BytesCopy);
} | [
"public",
"static",
"MD5Digest",
"fromBytes",
"(",
"byte",
"[",
"]",
"md5Bytes",
",",
"int",
"offset",
")",
"{",
"byte",
"[",
"]",
"md5BytesCopy",
"=",
"Arrays",
".",
"copyOfRange",
"(",
"md5Bytes",
",",
"offset",
",",
"offset",
"+",
"MD5_BYTES_LENGTH",
")",
";",
"//TODO: Replace this with a version that encodes without needing a copy.",
"String",
"md5String",
"=",
"Hex",
".",
"encodeHexString",
"(",
"md5BytesCopy",
")",
";",
"return",
"new",
"MD5Digest",
"(",
"md5String",
",",
"md5BytesCopy",
")",
";",
"}"
] | Static method to get an MD5Digest from a binary byte representation.
@param md5Bytes
@param offset in the byte array to start reading from
@return a filled out MD5Digest | [
"Static",
"method",
"to",
"get",
"an",
"MD5Digest",
"from",
"a",
"binary",
"byte",
"representation",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-modules/gobblin-kafka-common/src/main/java/org/apache/gobblin/kafka/serialize/MD5Digest.java#L86-L91 |
25,758 | apache/incubator-gobblin | gobblin-utility/src/main/java/org/apache/gobblin/util/request_allocation/ConcurrentBoundedPriorityIterable.java | ConcurrentBoundedPriorityIterable.add | public boolean add(T t) {
if (this.closed) {
throw new RuntimeException(
ConcurrentBoundedPriorityIterable.class.getSimpleName() + " is no longer accepting requests!");
}
AllocatedRequestsIteratorBase.RequestWithResourceRequirement<T> newElement =
new AllocatedRequestsIteratorBase.RequestWithResourceRequirement<>(t,
this.estimator.estimateRequirement(t, this.resourcePool));
boolean addedWorkunits = addImpl(newElement);
if (!addedWorkunits) {
this.rejectedElement = true;
}
return addedWorkunits;
} | java | public boolean add(T t) {
if (this.closed) {
throw new RuntimeException(
ConcurrentBoundedPriorityIterable.class.getSimpleName() + " is no longer accepting requests!");
}
AllocatedRequestsIteratorBase.RequestWithResourceRequirement<T> newElement =
new AllocatedRequestsIteratorBase.RequestWithResourceRequirement<>(t,
this.estimator.estimateRequirement(t, this.resourcePool));
boolean addedWorkunits = addImpl(newElement);
if (!addedWorkunits) {
this.rejectedElement = true;
}
return addedWorkunits;
} | [
"public",
"boolean",
"add",
"(",
"T",
"t",
")",
"{",
"if",
"(",
"this",
".",
"closed",
")",
"{",
"throw",
"new",
"RuntimeException",
"(",
"ConcurrentBoundedPriorityIterable",
".",
"class",
".",
"getSimpleName",
"(",
")",
"+",
"\" is no longer accepting requests!\"",
")",
";",
"}",
"AllocatedRequestsIteratorBase",
".",
"RequestWithResourceRequirement",
"<",
"T",
">",
"newElement",
"=",
"new",
"AllocatedRequestsIteratorBase",
".",
"RequestWithResourceRequirement",
"<>",
"(",
"t",
",",
"this",
".",
"estimator",
".",
"estimateRequirement",
"(",
"t",
",",
"this",
".",
"resourcePool",
")",
")",
";",
"boolean",
"addedWorkunits",
"=",
"addImpl",
"(",
"newElement",
")",
";",
"if",
"(",
"!",
"addedWorkunits",
")",
"{",
"this",
".",
"rejectedElement",
"=",
"true",
";",
"}",
"return",
"addedWorkunits",
";",
"}"
] | Offer an element to the container.
@return true if the element was added, false if there was no space and we could not evict any elements to make it fit.
Note that the element may get evicted by future offers, so a return of true is not a guarantee that the
element will be present at any time in the future. | [
"Offer",
"an",
"element",
"to",
"the",
"container",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-utility/src/main/java/org/apache/gobblin/util/request_allocation/ConcurrentBoundedPriorityIterable.java#L127-L141 |
25,759 | apache/incubator-gobblin | gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/hive/HiveDataset.java | HiveDataset.sortPartitions | public static List<Partition> sortPartitions(List<Partition> partitions) {
Collections.sort(partitions, new Comparator<Partition>() {
@Override
public int compare(Partition o1, Partition o2) {
return o1.getCompleteName().compareTo(o2.getCompleteName());
}
});
return partitions;
} | java | public static List<Partition> sortPartitions(List<Partition> partitions) {
Collections.sort(partitions, new Comparator<Partition>() {
@Override
public int compare(Partition o1, Partition o2) {
return o1.getCompleteName().compareTo(o2.getCompleteName());
}
});
return partitions;
} | [
"public",
"static",
"List",
"<",
"Partition",
">",
"sortPartitions",
"(",
"List",
"<",
"Partition",
">",
"partitions",
")",
"{",
"Collections",
".",
"sort",
"(",
"partitions",
",",
"new",
"Comparator",
"<",
"Partition",
">",
"(",
")",
"{",
"@",
"Override",
"public",
"int",
"compare",
"(",
"Partition",
"o1",
",",
"Partition",
"o2",
")",
"{",
"return",
"o1",
".",
"getCompleteName",
"(",
")",
".",
"compareTo",
"(",
"o2",
".",
"getCompleteName",
"(",
")",
")",
";",
"}",
"}",
")",
";",
"return",
"partitions",
";",
"}"
] | Sort all partitions in place on the basis of their complete name, i.e. dbName.tableName.partitionName | [
"Sort",
"all",
"partitions",
"inplace",
"on",
"the",
"basis",
"of",
"complete",
"name",
"ie",
"dbName",
".",
"tableName",
".",
"partitionName"
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/hive/HiveDataset.java#L303-L311 |
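The anonymous Comparator above predates Java 8; a behaviorally equivalent sketch with Comparator.comparing (Partition being Hive's org.apache.hadoop.hive.ql.metadata.Partition) would be:

import java.util.Comparator;
import java.util.List;
import org.apache.hadoop.hive.ql.metadata.Partition;

public static List<Partition> sortPartitions(List<Partition> partitions) {
  // Same ordering: lexicographic on the complete name dbName.tableName.partitionName.
  partitions.sort(Comparator.comparing(Partition::getCompleteName));
  return partitions;
}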
25,760 | apache/incubator-gobblin | gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/hive/HiveDataset.java | HiveDataset.getPartitionsFromDataset | public List<Partition> getPartitionsFromDataset() throws IOException{
try (AutoReturnableObject<IMetaStoreClient> client = getClientPool().getClient()) {
List<Partition> partitions =
HiveUtils.getPartitions(client.get(), getTable(), Optional.<String>absent());
return sortPartitions(partitions);
}
} | java | public List<Partition> getPartitionsFromDataset() throws IOException{
try (AutoReturnableObject<IMetaStoreClient> client = getClientPool().getClient()) {
List<Partition> partitions =
HiveUtils.getPartitions(client.get(), getTable(), Optional.<String>absent());
return sortPartitions(partitions);
}
} | [
"public",
"List",
"<",
"Partition",
">",
"getPartitionsFromDataset",
"(",
")",
"throws",
"IOException",
"{",
"try",
"(",
"AutoReturnableObject",
"<",
"IMetaStoreClient",
">",
"client",
"=",
"getClientPool",
"(",
")",
".",
"getClient",
"(",
")",
")",
"{",
"List",
"<",
"Partition",
">",
"partitions",
"=",
"HiveUtils",
".",
"getPartitions",
"(",
"client",
".",
"get",
"(",
")",
",",
"getTable",
"(",
")",
",",
"Optional",
".",
"<",
"String",
">",
"absent",
"(",
")",
")",
";",
"return",
"sortPartitions",
"(",
"partitions",
")",
";",
"}",
"}"
] | This method returns a sorted list of partitions. | [
"This",
"method",
"returns",
"a",
"sorted",
"list",
"of",
"partitions",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/hive/HiveDataset.java#L316-L322 |
25,761 | apache/incubator-gobblin | gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/ConcurrentBoundedWorkUnitList.java | ConcurrentBoundedWorkUnitList.addFileSet | public boolean addFileSet(FileSet<CopyEntity> fileSet, List<WorkUnit> workUnits) {
boolean addedWorkunits = addFileSetImpl(fileSet, workUnits);
if (!addedWorkunits) {
this.rejectedFileSet = true;
}
return addedWorkunits;
} | java | public boolean addFileSet(FileSet<CopyEntity> fileSet, List<WorkUnit> workUnits) {
boolean addedWorkunits = addFileSetImpl(fileSet, workUnits);
if (!addedWorkunits) {
this.rejectedFileSet = true;
}
return addedWorkunits;
} | [
"public",
"boolean",
"addFileSet",
"(",
"FileSet",
"<",
"CopyEntity",
">",
"fileSet",
",",
"List",
"<",
"WorkUnit",
">",
"workUnits",
")",
"{",
"boolean",
"addedWorkunits",
"=",
"addFileSetImpl",
"(",
"fileSet",
",",
"workUnits",
")",
";",
"if",
"(",
"!",
"addedWorkunits",
")",
"{",
"this",
".",
"rejectedFileSet",
"=",
"true",
";",
"}",
"return",
"addedWorkunits",
";",
"}"
] | Add a file set to the container.
@param fileSet File set, expressed as a {@link org.apache.gobblin.data.management.partition.FileSet} of {@link CopyEntity}s.
@param workUnits List of {@link WorkUnit}s corresponding to this file set.
@return true if the file set was added to the container, false otherwise (i.e. has reached max size). | [
"Add",
"a",
"file",
"set",
"to",
"the",
"container",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/copy/ConcurrentBoundedWorkUnitList.java#L108-L114 |
25,762 | apache/incubator-gobblin | gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/retention/HivePartitionVersionRetentionCleaner.java | HivePartitionVersionRetentionCleaner.clean | @Override
public void clean()
throws IOException {
Path versionLocation = ((HivePartitionRetentionVersion) this.datasetVersion).getLocation();
Path datasetLocation = ((CleanableHivePartitionDataset) this.cleanableDataset).getLocation();
String completeName = ((HivePartitionRetentionVersion) this.datasetVersion).datasetURN();
State state = new State(this.state);
this.fs = ProxyUtils.getOwnerFs(state, this.versionOwner);
try (HiveProxyQueryExecutor queryExecutor = ProxyUtils.getQueryExecutor(state, this.versionOwner)) {
log.info("Trying to clean version " + completeName);
if (!this.fs.exists(versionLocation)) {
log.info("Data versionLocation doesn't exist. Metadata will be dropped for the version " + completeName);
} else if (datasetLocation.toString().equalsIgnoreCase(versionLocation.toString())) {
log.info(
"Dataset location is same as version location. Won't delete the data but metadata will be dropped for the version "
+ completeName);
} else if (this.nonDeletableVersionLocations.contains(versionLocation.toString())) {
log.info(
"This version corresponds to the non deletable version. Won't delete the data but metadata will be dropped for the version "
+ completeName);
} else if (HadoopUtils.hasContent(this.fs, versionLocation)) {
if (this.simulate) {
log.info("Simulate is set to true. Won't delete the partition " + completeName);
return;
}
log.info("Deleting data from the version " + completeName);
this.fs.delete(versionLocation, true);
}
executeDropVersionQueries(queryExecutor);
}
} | java | @Override
public void clean()
throws IOException {
Path versionLocation = ((HivePartitionRetentionVersion) this.datasetVersion).getLocation();
Path datasetLocation = ((CleanableHivePartitionDataset) this.cleanableDataset).getLocation();
String completeName = ((HivePartitionRetentionVersion) this.datasetVersion).datasetURN();
State state = new State(this.state);
this.fs = ProxyUtils.getOwnerFs(state, this.versionOwner);
try (HiveProxyQueryExecutor queryExecutor = ProxyUtils.getQueryExecutor(state, this.versionOwner)) {
log.info("Trying to clean version " + completeName);
if (!this.fs.exists(versionLocation)) {
log.info("Data versionLocation doesn't exist. Metadata will be dropped for the version " + completeName);
} else if (datasetLocation.toString().equalsIgnoreCase(versionLocation.toString())) {
log.info(
"Dataset location is same as version location. Won't delete the data but metadata will be dropped for the version "
+ completeName);
} else if (this.nonDeletableVersionLocations.contains(versionLocation.toString())) {
log.info(
"This version corresponds to the non deletable version. Won't delete the data but metadata will be dropped for the version "
+ completeName);
} else if (HadoopUtils.hasContent(this.fs, versionLocation)) {
if (this.simulate) {
log.info("Simulate is set to true. Won't delete the partition " + completeName);
return;
}
log.info("Deleting data from the version " + completeName);
this.fs.delete(versionLocation, true);
}
executeDropVersionQueries(queryExecutor);
}
} | [
"@",
"Override",
"public",
"void",
"clean",
"(",
")",
"throws",
"IOException",
"{",
"Path",
"versionLocation",
"=",
"(",
"(",
"HivePartitionRetentionVersion",
")",
"this",
".",
"datasetVersion",
")",
".",
"getLocation",
"(",
")",
";",
"Path",
"datasetLocation",
"=",
"(",
"(",
"CleanableHivePartitionDataset",
")",
"this",
".",
"cleanableDataset",
")",
".",
"getLocation",
"(",
")",
";",
"String",
"completeName",
"=",
"(",
"(",
"HivePartitionRetentionVersion",
")",
"this",
".",
"datasetVersion",
")",
".",
"datasetURN",
"(",
")",
";",
"State",
"state",
"=",
"new",
"State",
"(",
"this",
".",
"state",
")",
";",
"this",
".",
"fs",
"=",
"ProxyUtils",
".",
"getOwnerFs",
"(",
"state",
",",
"this",
".",
"versionOwner",
")",
";",
"try",
"(",
"HiveProxyQueryExecutor",
"queryExecutor",
"=",
"ProxyUtils",
".",
"getQueryExecutor",
"(",
"state",
",",
"this",
".",
"versionOwner",
")",
")",
"{",
"log",
".",
"info",
"(",
"\"Trying to clean version \"",
"+",
"completeName",
")",
";",
"if",
"(",
"!",
"this",
".",
"fs",
".",
"exists",
"(",
"versionLocation",
")",
")",
"{",
"log",
".",
"info",
"(",
"\"Data versionLocation doesn't exist. Metadata will be dropped for the version \"",
"+",
"completeName",
")",
";",
"}",
"else",
"if",
"(",
"datasetLocation",
".",
"toString",
"(",
")",
".",
"equalsIgnoreCase",
"(",
"versionLocation",
".",
"toString",
"(",
")",
")",
")",
"{",
"log",
".",
"info",
"(",
"\"Dataset location is same as version location. Won't delete the data but metadata will be dropped for the version \"",
"+",
"completeName",
")",
";",
"}",
"else",
"if",
"(",
"this",
".",
"nonDeletableVersionLocations",
".",
"contains",
"(",
"versionLocation",
".",
"toString",
"(",
")",
")",
")",
"{",
"log",
".",
"info",
"(",
"\"This version corresponds to the non deletable version. Won't delete the data but metadata will be dropped for the version \"",
"+",
"completeName",
")",
";",
"}",
"else",
"if",
"(",
"HadoopUtils",
".",
"hasContent",
"(",
"this",
".",
"fs",
",",
"versionLocation",
")",
")",
"{",
"if",
"(",
"this",
".",
"simulate",
")",
"{",
"log",
".",
"info",
"(",
"\"Simulate is set to true. Won't delete the partition \"",
"+",
"completeName",
")",
";",
"return",
";",
"}",
"log",
".",
"info",
"(",
"\"Deleting data from the version \"",
"+",
"completeName",
")",
";",
"this",
".",
"fs",
".",
"delete",
"(",
"versionLocation",
",",
"true",
")",
";",
"}",
"executeDropVersionQueries",
"(",
"queryExecutor",
")",
";",
"}",
"}"
] | If simulate is set to true, this will simply return.
If the version points to an empty location, drop the partition and close the jdbc connection.
If the version points to the same location as the dataset, drop the partition and close the jdbc connection.
If the version points to one of the non-deletable version locations, drop the partition and close the jdbc connection.
Otherwise delete the underlying data, drop the partition and close the jdbc connection. | [
"If",
"simulate",
"is",
"set",
"to",
"true",
"this",
"will",
"simply",
"return",
".",
"If",
"version",
"is",
"pointing",
"to",
"an",
"empty",
"location",
"drop",
"the",
"partition",
"and",
"close",
"the",
"jdbc",
"connection",
".",
"If",
"version",
"is",
"pointing",
"to",
"the",
"same",
"location",
"as",
"of",
"the",
"dataset",
"then",
"drop",
"the",
"partition",
"and",
"close",
"the",
"jdbc",
"connection",
".",
"If",
"version",
"is",
"pointing",
"to",
"the",
"non",
"deletable",
"version",
"locations",
"then",
"drop",
"the",
"partition",
"and",
"close",
"the",
"jdbc",
"connection",
".",
"Otherwise",
"delete",
"the",
"data",
"underneath",
"drop",
"the",
"partition",
"and",
"close",
"the",
"jdbc",
"connection",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-modules/gobblin-compliance/src/main/java/org/apache/gobblin/compliance/retention/HivePartitionVersionRetentionCleaner.java#L70-L102 |
25,763 | apache/incubator-gobblin | gobblin-utility/src/main/java/org/apache/gobblin/util/HiveJdbcConnector.java | HiveJdbcConnector.addHiveSiteDirToClasspath | private static void addHiveSiteDirToClasspath(String hiveSiteDir) {
LOG.info("Adding " + hiveSiteDir + " to CLASSPATH");
File f = new File(hiveSiteDir);
try {
URL u = f.toURI().toURL();
URLClassLoader urlClassLoader = (URLClassLoader) ClassLoader.getSystemClassLoader();
Class<URLClassLoader> urlClass = URLClassLoader.class;
Method method = urlClass.getDeclaredMethod("addURL", new Class[] { URL.class });
method.setAccessible(true);
method.invoke(urlClassLoader, new Object[] { u });
} catch (ReflectiveOperationException | IOException e) {
throw new RuntimeException("Unable to add hive.site.dir to CLASSPATH", e);
}
} | java | private static void addHiveSiteDirToClasspath(String hiveSiteDir) {
LOG.info("Adding " + hiveSiteDir + " to CLASSPATH");
File f = new File(hiveSiteDir);
try {
URL u = f.toURI().toURL();
URLClassLoader urlClassLoader = (URLClassLoader) ClassLoader.getSystemClassLoader();
Class<URLClassLoader> urlClass = URLClassLoader.class;
Method method = urlClass.getDeclaredMethod("addURL", new Class[] { URL.class });
method.setAccessible(true);
method.invoke(urlClassLoader, new Object[] { u });
} catch (ReflectiveOperationException | IOException e) {
throw new RuntimeException("Unable to add hive.site.dir to CLASSPATH", e);
}
} | [
"private",
"static",
"void",
"addHiveSiteDirToClasspath",
"(",
"String",
"hiveSiteDir",
")",
"{",
"LOG",
".",
"info",
"(",
"\"Adding \"",
"+",
"hiveSiteDir",
"+",
"\" to CLASSPATH\"",
")",
";",
"File",
"f",
"=",
"new",
"File",
"(",
"hiveSiteDir",
")",
";",
"try",
"{",
"URL",
"u",
"=",
"f",
".",
"toURI",
"(",
")",
".",
"toURL",
"(",
")",
";",
"URLClassLoader",
"urlClassLoader",
"=",
"(",
"URLClassLoader",
")",
"ClassLoader",
".",
"getSystemClassLoader",
"(",
")",
";",
"Class",
"<",
"URLClassLoader",
">",
"urlClass",
"=",
"URLClassLoader",
".",
"class",
";",
"Method",
"method",
"=",
"urlClass",
".",
"getDeclaredMethod",
"(",
"\"addURL\"",
",",
"new",
"Class",
"[",
"]",
"{",
"URL",
".",
"class",
"}",
")",
";",
"method",
".",
"setAccessible",
"(",
"true",
")",
";",
"method",
".",
"invoke",
"(",
"urlClassLoader",
",",
"new",
"Object",
"[",
"]",
"{",
"u",
"}",
")",
";",
"}",
"catch",
"(",
"ReflectiveOperationException",
"|",
"IOException",
"e",
")",
"{",
"throw",
"new",
"RuntimeException",
"(",
"\"Unable to add hive.site.dir to CLASSPATH\"",
",",
"e",
")",
";",
"}",
"}"
] | Helper method to add the directory containing the hive-site.xml file to the classpath
@param hiveSiteDir the path to the folder containing the hive-site.xml file | [
"Helper",
"method",
"to",
"add",
"the",
"directory",
"containing",
"the",
"hive",
"-",
"site",
".",
"xml",
"file",
"to",
"the",
"classpath"
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-utility/src/main/java/org/apache/gobblin/util/HiveJdbcConnector.java#L195-L208 |
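A standalone sketch of the same reflection trick, stripped of the Gobblin logging. Note it relies on the system class loader being a URLClassLoader, which holds on Java 8 and earlier; on Java 9+ the cast fails:

import java.io.File;
import java.lang.reflect.Method;
import java.net.URL;
import java.net.URLClassLoader;

public final class ClasspathHack {
  public static void addDirToClasspath(File dir) throws Exception {
    URL url = dir.toURI().toURL();
    URLClassLoader sysLoader = (URLClassLoader) ClassLoader.getSystemClassLoader();
    Method addUrl = URLClassLoader.class.getDeclaredMethod("addURL", URL.class);
    addUrl.setAccessible(true);    // addURL is protected on URLClassLoader
    addUrl.invoke(sysLoader, url); // resources in dir (e.g. hive-site.xml) now resolve
  }
}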
25,764 | apache/incubator-gobblin | gobblin-utility/src/main/java/org/apache/gobblin/util/HiveJdbcConnector.java | HiveJdbcConnector.choppedStatementNoLineChange | static String choppedStatementNoLineChange(String statement) {
// \r\n needs to be the first element in the pipe.
statement = statement.replaceAll("\\r\\n|\\r|\\n", " ");
if (statement.length() <= MAX_OUTPUT_STMT_LENGTH) {
return statement;
}
return statement.substring(0, MAX_OUTPUT_STMT_LENGTH) + "...... (" + (statement.length() - MAX_OUTPUT_STMT_LENGTH)
+ " characters omitted)";
} | java | static String choppedStatementNoLineChange(String statement) {
// \r\n needs to be the first element in the pipe.
statement = statement.replaceAll("\\r\\n|\\r|\\n", " ");
if (statement.length() <= MAX_OUTPUT_STMT_LENGTH) {
return statement;
}
return statement.substring(0, MAX_OUTPUT_STMT_LENGTH) + "...... (" + (statement.length() - MAX_OUTPUT_STMT_LENGTH)
+ " characters omitted)";
} | [
"static",
"String",
"choppedStatementNoLineChange",
"(",
"String",
"statement",
")",
"{",
"// \\r\\n needs to be the first element in the pipe.",
"statement",
"=",
"statement",
".",
"replaceAll",
"(",
"\"\\\\r\\\\n|\\\\r|\\\\n\"",
",",
"\" \"",
")",
";",
"if",
"(",
"statement",
".",
"length",
"(",
")",
"<=",
"MAX_OUTPUT_STMT_LENGTH",
")",
"{",
"return",
"statement",
";",
"}",
"return",
"statement",
".",
"substring",
"(",
"0",
",",
"MAX_OUTPUT_STMT_LENGTH",
")",
"+",
"\"...... (\"",
"+",
"(",
"statement",
".",
"length",
"(",
")",
"-",
"MAX_OUTPUT_STMT_LENGTH",
")",
"+",
"\" characters omitted)\"",
";",
"}"
] | Chop the statement, removing all line-break characters, to save space in the log. | [
"Chopped",
"statements",
"with",
"all",
"line",
"-",
"changing",
"character",
"being",
"removed",
"for",
"saving",
"space",
"of",
"log",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-utility/src/main/java/org/apache/gobblin/util/HiveJdbcConnector.java#L260-L268 |
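A quick demonstration of the flattening regex; the "\r\n" alternative must come first so a Windows line ending collapses to one space rather than two:

public class ChopDemo {
  public static void main(String[] args) {
    String stmt = "SELECT *\r\nFROM t\rWHERE x = 1\nLIMIT 5";
    System.out.println(stmt.replaceAll("\\r\\n|\\r|\\n", " "));
    // -> SELECT * FROM t WHERE x = 1 LIMIT 5
  }
}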
25,765 | apache/incubator-gobblin | gobblin-service/src/main/java/org/apache/gobblin/service/modules/flow/MultiHopFlowCompiler.java | MultiHopFlowCompiler.addShutdownHook | private void addShutdownHook() {
ServiceManager manager = this.serviceManager;
Runtime.getRuntime().addShutdownHook(new Thread() {
public void run() {
// Give the services 5 seconds to stop to ensure that we are responsive to shutdown
// requests.
try {
manager.stopAsync().awaitStopped(5, TimeUnit.SECONDS);
} catch (TimeoutException timeout) {
// stopping timed out
}
}
});
} | java | private void addShutdownHook() {
ServiceManager manager = this.serviceManager;
Runtime.getRuntime().addShutdownHook(new Thread() {
public void run() {
// Give the services 5 seconds to stop to ensure that we are responsive to shutdown
// requests.
try {
manager.stopAsync().awaitStopped(5, TimeUnit.SECONDS);
} catch (TimeoutException timeout) {
// stopping timed out
}
}
});
} | [
"private",
"void",
"addShutdownHook",
"(",
")",
"{",
"ServiceManager",
"manager",
"=",
"this",
".",
"serviceManager",
";",
"Runtime",
".",
"getRuntime",
"(",
")",
".",
"addShutdownHook",
"(",
"new",
"Thread",
"(",
")",
"{",
"public",
"void",
"run",
"(",
")",
"{",
"// Give the services 5 seconds to stop to ensure that we are responsive to shutdown",
"// requests.",
"try",
"{",
"manager",
".",
"stopAsync",
"(",
")",
".",
"awaitStopped",
"(",
"5",
",",
"TimeUnit",
".",
"SECONDS",
")",
";",
"}",
"catch",
"(",
"TimeoutException",
"timeout",
")",
"{",
"// stopping timed out",
"}",
"}",
"}",
")",
";",
"}"
] | Register a shutdown hook for this thread. | [
"Register",
"a",
"shutdown",
"hook",
"for",
"this",
"thread",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-service/src/main/java/org/apache/gobblin/service/modules/flow/MultiHopFlowCompiler.java#L200-L213 |
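The same pattern as a standalone sketch against an arbitrary Guava ServiceManager; the 5-second bound matches the value used above:

import com.google.common.util.concurrent.ServiceManager;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

class ShutdownHookExample {
  static void registerShutdownHook(final ServiceManager manager) {
    Runtime.getRuntime().addShutdownHook(new Thread(() -> {
      try {
        // Bound the wait so JVM shutdown is never blocked indefinitely.
        manager.stopAsync().awaitStopped(5, TimeUnit.SECONDS);
      } catch (TimeoutException ignored) {
        // Services did not stop in time; proceed with JVM exit anyway.
      }
    }));
  }
}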
25,766 | apache/incubator-gobblin | gobblin-core/src/main/java/org/apache/gobblin/policies/avro/AvroHeaderTimestampPolicy.java | AvroHeaderTimestampPolicy.executePolicy | @Override
public Result executePolicy(Object record) {
if (!(record instanceof GenericRecord)) {
return RowLevelPolicy.Result.FAILED;
}
GenericRecord header = (GenericRecord) ((GenericRecord) record).get("header");
if (header == null) {
return RowLevelPolicy.Result.FAILED;
}
if (header.get("time") != null || header.get("timestamp") != null) {
return RowLevelPolicy.Result.PASSED;
}
return RowLevelPolicy.Result.FAILED;
} | java | @Override
public Result executePolicy(Object record) {
if (!(record instanceof GenericRecord)) {
return RowLevelPolicy.Result.FAILED;
}
GenericRecord header = (GenericRecord) ((GenericRecord) record).get("header");
if (header == null) {
return RowLevelPolicy.Result.FAILED;
}
if (header.get("time") != null || header.get("timestamp") != null) {
return RowLevelPolicy.Result.PASSED;
}
return RowLevelPolicy.Result.FAILED;
} | [
"@",
"Override",
"public",
"Result",
"executePolicy",
"(",
"Object",
"record",
")",
"{",
"if",
"(",
"!",
"(",
"record",
"instanceof",
"GenericRecord",
")",
")",
"{",
"return",
"RowLevelPolicy",
".",
"Result",
".",
"FAILED",
";",
"}",
"GenericRecord",
"header",
"=",
"(",
"GenericRecord",
")",
"(",
"(",
"GenericRecord",
")",
"record",
")",
".",
"get",
"(",
"\"header\"",
")",
";",
"if",
"(",
"header",
"==",
"null",
")",
"{",
"return",
"RowLevelPolicy",
".",
"Result",
".",
"FAILED",
";",
"}",
"if",
"(",
"header",
".",
"get",
"(",
"\"time\"",
")",
"!=",
"null",
"||",
"header",
".",
"get",
"(",
"\"timestamp\"",
")",
"!=",
"null",
")",
"{",
"return",
"RowLevelPolicy",
".",
"Result",
".",
"PASSED",
";",
"}",
"return",
"RowLevelPolicy",
".",
"Result",
".",
"FAILED",
";",
"}"
] | Return PASSED if the record has either a header.time or a header.timestamp field. | [
"Return",
"PASS",
"if",
"the",
"record",
"has",
"either",
"header",
".",
"time",
"or",
"header",
".",
"timestamp",
"field",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-core/src/main/java/org/apache/gobblin/policies/avro/AvroHeaderTimestampPolicy.java#L40-L54 |
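A sketch of a record this policy would pass, built with Avro's SchemaBuilder; the schema and field-record names here are illustrative, not taken from the source:

import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;

public class PolicyFixture {
  public static void main(String[] args) {
    Schema headerSchema = SchemaBuilder.record("Header").fields()
        .optionalLong("time")
        .optionalLong("timestamp")
        .endRecord();
    Schema eventSchema = SchemaBuilder.record("Event").fields()
        .name("header").type(headerSchema).noDefault()
        .endRecord();

    GenericRecord header = new GenericData.Record(headerSchema);
    header.put("time", System.currentTimeMillis()); // non-null time => PASSED
    GenericRecord event = new GenericData.Record(eventSchema);
    event.put("header", header);
    // policy.executePolicy(event) would return Result.PASSED for this record.
  }
}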
25,767 | apache/incubator-gobblin | gobblin-core/src/main/java/org/apache/gobblin/source/extractor/extract/sftp/SftpFsHelper.java | SftpFsHelper.getSftpChannel | public ChannelSftp getSftpChannel() throws SftpException {
try {
ChannelSftp channelSftp = (ChannelSftp) this.session.openChannel("sftp");
channelSftp.connect();
return channelSftp;
} catch (JSchException e) {
throw new SftpException(0, "Cannot open a channel to SFTP server", e);
}
} | java | public ChannelSftp getSftpChannel() throws SftpException {
try {
ChannelSftp channelSftp = (ChannelSftp) this.session.openChannel("sftp");
channelSftp.connect();
return channelSftp;
} catch (JSchException e) {
throw new SftpException(0, "Cannot open a channel to SFTP server", e);
}
} | [
"public",
"ChannelSftp",
"getSftpChannel",
"(",
")",
"throws",
"SftpException",
"{",
"try",
"{",
"ChannelSftp",
"channelSftp",
"=",
"(",
"ChannelSftp",
")",
"this",
".",
"session",
".",
"openChannel",
"(",
"\"sftp\"",
")",
";",
"channelSftp",
".",
"connect",
"(",
")",
";",
"return",
"channelSftp",
";",
"}",
"catch",
"(",
"JSchException",
"e",
")",
"{",
"throw",
"new",
"SftpException",
"(",
"0",
",",
"\"Cannot open a channel to SFTP server\"",
",",
"e",
")",
";",
"}",
"}"
] | Create a new channel every time a command needs to be executed. This is required to support execution of multiple
commands in parallel. All created channels are cleaned up when the session is closed.
@return a new {@link ChannelSftp}
@throws SftpException | [
"Create",
"new",
"channel",
"every",
"time",
"a",
"command",
"needs",
"to",
"be",
"executed",
".",
"This",
"is",
"required",
"to",
"support",
"execution",
"of",
"multiple",
"commands",
"in",
"parallel",
".",
"All",
"created",
"channels",
"are",
"cleaned",
"up",
"when",
"the",
"session",
"is",
"closed",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/extract/sftp/SftpFsHelper.java#L98-L107 |
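A caller-side sketch, assuming 'helper' is a connected SftpFsHelper and the remote path is hypothetical: each command runs on its own channel, and the channel (not the session) is disconnected when the command finishes:

import com.jcraft.jsch.ChannelSftp;
import java.util.Vector;

static void listIncoming(SftpFsHelper helper) throws Exception {
  ChannelSftp channel = helper.getSftpChannel();
  try {
    @SuppressWarnings("unchecked")
    Vector<ChannelSftp.LsEntry> entries = channel.ls("/incoming");
    for (ChannelSftp.LsEntry entry : entries) {
      System.out.println(entry.getFilename());
    }
  } finally {
    channel.disconnect(); // release the per-command channel; the session stays open
  }
}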
25,768 | apache/incubator-gobblin | gobblin-core/src/main/java/org/apache/gobblin/source/extractor/extract/sftp/SftpFsHelper.java | SftpFsHelper.getExecChannel | public ChannelExec getExecChannel(String command) throws SftpException {
ChannelExec channelExec;
try {
channelExec = (ChannelExec) this.session.openChannel("exec");
channelExec.setCommand(command);
channelExec.connect();
return channelExec;
} catch (JSchException e) {
throw new SftpException(0, "Cannot open a channel to SFTP server", e);
}
} | java | public ChannelExec getExecChannel(String command) throws SftpException {
ChannelExec channelExec;
try {
channelExec = (ChannelExec) this.session.openChannel("exec");
channelExec.setCommand(command);
channelExec.connect();
return channelExec;
} catch (JSchException e) {
throw new SftpException(0, "Cannot open a channel to SFTP server", e);
}
} | [
"public",
"ChannelExec",
"getExecChannel",
"(",
"String",
"command",
")",
"throws",
"SftpException",
"{",
"ChannelExec",
"channelExec",
";",
"try",
"{",
"channelExec",
"=",
"(",
"ChannelExec",
")",
"this",
".",
"session",
".",
"openChannel",
"(",
"\"exec\"",
")",
";",
"channelExec",
".",
"setCommand",
"(",
"command",
")",
";",
"channelExec",
".",
"connect",
"(",
")",
";",
"return",
"channelExec",
";",
"}",
"catch",
"(",
"JSchException",
"e",
")",
"{",
"throw",
"new",
"SftpException",
"(",
"0",
",",
"\"Cannot open a channel to SFTP server\"",
",",
"e",
")",
";",
"}",
"}"
] | Create a new exec channel to execute commands.
@param command to execute on the remote machine
@return a new execution channel
@throws SftpException if a channel could not be opened | [
"Create",
"a",
"new",
"sftp",
"channel",
"to",
"execute",
"commands",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/extract/sftp/SftpFsHelper.java#L116-L126 |
25,769 | apache/incubator-gobblin | gobblin-core/src/main/java/org/apache/gobblin/source/extractor/extract/sftp/SftpFsHelper.java | SftpFsHelper.connect | @Override
public void connect() throws FileBasedHelperException {
String privateKey = PasswordManager.getInstance(this.state)
.readPassword(this.state.getProp(ConfigurationKeys.SOURCE_CONN_PRIVATE_KEY));
String password = PasswordManager.getInstance(this.state)
.readPassword(this.state.getProp(ConfigurationKeys.SOURCE_CONN_PASSWORD));
String knownHosts = this.state.getProp(ConfigurationKeys.SOURCE_CONN_KNOWN_HOSTS);
String userName = this.state.getProp(ConfigurationKeys.SOURCE_CONN_USERNAME);
String hostName = this.state.getProp(ConfigurationKeys.SOURCE_CONN_HOST_NAME);
int port = this.state.getPropAsInt(ConfigurationKeys.SOURCE_CONN_PORT, ConfigurationKeys.SOURCE_CONN_DEFAULT_PORT);
String proxyHost = this.state.getProp(ConfigurationKeys.SOURCE_CONN_USE_PROXY_URL);
int proxyPort = this.state.getPropAsInt(ConfigurationKeys.SOURCE_CONN_USE_PROXY_PORT, -1);
JSch.setLogger(new JSchLogger());
JSch jsch = new JSch();
log.info("Attempting to connect to source via SFTP with" + " privateKey: " + privateKey + " knownHosts: "
+ knownHosts + " userName: " + userName + " hostName: " + hostName + " port: " + port + " proxyHost: "
+ proxyHost + " proxyPort: " + proxyPort);
try {
if (!Strings.isNullOrEmpty(privateKey)) {
List<IdentityStrategy> identityStrategies = ImmutableList.of(new LocalFileIdentityStrategy(),
new DistributedCacheIdentityStrategy(), new HDFSIdentityStrategy());
for (IdentityStrategy identityStrategy : identityStrategies) {
if (identityStrategy.setIdentity(privateKey, jsch)) {
break;
}
}
}
this.session = jsch.getSession(userName, hostName, port);
this.session.setConfig("PreferredAuthentications", "publickey,password");
if (Strings.isNullOrEmpty(knownHosts)) {
log.info("Known hosts path is not set, StrictHostKeyChecking will be turned off");
this.session.setConfig("StrictHostKeyChecking", "no");
} else {
jsch.setKnownHosts(knownHosts);
}
if (!Strings.isNullOrEmpty(password)) {
this.session.setPassword(password);
}
if (proxyHost != null && proxyPort >= 0) {
this.session.setProxy(new ProxyHTTP(proxyHost, proxyPort));
}
UserInfo ui = new MyUserInfo();
this.session.setUserInfo(ui);
this.session.setDaemonThread(true);
this.session.connect();
log.info("Finished connecting to source");
} catch (JSchException e) {
if (this.session != null) {
this.session.disconnect();
}
log.error(e.getMessage(), e);
throw new FileBasedHelperException("Cannot connect to SFTP source", e);
}
} | java | @Override
public void connect() throws FileBasedHelperException {
String privateKey = PasswordManager.getInstance(this.state)
.readPassword(this.state.getProp(ConfigurationKeys.SOURCE_CONN_PRIVATE_KEY));
String password = PasswordManager.getInstance(this.state)
.readPassword(this.state.getProp(ConfigurationKeys.SOURCE_CONN_PASSWORD));
String knownHosts = this.state.getProp(ConfigurationKeys.SOURCE_CONN_KNOWN_HOSTS);
String userName = this.state.getProp(ConfigurationKeys.SOURCE_CONN_USERNAME);
String hostName = this.state.getProp(ConfigurationKeys.SOURCE_CONN_HOST_NAME);
int port = this.state.getPropAsInt(ConfigurationKeys.SOURCE_CONN_PORT, ConfigurationKeys.SOURCE_CONN_DEFAULT_PORT);
String proxyHost = this.state.getProp(ConfigurationKeys.SOURCE_CONN_USE_PROXY_URL);
int proxyPort = this.state.getPropAsInt(ConfigurationKeys.SOURCE_CONN_USE_PROXY_PORT, -1);
JSch.setLogger(new JSchLogger());
JSch jsch = new JSch();
log.info("Attempting to connect to source via SFTP with" + " privateKey: " + privateKey + " knownHosts: "
+ knownHosts + " userName: " + userName + " hostName: " + hostName + " port: " + port + " proxyHost: "
+ proxyHost + " proxyPort: " + proxyPort);
try {
if (!Strings.isNullOrEmpty(privateKey)) {
List<IdentityStrategy> identityStrategies = ImmutableList.of(new LocalFileIdentityStrategy(),
new DistributedCacheIdentityStrategy(), new HDFSIdentityStrategy());
for (IdentityStrategy identityStrategy : identityStrategies) {
if (identityStrategy.setIdentity(privateKey, jsch)) {
break;
}
}
}
this.session = jsch.getSession(userName, hostName, port);
this.session.setConfig("PreferredAuthentications", "publickey,password");
if (Strings.isNullOrEmpty(knownHosts)) {
log.info("Known hosts path is not set, StrictHostKeyChecking will be turned off");
this.session.setConfig("StrictHostKeyChecking", "no");
} else {
jsch.setKnownHosts(knownHosts);
}
if (!Strings.isNullOrEmpty(password)) {
this.session.setPassword(password);
}
if (proxyHost != null && proxyPort >= 0) {
this.session.setProxy(new ProxyHTTP(proxyHost, proxyPort));
}
UserInfo ui = new MyUserInfo();
this.session.setUserInfo(ui);
this.session.setDaemonThread(true);
this.session.connect();
log.info("Finished connecting to source");
} catch (JSchException e) {
if (this.session != null) {
this.session.disconnect();
}
log.error(e.getMessage(), e);
throw new FileBasedHelperException("Cannot connect to SFTP source", e);
}
} | [
"@",
"Override",
"public",
"void",
"connect",
"(",
")",
"throws",
"FileBasedHelperException",
"{",
"String",
"privateKey",
"=",
"PasswordManager",
".",
"getInstance",
"(",
"this",
".",
"state",
")",
".",
"readPassword",
"(",
"this",
".",
"state",
".",
"getProp",
"(",
"ConfigurationKeys",
".",
"SOURCE_CONN_PRIVATE_KEY",
")",
")",
";",
"String",
"password",
"=",
"PasswordManager",
".",
"getInstance",
"(",
"this",
".",
"state",
")",
".",
"readPassword",
"(",
"this",
".",
"state",
".",
"getProp",
"(",
"ConfigurationKeys",
".",
"SOURCE_CONN_PASSWORD",
")",
")",
";",
"String",
"knownHosts",
"=",
"this",
".",
"state",
".",
"getProp",
"(",
"ConfigurationKeys",
".",
"SOURCE_CONN_KNOWN_HOSTS",
")",
";",
"String",
"userName",
"=",
"this",
".",
"state",
".",
"getProp",
"(",
"ConfigurationKeys",
".",
"SOURCE_CONN_USERNAME",
")",
";",
"String",
"hostName",
"=",
"this",
".",
"state",
".",
"getProp",
"(",
"ConfigurationKeys",
".",
"SOURCE_CONN_HOST_NAME",
")",
";",
"int",
"port",
"=",
"this",
".",
"state",
".",
"getPropAsInt",
"(",
"ConfigurationKeys",
".",
"SOURCE_CONN_PORT",
",",
"ConfigurationKeys",
".",
"SOURCE_CONN_DEFAULT_PORT",
")",
";",
"String",
"proxyHost",
"=",
"this",
".",
"state",
".",
"getProp",
"(",
"ConfigurationKeys",
".",
"SOURCE_CONN_USE_PROXY_URL",
")",
";",
"int",
"proxyPort",
"=",
"this",
".",
"state",
".",
"getPropAsInt",
"(",
"ConfigurationKeys",
".",
"SOURCE_CONN_USE_PROXY_PORT",
",",
"-",
"1",
")",
";",
"JSch",
".",
"setLogger",
"(",
"new",
"JSchLogger",
"(",
")",
")",
";",
"JSch",
"jsch",
"=",
"new",
"JSch",
"(",
")",
";",
"log",
".",
"info",
"(",
"\"Attempting to connect to source via SFTP with\"",
"+",
"\" privateKey: \"",
"+",
"privateKey",
"+",
"\" knownHosts: \"",
"+",
"knownHosts",
"+",
"\" userName: \"",
"+",
"userName",
"+",
"\" hostName: \"",
"+",
"hostName",
"+",
"\" port: \"",
"+",
"port",
"+",
"\" proxyHost: \"",
"+",
"proxyHost",
"+",
"\" proxyPort: \"",
"+",
"proxyPort",
")",
";",
"try",
"{",
"if",
"(",
"!",
"Strings",
".",
"isNullOrEmpty",
"(",
"privateKey",
")",
")",
"{",
"List",
"<",
"IdentityStrategy",
">",
"identityStrategies",
"=",
"ImmutableList",
".",
"of",
"(",
"new",
"LocalFileIdentityStrategy",
"(",
")",
",",
"new",
"DistributedCacheIdentityStrategy",
"(",
")",
",",
"new",
"HDFSIdentityStrategy",
"(",
")",
")",
";",
"for",
"(",
"IdentityStrategy",
"identityStrategy",
":",
"identityStrategies",
")",
"{",
"if",
"(",
"identityStrategy",
".",
"setIdentity",
"(",
"privateKey",
",",
"jsch",
")",
")",
"{",
"break",
";",
"}",
"}",
"}",
"this",
".",
"session",
"=",
"jsch",
".",
"getSession",
"(",
"userName",
",",
"hostName",
",",
"port",
")",
";",
"this",
".",
"session",
".",
"setConfig",
"(",
"\"PreferredAuthentications\"",
",",
"\"publickey,password\"",
")",
";",
"if",
"(",
"Strings",
".",
"isNullOrEmpty",
"(",
"knownHosts",
")",
")",
"{",
"log",
".",
"info",
"(",
"\"Known hosts path is not set, StrictHostKeyChecking will be turned off\"",
")",
";",
"this",
".",
"session",
".",
"setConfig",
"(",
"\"StrictHostKeyChecking\"",
",",
"\"no\"",
")",
";",
"}",
"else",
"{",
"jsch",
".",
"setKnownHosts",
"(",
"knownHosts",
")",
";",
"}",
"if",
"(",
"!",
"Strings",
".",
"isNullOrEmpty",
"(",
"password",
")",
")",
"{",
"this",
".",
"session",
".",
"setPassword",
"(",
"password",
")",
";",
"}",
"if",
"(",
"proxyHost",
"!=",
"null",
"&&",
"proxyPort",
">=",
"0",
")",
"{",
"this",
".",
"session",
".",
"setProxy",
"(",
"new",
"ProxyHTTP",
"(",
"proxyHost",
",",
"proxyPort",
")",
")",
";",
"}",
"UserInfo",
"ui",
"=",
"new",
"MyUserInfo",
"(",
")",
";",
"this",
".",
"session",
".",
"setUserInfo",
"(",
"ui",
")",
";",
"this",
".",
"session",
".",
"setDaemonThread",
"(",
"true",
")",
";",
"this",
".",
"session",
".",
"connect",
"(",
")",
";",
"log",
".",
"info",
"(",
"\"Finished connecting to source\"",
")",
";",
"}",
"catch",
"(",
"JSchException",
"e",
")",
"{",
"if",
"(",
"this",
".",
"session",
"!=",
"null",
")",
"{",
"this",
".",
"session",
".",
"disconnect",
"(",
")",
";",
"}",
"log",
".",
"error",
"(",
"e",
".",
"getMessage",
"(",
")",
",",
"e",
")",
";",
"throw",
"new",
"FileBasedHelperException",
"(",
"\"Cannot connect to SFTP source\"",
",",
"e",
")",
";",
"}",
"}"
] | Opens a connection to the specified host using the configured username. Authenticates with a private key when one
is configured, without prompting for a password; if a password property is set, it is supplied to the session as a
fallback authentication method.
@throws org.apache.gobblin.source.extractor.filebased.FileBasedHelperException | [
"Opens",
"up",
"a",
"connection",
"to",
"specified",
"host",
"using",
"the",
"username",
".",
"Connects",
"to",
"the",
"source",
"using",
"a",
"private",
"key",
"without",
"prompting",
"for",
"a",
"password",
".",
"This",
"method",
"does",
"not",
"support",
"connecting",
"to",
"a",
"source",
"using",
"a",
"password",
"only",
"by",
"private",
"key"
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/extract/sftp/SftpFsHelper.java#L134-L201 |
25,770 | apache/incubator-gobblin | gobblin-core/src/main/java/org/apache/gobblin/source/extractor/extract/sftp/SftpFsHelper.java | SftpFsHelper.getFileStream | @Override
public InputStream getFileStream(String file) throws FileBasedHelperException {
SftpGetMonitor monitor = new SftpGetMonitor();
try {
ChannelSftp channel = getSftpChannel();
return new SftpFsFileInputStream(channel.get(file, monitor), channel);
} catch (SftpException e) {
throw new FileBasedHelperException("Cannot download file " + file + " due to " + e.getMessage(), e);
}
} | java | @Override
public InputStream getFileStream(String file) throws FileBasedHelperException {
SftpGetMonitor monitor = new SftpGetMonitor();
try {
ChannelSftp channel = getSftpChannel();
return new SftpFsFileInputStream(channel.get(file, monitor), channel);
} catch (SftpException e) {
throw new FileBasedHelperException("Cannot download file " + file + " due to " + e.getMessage(), e);
}
} | [
"@",
"Override",
"public",
"InputStream",
"getFileStream",
"(",
"String",
"file",
")",
"throws",
"FileBasedHelperException",
"{",
"SftpGetMonitor",
"monitor",
"=",
"new",
"SftpGetMonitor",
"(",
")",
";",
"try",
"{",
"ChannelSftp",
"channel",
"=",
"getSftpChannel",
"(",
")",
";",
"return",
"new",
"SftpFsFileInputStream",
"(",
"channel",
".",
"get",
"(",
"file",
",",
"monitor",
")",
",",
"channel",
")",
";",
"}",
"catch",
"(",
"SftpException",
"e",
")",
"{",
"throw",
"new",
"FileBasedHelperException",
"(",
"\"Cannot download file \"",
"+",
"file",
"+",
"\" due to \"",
"+",
"e",
".",
"getMessage",
"(",
")",
",",
"e",
")",
";",
"}",
"}"
] | Executes an SFTP get and returns an input stream to the remote file
@param file the path of the remote file to download
@throws FileBasedHelperException if the download cannot be started | [
"Executes",
"a",
"get",
"SftpCommand",
"and",
"returns",
"an",
"input",
"stream",
"to",
"the",
"file"
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-core/src/main/java/org/apache/gobblin/source/extractor/extract/sftp/SftpFsHelper.java#L209-L218 |
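A caller-side sketch, again assuming a connected SftpFsHelper named 'helper' and a hypothetical remote path; closing the stream is what releases the dedicated ChannelSftp wrapped by SftpFsFileInputStream:

import java.io.InputStream;
import org.apache.commons.io.IOUtils;

static byte[] download(SftpFsHelper helper, String remotePath) throws Exception {
  try (InputStream in = helper.getFileStream(remotePath)) {
    return IOUtils.toByteArray(in); // closing the stream also disconnects the channel
  }
}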
25,771 | apache/incubator-gobblin | gobblin-api/src/main/java/org/apache/gobblin/dataset/DatasetDescriptor.java | DatasetDescriptor.toDataMap | @Deprecated
public Map<String, String> toDataMap() {
Map<String, String> map = Maps.newHashMap();
map.put(PLATFORM_KEY, platform);
map.put(NAME_KEY, getName());
map.putAll(metadata);
return map;
} | java | @Deprecated
public Map<String, String> toDataMap() {
Map<String, String> map = Maps.newHashMap();
map.put(PLATFORM_KEY, platform);
map.put(NAME_KEY, getName());
map.putAll(metadata);
return map;
} | [
"@",
"Deprecated",
"public",
"Map",
"<",
"String",
",",
"String",
">",
"toDataMap",
"(",
")",
"{",
"Map",
"<",
"String",
",",
"String",
">",
"map",
"=",
"Maps",
".",
"newHashMap",
"(",
")",
";",
"map",
".",
"put",
"(",
"PLATFORM_KEY",
",",
"platform",
")",
";",
"map",
".",
"put",
"(",
"NAME_KEY",
",",
"getName",
"(",
")",
")",
";",
"map",
".",
"putAll",
"(",
"metadata",
")",
";",
"return",
"map",
";",
"}"
] | Serialize to a string map
@deprecated use {@link Descriptor#serialize(Descriptor)} | [
"Serialize",
"to",
"a",
"string",
"map"
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-api/src/main/java/org/apache/gobblin/dataset/DatasetDescriptor.java#L81-L88 |
25,772 | apache/incubator-gobblin | gobblin-yarn/src/main/java/org/apache/gobblin/yarn/YarnAppSecurityManager.java | YarnAppSecurityManager.renewDelegationToken | private synchronized void renewDelegationToken() throws IOException, InterruptedException {
this.token.renew(this.fs.getConf());
writeDelegationTokenToFile();
if (!this.firstLogin) {
// Send a message to the controller and all the participants if this is not the first login
sendTokenFileUpdatedMessage(InstanceType.CONTROLLER);
sendTokenFileUpdatedMessage(InstanceType.PARTICIPANT);
}
} | java | private synchronized void renewDelegationToken() throws IOException, InterruptedException {
this.token.renew(this.fs.getConf());
writeDelegationTokenToFile();
if (!this.firstLogin) {
// Send a message to the controller and all the participants if this is not the first login
sendTokenFileUpdatedMessage(InstanceType.CONTROLLER);
sendTokenFileUpdatedMessage(InstanceType.PARTICIPANT);
}
} | [
"private",
"synchronized",
"void",
"renewDelegationToken",
"(",
")",
"throws",
"IOException",
",",
"InterruptedException",
"{",
"this",
".",
"token",
".",
"renew",
"(",
"this",
".",
"fs",
".",
"getConf",
"(",
")",
")",
";",
"writeDelegationTokenToFile",
"(",
")",
";",
"if",
"(",
"!",
"this",
".",
"firstLogin",
")",
"{",
"// Send a message to the controller and all the participants if this is not the first login",
"sendTokenFileUpdatedMessage",
"(",
"InstanceType",
".",
"CONTROLLER",
")",
";",
"sendTokenFileUpdatedMessage",
"(",
"InstanceType",
".",
"PARTICIPANT",
")",
";",
"}",
"}"
] | Renew the existing delegation token. | [
"Renew",
"the",
"existing",
"delegation",
"token",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-yarn/src/main/java/org/apache/gobblin/yarn/YarnAppSecurityManager.java#L188-L197 |
25,773 | apache/incubator-gobblin | gobblin-yarn/src/main/java/org/apache/gobblin/yarn/YarnAppSecurityManager.java | YarnAppSecurityManager.loginFromKeytab | private void loginFromKeytab() throws IOException {
String keyTabFilePath = this.config.getString(GobblinYarnConfigurationKeys.KEYTAB_FILE_PATH);
if (Strings.isNullOrEmpty(keyTabFilePath)) {
throw new IOException("Keytab file path is not defined for Kerberos login");
}
if (!new File(keyTabFilePath).exists()) {
throw new IOException("Keytab file not found at: " + keyTabFilePath);
}
String principal = this.config.getString(GobblinYarnConfigurationKeys.KEYTAB_PRINCIPAL_NAME);
if (Strings.isNullOrEmpty(principal)) {
principal = this.loginUser.getShortUserName() + "/localhost@LOCALHOST";
}
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication",
UserGroupInformation.AuthenticationMethod.KERBEROS.toString().toLowerCase());
UserGroupInformation.setConfiguration(conf);
UserGroupInformation.loginUserFromKeytab(principal, keyTabFilePath);
LOGGER.info(String.format("Logged in from keytab file %s using principal %s", keyTabFilePath, principal));
this.loginUser = UserGroupInformation.getLoginUser();
getNewDelegationTokenForLoginUser();
writeDelegationTokenToFile();
if (!this.firstLogin) {
// Send a message to the controller and all the participants
sendTokenFileUpdatedMessage(InstanceType.CONTROLLER);
sendTokenFileUpdatedMessage(InstanceType.PARTICIPANT);
}
} | java | private void loginFromKeytab() throws IOException {
String keyTabFilePath = this.config.getString(GobblinYarnConfigurationKeys.KEYTAB_FILE_PATH);
if (Strings.isNullOrEmpty(keyTabFilePath)) {
throw new IOException("Keytab file path is not defined for Kerberos login");
}
if (!new File(keyTabFilePath).exists()) {
throw new IOException("Keytab file not found at: " + keyTabFilePath);
}
String principal = this.config.getString(GobblinYarnConfigurationKeys.KEYTAB_PRINCIPAL_NAME);
if (Strings.isNullOrEmpty(principal)) {
principal = this.loginUser.getShortUserName() + "/localhost@LOCALHOST";
}
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication",
UserGroupInformation.AuthenticationMethod.KERBEROS.toString().toLowerCase());
UserGroupInformation.setConfiguration(conf);
UserGroupInformation.loginUserFromKeytab(principal, keyTabFilePath);
LOGGER.info(String.format("Logged in from keytab file %s using principal %s", keyTabFilePath, principal));
this.loginUser = UserGroupInformation.getLoginUser();
getNewDelegationTokenForLoginUser();
writeDelegationTokenToFile();
if (!this.firstLogin) {
// Send a message to the controller and all the participants
sendTokenFileUpdatedMessage(InstanceType.CONTROLLER);
sendTokenFileUpdatedMessage(InstanceType.PARTICIPANT);
}
} | [
"private",
"void",
"loginFromKeytab",
"(",
")",
"throws",
"IOException",
"{",
"String",
"keyTabFilePath",
"=",
"this",
".",
"config",
".",
"getString",
"(",
"GobblinYarnConfigurationKeys",
".",
"KEYTAB_FILE_PATH",
")",
";",
"if",
"(",
"Strings",
".",
"isNullOrEmpty",
"(",
"keyTabFilePath",
")",
")",
"{",
"throw",
"new",
"IOException",
"(",
"\"Keytab file path is not defined for Kerberos login\"",
")",
";",
"}",
"if",
"(",
"!",
"new",
"File",
"(",
"keyTabFilePath",
")",
".",
"exists",
"(",
")",
")",
"{",
"throw",
"new",
"IOException",
"(",
"\"Keytab file not found at: \"",
"+",
"keyTabFilePath",
")",
";",
"}",
"String",
"principal",
"=",
"this",
".",
"config",
".",
"getString",
"(",
"GobblinYarnConfigurationKeys",
".",
"KEYTAB_PRINCIPAL_NAME",
")",
";",
"if",
"(",
"Strings",
".",
"isNullOrEmpty",
"(",
"principal",
")",
")",
"{",
"principal",
"=",
"this",
".",
"loginUser",
".",
"getShortUserName",
"(",
")",
"+",
"\"/localhost@LOCALHOST\"",
";",
"}",
"Configuration",
"conf",
"=",
"new",
"Configuration",
"(",
")",
";",
"conf",
".",
"set",
"(",
"\"hadoop.security.authentication\"",
",",
"UserGroupInformation",
".",
"AuthenticationMethod",
".",
"KERBEROS",
".",
"toString",
"(",
")",
".",
"toLowerCase",
"(",
")",
")",
";",
"UserGroupInformation",
".",
"setConfiguration",
"(",
"conf",
")",
";",
"UserGroupInformation",
".",
"loginUserFromKeytab",
"(",
"principal",
",",
"keyTabFilePath",
")",
";",
"LOGGER",
".",
"info",
"(",
"String",
".",
"format",
"(",
"\"Logged in from keytab file %s using principal %s\"",
",",
"keyTabFilePath",
",",
"principal",
")",
")",
";",
"this",
".",
"loginUser",
"=",
"UserGroupInformation",
".",
"getLoginUser",
"(",
")",
";",
"getNewDelegationTokenForLoginUser",
"(",
")",
";",
"writeDelegationTokenToFile",
"(",
")",
";",
"if",
"(",
"!",
"this",
".",
"firstLogin",
")",
"{",
"// Send a message to the controller and all the participants",
"sendTokenFileUpdatedMessage",
"(",
"InstanceType",
".",
"CONTROLLER",
")",
";",
"sendTokenFileUpdatedMessage",
"(",
"InstanceType",
".",
"PARTICIPANT",
")",
";",
"}",
"}"
] | Login the user from a given keytab file. | [
"Login",
"the",
"user",
"from",
"a",
"given",
"keytab",
"file",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-yarn/src/main/java/org/apache/gobblin/yarn/YarnAppSecurityManager.java#L210-L242 |
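The underlying UserGroupInformation calls as a standalone sketch; the principal and keytab path are placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;

public class KeytabLoginExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("hadoop.security.authentication", "kerberos");
    UserGroupInformation.setConfiguration(conf);
    // Placeholders: substitute the real principal and keytab location.
    UserGroupInformation.loginUserFromKeytab("gobblin/host@EXAMPLE.COM",
        "/etc/security/keytabs/gobblin.keytab");
    System.out.println("logged in as " + UserGroupInformation.getLoginUser().getUserName());
  }
}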
25,774 | apache/incubator-gobblin | gobblin-yarn/src/main/java/org/apache/gobblin/yarn/YarnAppSecurityManager.java | YarnAppSecurityManager.writeDelegationTokenToFile | @VisibleForTesting
synchronized void writeDelegationTokenToFile() throws IOException {
if (this.fs.exists(this.tokenFilePath)) {
LOGGER.info("Deleting existing token file " + this.tokenFilePath);
this.fs.delete(this.tokenFilePath, false);
}
LOGGER.info("Writing new or renewed token to token file " + this.tokenFilePath);
YarnHelixUtils.writeTokenToFile(this.token, this.tokenFilePath, this.fs.getConf());
// Only grant access to the token file to the login user
this.fs.setPermission(this.tokenFilePath, new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE));
} | java | @VisibleForTesting
synchronized void writeDelegationTokenToFile() throws IOException {
if (this.fs.exists(this.tokenFilePath)) {
LOGGER.info("Deleting existing token file " + this.tokenFilePath);
this.fs.delete(this.tokenFilePath, false);
}
LOGGER.info("Writing new or renewed token to token file " + this.tokenFilePath);
YarnHelixUtils.writeTokenToFile(this.token, this.tokenFilePath, this.fs.getConf());
// Only grant access to the token file to the login user
this.fs.setPermission(this.tokenFilePath, new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE));
} | [
"@",
"VisibleForTesting",
"synchronized",
"void",
"writeDelegationTokenToFile",
"(",
")",
"throws",
"IOException",
"{",
"if",
"(",
"this",
".",
"fs",
".",
"exists",
"(",
"this",
".",
"tokenFilePath",
")",
")",
"{",
"LOGGER",
".",
"info",
"(",
"\"Deleting existing token file \"",
"+",
"this",
".",
"tokenFilePath",
")",
";",
"this",
".",
"fs",
".",
"delete",
"(",
"this",
".",
"tokenFilePath",
",",
"false",
")",
";",
"}",
"LOGGER",
".",
"info",
"(",
"\"Writing new or renewed token to token file \"",
"+",
"this",
".",
"tokenFilePath",
")",
";",
"YarnHelixUtils",
".",
"writeTokenToFile",
"(",
"this",
".",
"token",
",",
"this",
".",
"tokenFilePath",
",",
"this",
".",
"fs",
".",
"getConf",
"(",
")",
")",
";",
"// Only grand access to the token file to the login user",
"this",
".",
"fs",
".",
"setPermission",
"(",
"this",
".",
"tokenFilePath",
",",
"new",
"FsPermission",
"(",
"FsAction",
".",
"READ_WRITE",
",",
"FsAction",
".",
"NONE",
",",
"FsAction",
".",
"NONE",
")",
")",
";",
"}"
] | Write the current delegation token to the token file. | [
"Write",
"the",
"current",
"delegation",
"token",
"to",
"the",
"token",
"file",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-yarn/src/main/java/org/apache/gobblin/yarn/YarnAppSecurityManager.java#L247-L258 |
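The permission built above corresponds to POSIX mode 600 (owner read/write, no group or other access); a small sketch, with 'fs' standing for any org.apache.hadoop.fs.FileSystem and the token path hypothetical:

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

static void lockDown(FileSystem fs) throws Exception {
  FsPermission ownerOnly = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
  System.out.println(ownerOnly); // rw-------
  fs.setPermission(new Path("/user/gobblin/.token"), ownerOnly);
}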
25,775 | apache/incubator-gobblin | gobblin-modules/gobblin-crypto/src/main/java/org/apache/gobblin/crypto/GPGFileEncryptor.java | GPGFileEncryptor.symmetricKeyAlgorithmNameToTag | private static int symmetricKeyAlgorithmNameToTag(String cipherName) {
// Use CAST5 if no cipher specified
if (StringUtils.isEmpty(cipherName)) {
return PGPEncryptedData.CAST5;
}
Set<Field> fields = ReflectionUtils.getAllFields(PGPEncryptedData.class, ReflectionUtils.withName(cipherName));
if (fields.isEmpty()) {
throw new RuntimeException("Could not find tag for cipher name " + cipherName);
}
try {
return fields.iterator().next().getInt(null);
} catch (IllegalAccessException e) {
throw new RuntimeException("Could not access field " + cipherName, e);
}
} | java | private static int symmetricKeyAlgorithmNameToTag(String cipherName) {
// Use CAST5 if no cipher specified
if (StringUtils.isEmpty(cipherName)) {
return PGPEncryptedData.CAST5;
}
Set<Field> fields = ReflectionUtils.getAllFields(PGPEncryptedData.class, ReflectionUtils.withName(cipherName));
if (fields.isEmpty()) {
throw new RuntimeException("Could not find tag for cipher name " + cipherName);
}
try {
return fields.iterator().next().getInt(null);
} catch (IllegalAccessException e) {
throw new RuntimeException("Could not access field " + cipherName, e);
}
} | [
"private",
"static",
"int",
"symmetricKeyAlgorithmNameToTag",
"(",
"String",
"cipherName",
")",
"{",
"// Use CAST5 if no cipher specified",
"if",
"(",
"StringUtils",
".",
"isEmpty",
"(",
"cipherName",
")",
")",
"{",
"return",
"PGPEncryptedData",
".",
"CAST5",
";",
"}",
"Set",
"<",
"Field",
">",
"fields",
"=",
"ReflectionUtils",
".",
"getAllFields",
"(",
"PGPEncryptedData",
".",
"class",
",",
"ReflectionUtils",
".",
"withName",
"(",
"cipherName",
")",
")",
";",
"if",
"(",
"fields",
".",
"isEmpty",
"(",
")",
")",
"{",
"throw",
"new",
"RuntimeException",
"(",
"\"Could not find tag for cipher name \"",
"+",
"cipherName",
")",
";",
"}",
"try",
"{",
"return",
"fields",
".",
"iterator",
"(",
")",
".",
"next",
"(",
")",
".",
"getInt",
"(",
"null",
")",
";",
"}",
"catch",
"(",
"IllegalAccessException",
"e",
")",
"{",
"throw",
"new",
"RuntimeException",
"(",
"\"Could not access field \"",
"+",
"cipherName",
",",
"e",
")",
";",
"}",
"}"
] | Convert a string cipher name to the integer tag used by GPG
@param cipherName the cipher name
@return integer tag for the cipher | [
"Convert",
"a",
"string",
"cipher",
"name",
"to",
"the",
"integer",
"tag",
"used",
"by",
"GPG"
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-modules/gobblin-crypto/src/main/java/org/apache/gobblin/crypto/GPGFileEncryptor.java#L143-L160 |
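A plain-reflection equivalent of the lookup above, without the Reflections library; the cipher constants live on Bouncy Castle's SymmetricKeyAlgorithmTags interface, which PGPEncryptedData implements, so getField resolves them:

import java.lang.reflect.Field;
import org.bouncycastle.openpgp.PGPEncryptedData;

static int cipherTag(String cipherName) throws ReflectiveOperationException {
  Field field = PGPEncryptedData.class.getField(cipherName);
  return field.getInt(null); // e.g. cipherTag("AES_256") == 9
}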
25,776 | apache/incubator-gobblin | gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/source/DatasetCleanerSource.java | DatasetCleanerSource.getWorkunits | @Override
public List<WorkUnit> getWorkunits(SourceState state) {
List<WorkUnit> workUnits = Lists.newArrayList();
Config config = ConfigUtils.propertiesToConfig(state.getProperties());
Config sourceConfig = ConfigUtils.getConfigOrEmpty(config, DATASET_CLEANER_SOURCE_PREFIX);
List<String> configurationNames = ConfigUtils.getStringList(config, DATASET_CLEANER_CONFIGURATIONS);
// use a dummy configuration name if none set
if (configurationNames.isEmpty()) {
configurationNames = ImmutableList.of("DummyConfig");
}
for (String configurationName: configurationNames) {
WorkUnit workUnit = WorkUnit.createEmpty();
// specific configuration prefixed by the configuration name has precedence over the source specific configuration
// and the source specific configuration has precedence over the general configuration
Config wuConfig = ConfigUtils.getConfigOrEmpty(sourceConfig, configurationName).withFallback(sourceConfig)
.withFallback(config);
workUnit.setProps(ConfigUtils.configToProperties(wuConfig), new Properties());
TaskUtils.setTaskFactoryClass(workUnit, DatasetCleanerTaskFactory.class);
workUnits.add(workUnit);
}
return workUnits;
} | java | @Override
public List<WorkUnit> getWorkunits(SourceState state) {
List<WorkUnit> workUnits = Lists.newArrayList();
Config config = ConfigUtils.propertiesToConfig(state.getProperties());
Config sourceConfig = ConfigUtils.getConfigOrEmpty(config, DATASET_CLEANER_SOURCE_PREFIX);
List<String> configurationNames = ConfigUtils.getStringList(config, DATASET_CLEANER_CONFIGURATIONS);
// use a dummy configuration name if none set
if (configurationNames.isEmpty()) {
configurationNames = ImmutableList.of("DummyConfig");
}
for (String configurationName: configurationNames) {
WorkUnit workUnit = WorkUnit.createEmpty();
// specific configuration prefixed by the configuration name has precedence over the source specific configuration
// and the source specific configuration has precedence over the general configuration
Config wuConfig = ConfigUtils.getConfigOrEmpty(sourceConfig, configurationName).withFallback(sourceConfig)
.withFallback(config);
workUnit.setProps(ConfigUtils.configToProperties(wuConfig), new Properties());
TaskUtils.setTaskFactoryClass(workUnit, DatasetCleanerTaskFactory.class);
workUnits.add(workUnit);
}
return workUnits;
} | [
"@",
"Override",
"public",
"List",
"<",
"WorkUnit",
">",
"getWorkunits",
"(",
"SourceState",
"state",
")",
"{",
"List",
"<",
"WorkUnit",
">",
"workUnits",
"=",
"Lists",
".",
"newArrayList",
"(",
")",
";",
"Config",
"config",
"=",
"ConfigUtils",
".",
"propertiesToConfig",
"(",
"state",
".",
"getProperties",
"(",
")",
")",
";",
"Config",
"sourceConfig",
"=",
"ConfigUtils",
".",
"getConfigOrEmpty",
"(",
"config",
",",
"DATASET_CLEANER_SOURCE_PREFIX",
")",
";",
"List",
"<",
"String",
">",
"configurationNames",
"=",
"ConfigUtils",
".",
"getStringList",
"(",
"config",
",",
"DATASET_CLEANER_CONFIGURATIONS",
")",
";",
"// use a dummy configuration name if none set",
"if",
"(",
"configurationNames",
".",
"isEmpty",
"(",
")",
")",
"{",
"configurationNames",
"=",
"ImmutableList",
".",
"of",
"(",
"\"DummyConfig\"",
")",
";",
"}",
"for",
"(",
"String",
"configurationName",
":",
"configurationNames",
")",
"{",
"WorkUnit",
"workUnit",
"=",
"WorkUnit",
".",
"createEmpty",
"(",
")",
";",
"// specific configuration prefixed by the configuration name has precedence over the source specific configuration",
"// and the source specific configuration has precedence over the general configuration",
"Config",
"wuConfig",
"=",
"ConfigUtils",
".",
"getConfigOrEmpty",
"(",
"sourceConfig",
",",
"configurationName",
")",
".",
"withFallback",
"(",
"sourceConfig",
")",
".",
"withFallback",
"(",
"config",
")",
";",
"workUnit",
".",
"setProps",
"(",
"ConfigUtils",
".",
"configToProperties",
"(",
"wuConfig",
")",
",",
"new",
"Properties",
"(",
")",
")",
";",
"TaskUtils",
".",
"setTaskFactoryClass",
"(",
"workUnit",
",",
"DatasetCleanerTaskFactory",
".",
"class",
")",
";",
"workUnits",
".",
"add",
"(",
"workUnit",
")",
";",
"}",
"return",
"workUnits",
";",
"}"
] | Create a work unit for each configuration defined or a single work unit if no configurations are defined
@param state see {@link org.apache.gobblin.configuration.SourceState}
@return list of workunits | [
"Create",
"a",
"work",
"unit",
"for",
"each",
"configuration",
"defined",
"or",
"a",
"single",
"work",
"unit",
"if",
"no",
"configurations",
"are",
"defined"
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-data-management/src/main/java/org/apache/gobblin/data/management/retention/source/DatasetCleanerSource.java#L69-L95 |
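The layered precedence in `getWorkunits` (named config over source config over general config) is plain Typesafe Config `withFallback` chaining. A small sketch of the same layering with illustrative keys (none of these key names come from the record):

```java
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import java.util.Map;

public class FallbackPrecedenceDemo {
  public static void main(String[] args) {
    Config general = ConfigFactory.parseMap(Map.of("retention.days", "30", "dry.run", "true"));
    Config source = ConfigFactory.parseMap(Map.of("retention.days", "14"));
    Config named = ConfigFactory.parseMap(Map.of("dry.run", "false"));

    // The left-most config wins; fallbacks fill in whatever is missing.
    Config effective = named.withFallback(source).withFallback(general);

    System.out.println(effective.getString("retention.days")); // 14, from the source config
    System.out.println(effective.getString("dry.run"));        // false, from the named config
  }
}
```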
25,777 | apache/incubator-gobblin | gobblin-modules/gobblin-eventhub/src/main/java/org/apache/gobblin/eventhub/writer/EventhubDataWriter.java | EventhubDataWriter.write | public Future<WriteResponse> write (Batch<String> batch, WriteCallback callback) {
Timer.Context context = writeTimer.time();
int returnCode = 0;
LOG.info ("Dispatching batch " + batch.getId());
recordsAttempted.mark(batch.getRecords().size());
try {
String encoded = encodeBatch(batch);
returnCode = request (encoded);
WriteResponse<Integer> response = WRITE_RESPONSE_WRAPPER.wrap(returnCode);
callback.onSuccess(response);
bytesWritten.mark(encoded.length());
recordsSuccess.mark(batch.getRecords().size());
} catch (Exception e) {
LOG.error("Dispatching batch " + batch.getId() + " failed :" + e.toString());
callback.onFailure(e);
recordsFailed.mark(batch.getRecords().size());
}
context.close();
Future<Integer> future = Futures.immediateFuture(returnCode);
return new WriteResponseFuture<>(future, WRITE_RESPONSE_WRAPPER);
} | java | public Future<WriteResponse> write (Batch<String> batch, WriteCallback callback) {
Timer.Context context = writeTimer.time();
int returnCode = 0;
LOG.info ("Dispatching batch " + batch.getId());
recordsAttempted.mark(batch.getRecords().size());
try {
String encoded = encodeBatch(batch);
returnCode = request (encoded);
WriteResponse<Integer> response = WRITE_RESPONSE_WRAPPER.wrap(returnCode);
callback.onSuccess(response);
bytesWritten.mark(encoded.length());
recordsSuccess.mark(batch.getRecords().size());
} catch (Exception e) {
LOG.error("Dispatching batch " + batch.getId() + " failed :" + e.toString());
callback.onFailure(e);
recordsFailed.mark(batch.getRecords().size());
}
context.close();
Future<Integer> future = Futures.immediateFuture(returnCode);
return new WriteResponseFuture<>(future, WRITE_RESPONSE_WRAPPER);
} | [
"public",
"Future",
"<",
"WriteResponse",
">",
"write",
"(",
"Batch",
"<",
"String",
">",
"batch",
",",
"WriteCallback",
"callback",
")",
"{",
"Timer",
".",
"Context",
"context",
"=",
"writeTimer",
".",
"time",
"(",
")",
";",
"int",
"returnCode",
"=",
"0",
";",
"LOG",
".",
"info",
"(",
"\"Dispatching batch \"",
"+",
"batch",
".",
"getId",
"(",
")",
")",
";",
"recordsAttempted",
".",
"mark",
"(",
"batch",
".",
"getRecords",
"(",
")",
".",
"size",
"(",
")",
")",
";",
"try",
"{",
"String",
"encoded",
"=",
"encodeBatch",
"(",
"batch",
")",
";",
"returnCode",
"=",
"request",
"(",
"encoded",
")",
";",
"WriteResponse",
"<",
"Integer",
">",
"response",
"=",
"WRITE_RESPONSE_WRAPPER",
".",
"wrap",
"(",
"returnCode",
")",
";",
"callback",
".",
"onSuccess",
"(",
"response",
")",
";",
"bytesWritten",
".",
"mark",
"(",
"encoded",
".",
"length",
"(",
")",
")",
";",
"recordsSuccess",
".",
"mark",
"(",
"batch",
".",
"getRecords",
"(",
")",
".",
"size",
"(",
")",
")",
";",
"}",
"catch",
"(",
"Exception",
"e",
")",
"{",
"LOG",
".",
"error",
"(",
"\"Dispatching batch \"",
"+",
"batch",
".",
"getId",
"(",
")",
"+",
"\" failed :\"",
"+",
"e",
".",
"toString",
"(",
")",
")",
";",
"callback",
".",
"onFailure",
"(",
"e",
")",
";",
"recordsFailed",
".",
"mark",
"(",
"batch",
".",
"getRecords",
"(",
")",
".",
"size",
"(",
")",
")",
";",
"}",
"context",
".",
"close",
"(",
")",
";",
"Future",
"<",
"Integer",
">",
"future",
"=",
"Futures",
".",
"immediateFuture",
"(",
"returnCode",
")",
";",
"return",
"new",
"WriteResponseFuture",
"<>",
"(",
"future",
",",
"WRITE_RESPONSE_WRAPPER",
")",
";",
"}"
] | Write a whole batch to eventhub | [
"Write",
"a",
"whole",
"batch",
"to",
"eventhub"
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-modules/gobblin-eventhub/src/main/java/org/apache/gobblin/eventhub/writer/EventhubDataWriter.java#L153-L174 |
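The batch path above is instrumented with Dropwizard Metrics: a `Timer.Context` around the dispatch plus meters for attempted, succeeded, and failed records. A stripped-down sketch of that pattern (the registry and metric names are placeholders, and the timer is closed in a `finally` here for safety, a small variation on the record):

```java
import com.codahale.metrics.Meter;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;
import java.util.List;

public class InstrumentedDispatch {
  private final MetricRegistry registry = new MetricRegistry();
  private final Timer writeTimer = registry.timer("writeTimer");
  private final Meter recordsAttempted = registry.meter("recordsAttempted");
  private final Meter recordsSuccess = registry.meter("recordsSuccess");
  private final Meter recordsFailed = registry.meter("recordsFailed");

  public void dispatch(List<String> records) {
    Timer.Context context = writeTimer.time();
    recordsAttempted.mark(records.size());
    try {
      // ... send the batch to the sink here ...
      recordsSuccess.mark(records.size());
    } catch (RuntimeException e) {
      recordsFailed.mark(records.size());
    } finally {
      context.close(); // Timer.Context is Closeable; close() records the elapsed time
    }
  }
}
```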
25,778 | apache/incubator-gobblin | gobblin-modules/gobblin-eventhub/src/main/java/org/apache/gobblin/eventhub/writer/EventhubDataWriter.java | EventhubDataWriter.write | public WriteResponse write (String record) throws IOException {
recordsAttempted.mark();
String encoded = encodeRecord(record);
int returnCode = request (encoded);
recordsSuccess.mark();
bytesWritten.mark(encoded.length());
return WRITE_RESPONSE_WRAPPER.wrap(returnCode);
} | java | public WriteResponse write (String record) throws IOException {
recordsAttempted.mark();
String encoded = encodeRecord(record);
int returnCode = request (encoded);
recordsSuccess.mark();
bytesWritten.mark(encoded.length());
return WRITE_RESPONSE_WRAPPER.wrap(returnCode);
} | [
"public",
"WriteResponse",
"write",
"(",
"String",
"record",
")",
"throws",
"IOException",
"{",
"recordsAttempted",
".",
"mark",
"(",
")",
";",
"String",
"encoded",
"=",
"encodeRecord",
"(",
"record",
")",
";",
"int",
"returnCode",
"=",
"request",
"(",
"encoded",
")",
";",
"recordsSuccess",
".",
"mark",
"(",
")",
";",
"bytesWritten",
".",
"mark",
"(",
"encoded",
".",
"length",
"(",
")",
")",
";",
"return",
"WRITE_RESPONSE_WRAPPER",
".",
"wrap",
"(",
"returnCode",
")",
";",
"}"
] | Write a single record to eventhub | [
"Write",
"a",
"single",
"record",
"to",
"eventhub"
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-modules/gobblin-eventhub/src/main/java/org/apache/gobblin/eventhub/writer/EventhubDataWriter.java#L179-L186 |
25,779 | apache/incubator-gobblin | gobblin-modules/gobblin-eventhub/src/main/java/org/apache/gobblin/eventhub/writer/EventhubDataWriter.java | EventhubDataWriter.refreshSignature | public void refreshSignature () {
if (postStartTimestamp == 0 || (System.nanoTime() - postStartTimestamp) > Duration.ofMinutes(sigExpireInMinute).toNanos()) {
// generate signature
try {
signature = SharedAccessSignatureTokenProvider
.generateSharedAccessSignature(sasKeyName, sasKey, namespaceName, Duration.ofMinutes(sigExpireInMinute));
postStartTimestamp = System.nanoTime();
LOG.info ("Signature is refreshing: " + signature);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
} | java | public void refreshSignature () {
if (postStartTimestamp == 0 || (System.nanoTime() - postStartTimestamp) > Duration.ofMinutes(sigExpireInMinute).toNanos()) {
// generate signature
try {
signature = SharedAccessSignatureTokenProvider
.generateSharedAccessSignature(sasKeyName, sasKey, namespaceName, Duration.ofMinutes(sigExpireInMinute));
postStartTimestamp = System.nanoTime();
LOG.info ("Signature is refreshing: " + signature);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
} | [
"public",
"void",
"refreshSignature",
"(",
")",
"{",
"if",
"(",
"postStartTimestamp",
"==",
"0",
"||",
"(",
"System",
".",
"nanoTime",
"(",
")",
"-",
"postStartTimestamp",
")",
">",
"Duration",
".",
"ofMinutes",
"(",
"sigExpireInMinute",
")",
".",
"toNanos",
"(",
")",
")",
"{",
"// generate signature",
"try",
"{",
"signature",
"=",
"SharedAccessSignatureTokenProvider",
".",
"generateSharedAccessSignature",
"(",
"sasKeyName",
",",
"sasKey",
",",
"namespaceName",
",",
"Duration",
".",
"ofMinutes",
"(",
"sigExpireInMinute",
")",
")",
";",
"postStartTimestamp",
"=",
"System",
".",
"nanoTime",
"(",
")",
";",
"LOG",
".",
"info",
"(",
"\"Signature is refreshing: \"",
"+",
"signature",
")",
";",
"}",
"catch",
"(",
"Exception",
"e",
")",
"{",
"throw",
"new",
"RuntimeException",
"(",
"e",
")",
";",
"}",
"}",
"}"
] | A signature which contains the duration.
After the duration has expired, the signature becomes invalid | [
"A",
"signature",
"which",
"contains",
"the",
"duration",
".",
"After",
"the",
"duration",
"is",
"expired",
"the",
"signature",
"becomes",
"invalid"
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-modules/gobblin-eventhub/src/main/java/org/apache/gobblin/eventhub/writer/EventhubDataWriter.java#L192-L204 |
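`refreshSignature` implements refresh-on-expiry: the token is regenerated only when the elapsed `System.nanoTime()` exceeds the configured lifetime, with `0` meaning "never issued". A generic sketch of the pattern; the `Supplier` is a stand-in for the real `SharedAccessSignatureTokenProvider` call, whose exact signature is not reproduced here:

```java
import java.time.Duration;
import java.util.function.Supplier;

public class ExpiringToken {
  private final Duration lifetime;
  private final Supplier<String> generator; // stand-in for the real SAS generator
  private String token;
  private long issuedAtNanos; // 0 means the token has never been issued

  public ExpiringToken(Duration lifetime, Supplier<String> generator) {
    this.lifetime = lifetime;
    this.generator = generator;
  }

  public synchronized String get() {
    if (issuedAtNanos == 0 || System.nanoTime() - issuedAtNanos > lifetime.toNanos()) {
      token = generator.get();           // refresh only once the old token expires
      issuedAtNanos = System.nanoTime(); // nanoTime is monotonic, unlike wall-clock time
    }
    return token;
  }
}
```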
25,780 | apache/incubator-gobblin | gobblin-modules/gobblin-eventhub/src/main/java/org/apache/gobblin/eventhub/writer/EventhubDataWriter.java | EventhubDataWriter.request | private int request (String encoded) throws IOException {
refreshSignature();
HttpPost httpPost = new HttpPost(targetURI);
httpPost.setHeader("Content-type", "application/vnd.microsoft.servicebus.json");
httpPost.setHeader("Authorization", signature);
httpPost.setHeader("Host", namespaceName + ".servicebus.windows.net ");
StringEntity entity = new StringEntity(encoded);
httpPost.setEntity(entity);
HttpResponse response = httpclient.execute(httpPost);
StatusLine status = response.getStatusLine();
HttpEntity entity2 = response.getEntity();
// do something useful with the response body
// and ensure it is fully consumed
EntityUtils.consume(entity2);
int returnCode = status.getStatusCode();
if (returnCode != HttpStatus.SC_CREATED) {
LOG.error (new IOException(status.getReasonPhrase()).toString());
throw new IOException(status.getReasonPhrase());
}
return returnCode;
} | java | private int request (String encoded) throws IOException {
refreshSignature();
HttpPost httpPost = new HttpPost(targetURI);
httpPost.setHeader("Content-type", "application/vnd.microsoft.servicebus.json");
httpPost.setHeader("Authorization", signature);
httpPost.setHeader("Host", namespaceName + ".servicebus.windows.net ");
StringEntity entity = new StringEntity(encoded);
httpPost.setEntity(entity);
HttpResponse response = httpclient.execute(httpPost);
StatusLine status = response.getStatusLine();
HttpEntity entity2 = response.getEntity();
// do something useful with the response body
// and ensure it is fully consumed
EntityUtils.consume(entity2);
int returnCode = status.getStatusCode();
if (returnCode != HttpStatus.SC_CREATED) {
LOG.error (new IOException(status.getReasonPhrase()).toString());
throw new IOException(status.getReasonPhrase());
}
return returnCode;
} | [
"private",
"int",
"request",
"(",
"String",
"encoded",
")",
"throws",
"IOException",
"{",
"refreshSignature",
"(",
")",
";",
"HttpPost",
"httpPost",
"=",
"new",
"HttpPost",
"(",
"targetURI",
")",
";",
"httpPost",
".",
"setHeader",
"(",
"\"Content-type\"",
",",
"\"application/vnd.microsoft.servicebus.json\"",
")",
";",
"httpPost",
".",
"setHeader",
"(",
"\"Authorization\"",
",",
"signature",
")",
";",
"httpPost",
".",
"setHeader",
"(",
"\"Host\"",
",",
"namespaceName",
"+",
"\".servicebus.windows.net \"",
")",
";",
"StringEntity",
"entity",
"=",
"new",
"StringEntity",
"(",
"encoded",
")",
";",
"httpPost",
".",
"setEntity",
"(",
"entity",
")",
";",
"HttpResponse",
"response",
"=",
"httpclient",
".",
"execute",
"(",
"httpPost",
")",
";",
"StatusLine",
"status",
"=",
"response",
".",
"getStatusLine",
"(",
")",
";",
"HttpEntity",
"entity2",
"=",
"response",
".",
"getEntity",
"(",
")",
";",
"// do something useful with the response body",
"// and ensure it is fully consumed",
"EntityUtils",
".",
"consume",
"(",
"entity2",
")",
";",
"int",
"returnCode",
"=",
"status",
".",
"getStatusCode",
"(",
")",
";",
"if",
"(",
"returnCode",
"!=",
"HttpStatus",
".",
"SC_CREATED",
")",
"{",
"LOG",
".",
"error",
"(",
"new",
"IOException",
"(",
"status",
".",
"getReasonPhrase",
"(",
")",
")",
".",
"toString",
"(",
")",
")",
";",
"throw",
"new",
"IOException",
"(",
"status",
".",
"getReasonPhrase",
"(",
")",
")",
";",
"}",
"return",
"returnCode",
";",
"}"
] | Send an encoded string to the Eventhub using the POST method | [
"Send",
"an",
"encoded",
"string",
"to",
"the",
"Eventhub",
"using",
"post",
"method"
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-modules/gobblin-eventhub/src/main/java/org/apache/gobblin/eventhub/writer/EventhubDataWriter.java#L209-L233 |
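The HTTP leg uses Apache HttpClient 4.x: set headers, post a `StringEntity`, drain the response entity so the connection can be reused, and treat anything other than `201 Created` as an error. A hedged self-contained version; the URI and header values are placeholders, not the record's `targetURI`:

```java
import java.io.IOException;
import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.HttpStatus;
import org.apache.http.StatusLine;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.util.EntityUtils;

public class JsonPoster {
  private final HttpClient httpclient = HttpClients.createDefault();

  public int post(String uri, String authHeader, String body) throws IOException {
    HttpPost httpPost = new HttpPost(uri);
    httpPost.setHeader("Content-type", "application/vnd.microsoft.servicebus.json");
    httpPost.setHeader("Authorization", authHeader);
    httpPost.setEntity(new StringEntity(body));

    HttpResponse response = httpclient.execute(httpPost);
    StatusLine status = response.getStatusLine();
    HttpEntity entity = response.getEntity();
    EntityUtils.consume(entity); // fully consume the body so the connection is reusable

    if (status.getStatusCode() != HttpStatus.SC_CREATED) {
      throw new IOException(status.getReasonPhrase());
    }
    return status.getStatusCode();
  }
}
```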
25,781 | apache/incubator-gobblin | gobblin-modules/gobblin-eventhub/src/main/java/org/apache/gobblin/eventhub/writer/EventhubDataWriter.java | EventhubDataWriter.encodeBatch | private String encodeBatch (Batch<String> batch) throws IOException {
// Convert original json object to a new json object with format {"Body": "originalJson"}
// Add new json object to an array and send the whole array to eventhub using REST api
// Refer to https://docs.microsoft.com/en-us/rest/api/eventhub/send-batch-events
List<String> records = batch.getRecords();
ArrayList<EventhubRequest> arrayList = new ArrayList<>();
for (String record: records) {
arrayList.add(new EventhubRequest(record));
}
return mapper.writeValueAsString (arrayList);
} | java | private String encodeBatch (Batch<String> batch) throws IOException {
// Convert original json object to a new json object with format {"Body": "originalJson"}
// Add new json object to an array and send the whole array to eventhub using REST api
// Refer to https://docs.microsoft.com/en-us/rest/api/eventhub/send-batch-events
List<String> records = batch.getRecords();
ArrayList<EventhubRequest> arrayList = new ArrayList<>();
for (String record: records) {
arrayList.add(new EventhubRequest(record));
}
return mapper.writeValueAsString (arrayList);
} | [
"private",
"String",
"encodeBatch",
"(",
"Batch",
"<",
"String",
">",
"batch",
")",
"throws",
"IOException",
"{",
"// Convert original json object to a new json object with format {\"Body\": \"originalJson\"}",
"// Add new json object to an array and send the whole array to eventhub using REST api",
"// Refer to https://docs.microsoft.com/en-us/rest/api/eventhub/send-batch-events",
"List",
"<",
"String",
">",
"records",
"=",
"batch",
".",
"getRecords",
"(",
")",
";",
"ArrayList",
"<",
"EventhubRequest",
">",
"arrayList",
"=",
"new",
"ArrayList",
"<>",
"(",
")",
";",
"for",
"(",
"String",
"record",
":",
"records",
")",
"{",
"arrayList",
".",
"add",
"(",
"new",
"EventhubRequest",
"(",
"record",
")",
")",
";",
"}",
"return",
"mapper",
".",
"writeValueAsString",
"(",
"arrayList",
")",
";",
"}"
] | Each record of the batch is wrapped in a 'Body' json object;
the wrapped objects are put into an array, and the whole array is encoded | [
"Each",
"record",
"of",
"batch",
"is",
"wrapped",
"by",
"a",
"Body",
"json",
"object",
"put",
"this",
"new",
"object",
"into",
"an",
"array",
"encode",
"the",
"whole",
"array"
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-modules/gobblin-eventhub/src/main/java/org/apache/gobblin/eventhub/writer/EventhubDataWriter.java#L239-L251 |
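The `{"Body": ...}` wrapping described in the comments is ordinary Jackson serialization of wrapper objects, per the Event Hubs send-batch REST contract. A minimal sketch in which `BodyWrapper` is a hypothetical stand-in for `EventhubRequest` (whose fields the record does not show):

```java
import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.ArrayList;
import java.util.List;

public class BodyWrapperDemo {
  // Hypothetical stand-in for EventhubRequest: a single public "Body" property.
  public static class BodyWrapper {
    public String Body;
    public BodyWrapper(String body) { this.Body = body; }
  }

  public static void main(String[] args) throws Exception {
    ObjectMapper mapper = new ObjectMapper();
    List<BodyWrapper> batch = new ArrayList<>();
    batch.add(new BodyWrapper("{\"id\":1}"));
    batch.add(new BodyWrapper("{\"id\":2}"));
    // Prints: [{"Body":"{\"id\":1}"},{"Body":"{\"id\":2}"}]
    System.out.println(mapper.writeValueAsString(batch));
  }
}
```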
25,782 | apache/incubator-gobblin | gobblin-modules/gobblin-eventhub/src/main/java/org/apache/gobblin/eventhub/writer/EventhubDataWriter.java | EventhubDataWriter.encodeRecord | private String encodeRecord (String record)throws IOException {
// Convert original json object to a new json object with format {"Body": "originalJson"}
// Add new json object to an array and send the whole array to eventhub using REST api
// Refer to https://docs.microsoft.com/en-us/rest/api/eventhub/send-batch-events
ArrayList<EventhubRequest> arrayList = new ArrayList<>();
arrayList.add(new EventhubRequest(record));
return mapper.writeValueAsString (arrayList);
} | java | private String encodeRecord (String record)throws IOException {
// Convert original json object to a new json object with format {"Body": "originalJson"}
// Add new json object to an array and send the whole array to eventhub using REST api
// Refer to https://docs.microsoft.com/en-us/rest/api/eventhub/send-batch-events
ArrayList<EventhubRequest> arrayList = new ArrayList<>();
arrayList.add(new EventhubRequest(record));
return mapper.writeValueAsString (arrayList);
} | [
"private",
"String",
"encodeRecord",
"(",
"String",
"record",
")",
"throws",
"IOException",
"{",
"// Convert original json object to a new json object with format {\"Body\": \"originalJson\"}",
"// Add new json object to an array and send the whole array to eventhub using REST api",
"// Refer to https://docs.microsoft.com/en-us/rest/api/eventhub/send-batch-events",
"ArrayList",
"<",
"EventhubRequest",
">",
"arrayList",
"=",
"new",
"ArrayList",
"<>",
"(",
")",
";",
"arrayList",
".",
"add",
"(",
"new",
"EventhubRequest",
"(",
"record",
")",
")",
";",
"return",
"mapper",
".",
"writeValueAsString",
"(",
"arrayList",
")",
";",
"}"
] | A single record is wrapped in a 'Body' json object,
and this json object is encoded | [
"A",
"single",
"record",
"is",
"wrapped",
"by",
"a",
"Body",
"json",
"object",
"encode",
"this",
"json",
"object"
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-modules/gobblin-eventhub/src/main/java/org/apache/gobblin/eventhub/writer/EventhubDataWriter.java#L257-L265 |
25,783 | apache/incubator-gobblin | gobblin-core-base/src/main/java/org/apache/gobblin/writer/FineGrainedWatermarkTracker.java | FineGrainedWatermarkTracker.checkStability | private void checkStability() {
if ((_watermarksInserted.getCount() - _watermarksSwept.getCount()) > _watermarkLagThreshold) {
log.error("Setting abort flag for Watermark tracking because the lag between the "
+ "watermarksInserted: {} and watermarksSwept: {} is greater than the threshold: {}",
_watermarksInserted.getCount(), _watermarksSwept.getCount(), _watermarkLagThreshold);
_abort.set(true);
}
} | java | private void checkStability() {
if ((_watermarksInserted.getCount() - _watermarksSwept.getCount()) > _watermarkLagThreshold) {
log.error("Setting abort flag for Watermark tracking because the lag between the "
+ "watermarksInserted: {} and watermarksSwept: {} is greater than the threshold: {}",
_watermarksInserted.getCount(), _watermarksSwept.getCount(), _watermarkLagThreshold);
_abort.set(true);
}
} | [
"private",
"void",
"checkStability",
"(",
")",
"{",
"if",
"(",
"(",
"_watermarksInserted",
".",
"getCount",
"(",
")",
"-",
"_watermarksSwept",
".",
"getCount",
"(",
")",
")",
">",
"_watermarkLagThreshold",
")",
"{",
"log",
".",
"error",
"(",
"\"Setting abort flag for Watermark tracking because the lag between the \"",
"+",
"\"watermarksInserted: {} and watermarksSwept: {} is greater than the threshold: {}\"",
",",
"_watermarksInserted",
".",
"getCount",
"(",
")",
",",
"_watermarksSwept",
".",
"getCount",
"(",
")",
",",
"_watermarkLagThreshold",
")",
";",
"_abort",
".",
"set",
"(",
"true",
")",
";",
"}",
"}"
] | Check if the memory footprint of the data structure is within bounds | [
"Check",
"if",
"the",
"memory",
"footprint",
"of",
"the",
"data",
"structure",
"is",
"within",
"bounds"
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-core-base/src/main/java/org/apache/gobblin/writer/FineGrainedWatermarkTracker.java#L173-L180 |
25,784 | apache/incubator-gobblin | gobblin-core-base/src/main/java/org/apache/gobblin/writer/FineGrainedWatermarkTracker.java | FineGrainedWatermarkTracker.start | public synchronized void start() {
if (!_started.get()) {
_executorService = new ScheduledThreadPoolExecutor(1,
ExecutorsUtils.newThreadFactory(Optional.of(LoggerFactory.getLogger(FineGrainedWatermarkTracker.class))));
_executorService.scheduleAtFixedRate(_sweeper, 0, _sweepIntervalMillis, TimeUnit.MILLISECONDS);
_executorService.scheduleAtFixedRate(_stabilityChecker, 0, _stabilityCheckIntervalMillis, TimeUnit.MILLISECONDS);
}
_started.set(true);
} | java | public synchronized void start() {
if (!_started.get()) {
_executorService = new ScheduledThreadPoolExecutor(1,
ExecutorsUtils.newThreadFactory(Optional.of(LoggerFactory.getLogger(FineGrainedWatermarkTracker.class))));
_executorService.scheduleAtFixedRate(_sweeper, 0, _sweepIntervalMillis, TimeUnit.MILLISECONDS);
_executorService.scheduleAtFixedRate(_stabilityChecker, 0, _stabilityCheckIntervalMillis, TimeUnit.MILLISECONDS);
}
_started.set(true);
} | [
"public",
"synchronized",
"void",
"start",
"(",
")",
"{",
"if",
"(",
"!",
"_started",
".",
"get",
"(",
")",
")",
"{",
"_executorService",
"=",
"new",
"ScheduledThreadPoolExecutor",
"(",
"1",
",",
"ExecutorsUtils",
".",
"newThreadFactory",
"(",
"Optional",
".",
"of",
"(",
"LoggerFactory",
".",
"getLogger",
"(",
"FineGrainedWatermarkTracker",
".",
"class",
")",
")",
")",
")",
";",
"_executorService",
".",
"scheduleAtFixedRate",
"(",
"_sweeper",
",",
"0",
",",
"_sweepIntervalMillis",
",",
"TimeUnit",
".",
"MILLISECONDS",
")",
";",
"_executorService",
".",
"scheduleAtFixedRate",
"(",
"_stabilityChecker",
",",
"0",
",",
"_stabilityCheckIntervalMillis",
",",
"TimeUnit",
".",
"MILLISECONDS",
")",
";",
"}",
"_started",
".",
"set",
"(",
"true",
")",
";",
"}"
] | Schedule the sweeper and stability checkers | [
"Schedule",
"the",
"sweeper",
"and",
"stability",
"checkers"
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-core-base/src/main/java/org/apache/gobblin/writer/FineGrainedWatermarkTracker.java#L225-L233 |
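The `start` method is an idempotent-start pattern: an `AtomicBoolean` guards against scheduling the two fixed-rate tasks twice. A compact generic sketch of the same shape (the runnables and intervals are parameters here rather than fields):

```java
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

public class PeriodicService {
  private final AtomicBoolean started = new AtomicBoolean(false);
  private ScheduledThreadPoolExecutor executor;

  public synchronized void start(Runnable sweeper, long sweepMillis,
                                 Runnable stabilityChecker, long checkMillis) {
    if (!started.get()) {
      executor = new ScheduledThreadPoolExecutor(1);
      executor.scheduleAtFixedRate(sweeper, 0, sweepMillis, TimeUnit.MILLISECONDS);
      executor.scheduleAtFixedRate(stabilityChecker, 0, checkMillis, TimeUnit.MILLISECONDS);
    }
    started.set(true); // a second call to start() is a no-op
  }

  public synchronized void stop() {
    if (executor != null) {
      executor.shutdownNow();
    }
    started.set(false);
  }
}
```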
25,785 | apache/incubator-gobblin | gobblin-core-base/src/main/java/org/apache/gobblin/writer/FineGrainedWatermarkTracker.java | FineGrainedWatermarkTracker.sweep | @VisibleForTesting
synchronized int sweep() {
long startTime = System.nanoTime();
int swept = 0;
for (Map.Entry<String, Deque<AcknowledgableWatermark>> entry : _watermarksMap.entrySet()) {
Deque<AcknowledgableWatermark> watermarks = entry.getValue();
/**
* Keep popping acked elements from the front as long as their next element is also acked.
* So: Acked_A -> Acked_B -> Not-Acked_C -> ... becomes
* Acked_B -> Not-Acked_C -> ...
*
* We keep the acked element around because that represents the highest contiguous acked watermark.
*/
boolean continueIteration = true;
while (continueIteration) {
Iterator<AcknowledgableWatermark> iter = watermarks.iterator();
if (!iter.hasNext()) { // null
continueIteration = false;
continue;
}
AcknowledgableWatermark first = iter.next();
if (first.isAcked()) {
if (!iter.hasNext()) { // Acked_A -> null
continueIteration = false;
continue;
}
AcknowledgableWatermark second = iter.next();
if ((second != null) && second.isAcked()) { // Acked_A -> Acked_B -> ...
watermarks.pop();
swept++;
} else { // Acked_A -> Not_Acked_B
continueIteration = false;
}
} else { // Not_Acked_A -> ..
continueIteration = false;
}
}
}
long duration = (System.nanoTime() - startTime)/ MILLIS_TO_NANOS;
log.debug("Swept {} watermarks in {} millis", swept, duration);
_watermarksSwept.mark(swept);
return swept;
} | java | @VisibleForTesting
synchronized int sweep() {
long startTime = System.nanoTime();
int swept = 0;
for (Map.Entry<String, Deque<AcknowledgableWatermark>> entry : _watermarksMap.entrySet()) {
Deque<AcknowledgableWatermark> watermarks = entry.getValue();
/**
* Keep popping acked elements from the front as long as their next element is also acked.
* So: Acked_A -> Acked_B -> Not-Acked_C -> ... becomes
* Acked_B -> Not-Acked_C -> ...
*
* We keep the acked element around because that represents the highest contiguous acked watermark.
*/
boolean continueIteration = true;
while (continueIteration) {
Iterator<AcknowledgableWatermark> iter = watermarks.iterator();
if (!iter.hasNext()) { // null
continueIteration = false;
continue;
}
AcknowledgableWatermark first = iter.next();
if (first.isAcked()) {
if (!iter.hasNext()) { // Acked_A -> null
continueIteration = false;
continue;
}
AcknowledgableWatermark second = iter.next();
if ((second != null) && second.isAcked()) { // Acked_A -> Acked_B -> ...
watermarks.pop();
swept++;
} else { // Acked_A -> Not_Acked_B
continueIteration = false;
}
} else { // Not_Acked_A -> ..
continueIteration = false;
}
}
}
long duration = (System.nanoTime() - startTime)/ MILLIS_TO_NANOS;
log.debug("Swept {} watermarks in {} millis", swept, duration);
_watermarksSwept.mark(swept);
return swept;
} | [
"@",
"VisibleForTesting",
"synchronized",
"int",
"sweep",
"(",
")",
"{",
"long",
"startTime",
"=",
"System",
".",
"nanoTime",
"(",
")",
";",
"int",
"swept",
"=",
"0",
";",
"for",
"(",
"Map",
".",
"Entry",
"<",
"String",
",",
"Deque",
"<",
"AcknowledgableWatermark",
">",
">",
"entry",
":",
"_watermarksMap",
".",
"entrySet",
"(",
")",
")",
"{",
"Deque",
"<",
"AcknowledgableWatermark",
">",
"watermarks",
"=",
"entry",
".",
"getValue",
"(",
")",
";",
"/**\n * Keep popping acked elements from the front as long as their next element is also acked.\n * So: Acked_A -> Acked_B -> Not-Acked_C -> ... becomes\n * Acked_B -> Not-Acked_C -> ...\n *\n * We keep the acked element around because that represents the highest contiguous acked watermark.\n */",
"boolean",
"continueIteration",
"=",
"true",
";",
"while",
"(",
"continueIteration",
")",
"{",
"Iterator",
"<",
"AcknowledgableWatermark",
">",
"iter",
"=",
"watermarks",
".",
"iterator",
"(",
")",
";",
"if",
"(",
"!",
"iter",
".",
"hasNext",
"(",
")",
")",
"{",
"// null",
"continueIteration",
"=",
"false",
";",
"continue",
";",
"}",
"AcknowledgableWatermark",
"first",
"=",
"iter",
".",
"next",
"(",
")",
";",
"if",
"(",
"first",
".",
"isAcked",
"(",
")",
")",
"{",
"if",
"(",
"!",
"iter",
".",
"hasNext",
"(",
")",
")",
"{",
"// Acked_A -> null",
"continueIteration",
"=",
"false",
";",
"continue",
";",
"}",
"AcknowledgableWatermark",
"second",
"=",
"iter",
".",
"next",
"(",
")",
";",
"if",
"(",
"(",
"second",
"!=",
"null",
")",
"&&",
"second",
".",
"isAcked",
"(",
")",
")",
"{",
"// Acked_A -> Acked_B -> ...",
"watermarks",
".",
"pop",
"(",
")",
";",
"swept",
"++",
";",
"}",
"else",
"{",
"// Acked_A -> Not_Acked_B",
"continueIteration",
"=",
"false",
";",
"}",
"}",
"else",
"{",
"// Not_Acked_A -> ..",
"continueIteration",
"=",
"false",
";",
"}",
"}",
"}",
"long",
"duration",
"=",
"(",
"System",
".",
"nanoTime",
"(",
")",
"-",
"startTime",
")",
"/",
"MILLIS_TO_NANOS",
";",
"log",
".",
"debug",
"(",
"\"Swept {} watermarks in {} millis\"",
",",
"swept",
",",
"duration",
")",
";",
"_watermarksSwept",
".",
"mark",
"(",
"swept",
")",
";",
"return",
"swept",
";",
"}"
] | A helper method to garbage collect acknowledged watermarks
@return number of elements collected | [
"A",
"helper",
"method",
"to",
"garbage",
"collect",
"acknowledged",
"watermarks"
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-core-base/src/main/java/org/apache/gobblin/writer/FineGrainedWatermarkTracker.java#L251-L294 |
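The sweep's invariant is worth restating: the head of each deque is kept at the highest contiguous acknowledged watermark, so an acked element is popped only when its successor is also acked. A reduced sketch over plain booleans (the real deque holds `AcknowledgableWatermark`s) that preserves the same loop structure:

```java
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.Iterator;
import java.util.List;

public class SweepDemo {
  /** Pops acked heads while the following element is also acked; returns the count popped. */
  static int sweep(Deque<Boolean> watermarks) {
    int swept = 0;
    while (true) {
      Iterator<Boolean> it = watermarks.iterator();
      if (!it.hasNext()) break;            // empty deque
      boolean first = it.next();
      if (!first || !it.hasNext()) break;  // head unacked, or acked head has no successor
      if (it.next()) {                     // Acked -> Acked: the head is safe to drop
        watermarks.pop();
        swept++;
      } else {
        break;                             // Acked -> Unacked: keep the acked head around
      }
    }
    return swept;
  }

  public static void main(String[] args) {
    Deque<Boolean> d = new ArrayDeque<>(List.of(true, true, true, false, true));
    System.out.println(sweep(d)); // 2; the deque becomes [true, false, true]
  }
}
```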
25,786 | apache/incubator-gobblin | gobblin-api/src/main/java/org/apache/gobblin/dataset/PartitionDescriptor.java | PartitionDescriptor.toPartitionJsonList | public static String toPartitionJsonList(List<PartitionDescriptor> descriptors) {
return Descriptor.GSON.toJson(descriptors, DESCRIPTOR_LIST_TYPE);
} | java | public static String toPartitionJsonList(List<PartitionDescriptor> descriptors) {
return Descriptor.GSON.toJson(descriptors, DESCRIPTOR_LIST_TYPE);
} | [
"public",
"static",
"String",
"toPartitionJsonList",
"(",
"List",
"<",
"PartitionDescriptor",
">",
"descriptors",
")",
"{",
"return",
"Descriptor",
".",
"GSON",
".",
"toJson",
"(",
"descriptors",
",",
"DESCRIPTOR_LIST_TYPE",
")",
";",
"}"
] | Serialize a list of partition descriptors as a JSON string | [
"Serialize",
"a",
"list",
"of",
"partition",
"descriptors",
"as",
"json",
"string"
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-api/src/main/java/org/apache/gobblin/dataset/PartitionDescriptor.java#L77-L79 |
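Serializing a generic `List<T>` with Gson requires an explicit `Type` to survive erasure, which is what the `DESCRIPTOR_LIST_TYPE` constant above supplies. A generic sketch with a stand-in element class:

```java
import com.google.gson.Gson;
import com.google.gson.reflect.TypeToken;
import java.lang.reflect.Type;
import java.util.Arrays;
import java.util.List;

public class GsonListDemo {
  static class Partition {
    final String name;
    Partition(String name) { this.name = name; }
  }

  private static final Type LIST_TYPE = new TypeToken<List<Partition>>() {}.getType();
  private static final Gson GSON = new Gson();

  public static void main(String[] args) {
    List<Partition> parts = Arrays.asList(new Partition("2024-01"), new Partition("2024-02"));
    String json = GSON.toJson(parts, LIST_TYPE);    // [{"name":"2024-01"},{"name":"2024-02"}]
    List<Partition> back = GSON.fromJson(json, LIST_TYPE);
    System.out.println(back.size());                // 2
  }
}
```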
25,787 | apache/incubator-gobblin | gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/CompactionJobConfigurator.java | CompactionJobConfigurator.createJob | public Job createJob(FileSystemDataset dataset) throws IOException {
Configuration conf = HadoopUtils.getConfFromState(state);
// Turn on mapreduce output compression by default
if (conf.get("mapreduce.output.fileoutputformat.compress") == null && conf.get("mapred.output.compress") == null) {
conf.setBoolean("mapreduce.output.fileoutputformat.compress", true);
}
// Disable delegation token cancellation by default
if (conf.get("mapreduce.job.complete.cancel.delegation.tokens") == null) {
conf.setBoolean("mapreduce.job.complete.cancel.delegation.tokens", false);
}
addJars(conf, this.state, fs);
Job job = Job.getInstance(conf);
job.setJobName(MRCompactorJobRunner.HADOOP_JOB_NAME);
boolean emptyDirectoryFlag = this.configureInputAndOutputPaths(job, dataset);
if (emptyDirectoryFlag) {
this.state.setProp(HiveRegistrationPolicy.MAPREDUCE_JOB_INPUT_PATH_EMPTY_KEY, true);
}
this.configureMapper(job);
this.configureReducer(job);
if (emptyDirectoryFlag || !this.shouldDeduplicate) {
job.setNumReduceTasks(0);
}
// Configure schema at the last step because FilesInputFormat will be used internally
this.configureSchema(job);
this.isJobCreated = true;
this.configuredJob = job;
return job;
} | java | public Job createJob(FileSystemDataset dataset) throws IOException {
Configuration conf = HadoopUtils.getConfFromState(state);
// Turn on mapreduce output compression by default
if (conf.get("mapreduce.output.fileoutputformat.compress") == null && conf.get("mapred.output.compress") == null) {
conf.setBoolean("mapreduce.output.fileoutputformat.compress", true);
}
// Disable delegation token cancellation by default
if (conf.get("mapreduce.job.complete.cancel.delegation.tokens") == null) {
conf.setBoolean("mapreduce.job.complete.cancel.delegation.tokens", false);
}
addJars(conf, this.state, fs);
Job job = Job.getInstance(conf);
job.setJobName(MRCompactorJobRunner.HADOOP_JOB_NAME);
boolean emptyDirectoryFlag = this.configureInputAndOutputPaths(job, dataset);
if (emptyDirectoryFlag) {
this.state.setProp(HiveRegistrationPolicy.MAPREDUCE_JOB_INPUT_PATH_EMPTY_KEY, true);
}
this.configureMapper(job);
this.configureReducer(job);
if (emptyDirectoryFlag || !this.shouldDeduplicate) {
job.setNumReduceTasks(0);
}
// Configure schema at the last step because FilesInputFormat will be used internally
this.configureSchema(job);
this.isJobCreated = true;
this.configuredJob = job;
return job;
} | [
"public",
"Job",
"createJob",
"(",
"FileSystemDataset",
"dataset",
")",
"throws",
"IOException",
"{",
"Configuration",
"conf",
"=",
"HadoopUtils",
".",
"getConfFromState",
"(",
"state",
")",
";",
"// Turn on mapreduce output compression by default",
"if",
"(",
"conf",
".",
"get",
"(",
"\"mapreduce.output.fileoutputformat.compress\"",
")",
"==",
"null",
"&&",
"conf",
".",
"get",
"(",
"\"mapred.output.compress\"",
")",
"==",
"null",
")",
"{",
"conf",
".",
"setBoolean",
"(",
"\"mapreduce.output.fileoutputformat.compress\"",
",",
"true",
")",
";",
"}",
"// Disable delegation token cancellation by default",
"if",
"(",
"conf",
".",
"get",
"(",
"\"mapreduce.job.complete.cancel.delegation.tokens\"",
")",
"==",
"null",
")",
"{",
"conf",
".",
"setBoolean",
"(",
"\"mapreduce.job.complete.cancel.delegation.tokens\"",
",",
"false",
")",
";",
"}",
"addJars",
"(",
"conf",
",",
"this",
".",
"state",
",",
"fs",
")",
";",
"Job",
"job",
"=",
"Job",
".",
"getInstance",
"(",
"conf",
")",
";",
"job",
".",
"setJobName",
"(",
"MRCompactorJobRunner",
".",
"HADOOP_JOB_NAME",
")",
";",
"boolean",
"emptyDirectoryFlag",
"=",
"this",
".",
"configureInputAndOutputPaths",
"(",
"job",
",",
"dataset",
")",
";",
"if",
"(",
"emptyDirectoryFlag",
")",
"{",
"this",
".",
"state",
".",
"setProp",
"(",
"HiveRegistrationPolicy",
".",
"MAPREDUCE_JOB_INPUT_PATH_EMPTY_KEY",
",",
"true",
")",
";",
"}",
"this",
".",
"configureMapper",
"(",
"job",
")",
";",
"this",
".",
"configureReducer",
"(",
"job",
")",
";",
"if",
"(",
"emptyDirectoryFlag",
"||",
"!",
"this",
".",
"shouldDeduplicate",
")",
"{",
"job",
".",
"setNumReduceTasks",
"(",
"0",
")",
";",
"}",
"// Configure schema at the last step because FilesInputFormat will be used internally",
"this",
".",
"configureSchema",
"(",
"job",
")",
";",
"this",
".",
"isJobCreated",
"=",
"true",
";",
"this",
".",
"configuredJob",
"=",
"job",
";",
"return",
"job",
";",
"}"
] | Customized MR job creation for Avro.
@param dataset A path or directory which needs compaction
@return A configured map-reduce job for avro compaction | [
"Customized",
"MR",
"job",
"creation",
"for",
"Avro",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/CompactionJobConfigurator.java#L125-L155 |
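`createJob` applies its compression and delegation-token defaults only when the user has set neither the current key nor its deprecated spelling. The set-if-absent idiom in isolation:

```java
import org.apache.hadoop.conf.Configuration;

public class ConfDefaultsDemo {
  /** Enable output compression unless either spelling of the key is already set. */
  static void applyCompressionDefault(Configuration conf) {
    if (conf.get("mapreduce.output.fileoutputformat.compress") == null
        && conf.get("mapred.output.compress") == null) {
      conf.setBoolean("mapreduce.output.fileoutputformat.compress", true);
    }
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration(false); // skip default resource files
    applyCompressionDefault(conf);
    System.out.println(conf.getBoolean("mapreduce.output.fileoutputformat.compress", false)); // true
  }
}
```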
25,788 | apache/incubator-gobblin | gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/CompactionJobConfigurator.java | CompactionJobConfigurator.concatPaths | private Path concatPaths(String... names) {
if (names == null || names.length == 0) {
return null;
}
Path cur = new Path(names[0]);
for (int i = 1; i < names.length; ++i) {
cur = new Path(cur, new Path(names[i]));
}
return cur;
} | java | private Path concatPaths(String... names) {
if (names == null || names.length == 0) {
return null;
}
Path cur = new Path(names[0]);
for (int i = 1; i < names.length; ++i) {
cur = new Path(cur, new Path(names[i]));
}
return cur;
} | [
"private",
"Path",
"concatPaths",
"(",
"String",
"...",
"names",
")",
"{",
"if",
"(",
"names",
"==",
"null",
"||",
"names",
".",
"length",
"==",
"0",
")",
"{",
"return",
"null",
";",
"}",
"Path",
"cur",
"=",
"new",
"Path",
"(",
"names",
"[",
"0",
"]",
")",
";",
"for",
"(",
"int",
"i",
"=",
"1",
";",
"i",
"<",
"names",
".",
"length",
";",
"++",
"i",
")",
"{",
"cur",
"=",
"new",
"Path",
"(",
"cur",
",",
"new",
"Path",
"(",
"names",
"[",
"i",
"]",
")",
")",
";",
"}",
"return",
"cur",
";",
"}"
] | Concatenate multiple directory or file names into one path
@return Concatenated path or null if the parameter is empty | [
"Concatenate",
"multiple",
"directory",
"or",
"file",
"names",
"into",
"one",
"path"
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/CompactionJobConfigurator.java#L263-L272 |
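`concatPaths` folds the names left to right with Hadoop's two-argument `Path` constructor, so each name is resolved relative to the accumulated path and empty input yields `null`. A usage sketch under that reading:

```java
import org.apache.hadoop.fs.Path;

public class ConcatPathsDemo {
  static Path concatPaths(String... names) {
    if (names == null || names.length == 0) {
      return null;
    }
    Path cur = new Path(names[0]);
    for (int i = 1; i < names.length; ++i) {
      cur = new Path(cur, new Path(names[i])); // child resolved against the running path
    }
    return cur;
  }

  public static void main(String[] args) {
    System.out.println(concatPaths("/data", "2024", "01")); // /data/2024/01
    System.out.println(concatPaths());                      // null
  }
}
```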
25,789 | apache/incubator-gobblin | gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/CompactionJobConfigurator.java | CompactionJobConfigurator.getGranularInputPaths | protected Collection<Path> getGranularInputPaths(Path path) throws IOException {
boolean appendDelta = this.state.getPropAsBoolean(MRCompactor.COMPACTION_RENAME_SOURCE_DIR_ENABLED,
MRCompactor.DEFAULT_COMPACTION_RENAME_SOURCE_DIR_ENABLED);
Set<Path> uncompacted = Sets.newHashSet();
Set<Path> total = Sets.newHashSet();
for (FileStatus fileStatus : FileListUtils.listFilesRecursively(fs, path)) {
if (appendDelta) {
// use source dir suffix to identify the delta input paths
if (!fileStatus.getPath().getParent().toString().endsWith(MRCompactor.COMPACTION_RENAME_SOURCE_DIR_SUFFIX)) {
uncompacted.add(fileStatus.getPath().getParent());
}
total.add(fileStatus.getPath().getParent());
} else {
uncompacted.add(fileStatus.getPath().getParent());
}
}
if (appendDelta) {
// When the output record count from mr counter doesn't match
// the record count from input file names, we prefer file names because
// it will be used to calculate the difference of count in next run.
this.fileNameRecordCount = new InputRecordCountHelper(this.state).calculateRecordCount(total);
log.info("{} has total input record count (based on file name) {}", path, this.fileNameRecordCount);
}
return uncompacted;
} | java | protected Collection<Path> getGranularInputPaths(Path path) throws IOException {
boolean appendDelta = this.state.getPropAsBoolean(MRCompactor.COMPACTION_RENAME_SOURCE_DIR_ENABLED,
MRCompactor.DEFAULT_COMPACTION_RENAME_SOURCE_DIR_ENABLED);
Set<Path> uncompacted = Sets.newHashSet();
Set<Path> total = Sets.newHashSet();
for (FileStatus fileStatus : FileListUtils.listFilesRecursively(fs, path)) {
if (appendDelta) {
// use source dir suffix to identify the delta input paths
if (!fileStatus.getPath().getParent().toString().endsWith(MRCompactor.COMPACTION_RENAME_SOURCE_DIR_SUFFIX)) {
uncompacted.add(fileStatus.getPath().getParent());
}
total.add(fileStatus.getPath().getParent());
} else {
uncompacted.add(fileStatus.getPath().getParent());
}
}
if (appendDelta) {
// When the output record count from mr counter doesn't match
// the record count from input file names, we prefer file names because
// it will be used to calculate the difference of count in next run.
this.fileNameRecordCount = new InputRecordCountHelper(this.state).calculateRecordCount(total);
log.info("{} has total input record count (based on file name) {}", path, this.fileNameRecordCount);
}
return uncompacted;
} | [
"protected",
"Collection",
"<",
"Path",
">",
"getGranularInputPaths",
"(",
"Path",
"path",
")",
"throws",
"IOException",
"{",
"boolean",
"appendDelta",
"=",
"this",
".",
"state",
".",
"getPropAsBoolean",
"(",
"MRCompactor",
".",
"COMPACTION_RENAME_SOURCE_DIR_ENABLED",
",",
"MRCompactor",
".",
"DEFAULT_COMPACTION_RENAME_SOURCE_DIR_ENABLED",
")",
";",
"Set",
"<",
"Path",
">",
"uncompacted",
"=",
"Sets",
".",
"newHashSet",
"(",
")",
";",
"Set",
"<",
"Path",
">",
"total",
"=",
"Sets",
".",
"newHashSet",
"(",
")",
";",
"for",
"(",
"FileStatus",
"fileStatus",
":",
"FileListUtils",
".",
"listFilesRecursively",
"(",
"fs",
",",
"path",
")",
")",
"{",
"if",
"(",
"appendDelta",
")",
"{",
"// use source dir suffix to identify the delta input paths",
"if",
"(",
"!",
"fileStatus",
".",
"getPath",
"(",
")",
".",
"getParent",
"(",
")",
".",
"toString",
"(",
")",
".",
"endsWith",
"(",
"MRCompactor",
".",
"COMPACTION_RENAME_SOURCE_DIR_SUFFIX",
")",
")",
"{",
"uncompacted",
".",
"add",
"(",
"fileStatus",
".",
"getPath",
"(",
")",
".",
"getParent",
"(",
")",
")",
";",
"}",
"total",
".",
"add",
"(",
"fileStatus",
".",
"getPath",
"(",
")",
".",
"getParent",
"(",
")",
")",
";",
"}",
"else",
"{",
"uncompacted",
".",
"add",
"(",
"fileStatus",
".",
"getPath",
"(",
")",
".",
"getParent",
"(",
")",
")",
";",
"}",
"}",
"if",
"(",
"appendDelta",
")",
"{",
"// When the output record count from mr counter doesn't match",
"// the record count from input file names, we prefer file names because",
"// it will be used to calculate the difference of count in next run.",
"this",
".",
"fileNameRecordCount",
"=",
"new",
"InputRecordCountHelper",
"(",
"this",
".",
"state",
")",
".",
"calculateRecordCount",
"(",
"total",
")",
";",
"log",
".",
"info",
"(",
"\"{} has total input record count (based on file name) {}\"",
",",
"path",
",",
"this",
".",
"fileNameRecordCount",
")",
";",
"}",
"return",
"uncompacted",
";",
"}"
] | Converts a top level input path to a group of sub-paths according to user defined granularity.
This may be required because if the upstream application generates many sub-paths but the map-reduce
job only keeps track of the top-level path, then after the job is done we won't be able to tell whether
those newly arriving sub-paths were processed by the previous map-reduce job or not. Hence a better way
is to pre-define those sub-paths as input paths before we start to run MR. The implementation of
this method should depend on the data generation granularity controlled by upstream. Here we just
list the deepest level of containing folder as the smallest granularity.
@param path top level directory needs compaction
@return A collection of input paths which will participate in map-reduce job | [
"Converts",
"a",
"top",
"level",
"input",
"path",
"to",
"a",
"group",
"of",
"sub",
"-",
"paths",
"according",
"to",
"user",
"defined",
"granularity",
".",
"This",
"may",
"be",
"required",
"because",
"if",
"upstream",
"application",
"generates",
"many",
"sub",
"-",
"paths",
"but",
"the",
"map",
"-",
"reduce",
"job",
"only",
"keeps",
"track",
"of",
"the",
"top",
"level",
"path",
"after",
"the",
"job",
"is",
"done",
"we",
"won",
"t",
"be",
"able",
"to",
"tell",
"if",
"those",
"new",
"arriving",
"sub",
"-",
"paths",
"is",
"processed",
"by",
"previous",
"map",
"-",
"reduce",
"job",
"or",
"not",
".",
"Hence",
"a",
"better",
"way",
"is",
"to",
"pre",
"-",
"define",
"those",
"sub",
"-",
"paths",
"as",
"input",
"paths",
"before",
"we",
"start",
"to",
"run",
"MR",
".",
"The",
"implementation",
"of",
"this",
"method",
"should",
"depend",
"on",
"the",
"data",
"generation",
"granularity",
"controlled",
"by",
"upstream",
".",
"Here",
"we",
"just",
"list",
"the",
"deepest",
"level",
"of",
"containing",
"folder",
"as",
"the",
"smallest",
"granularity",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/CompactionJobConfigurator.java#L286-L315 |
25,790 | apache/incubator-gobblin | gobblin-api/src/main/java/org/apache/gobblin/fork/CopyHelper.java | CopyHelper.isImmutableType | private static boolean isImmutableType(Object thing) {
return ((thing == null)
|| (thing instanceof String)
|| (thing instanceof Integer)
|| (thing instanceof Long));
} | java | private static boolean isImmutableType(Object thing) {
return ((thing == null)
|| (thing instanceof String)
|| (thing instanceof Integer)
|| (thing instanceof Long));
} | [
"private",
"static",
"boolean",
"isImmutableType",
"(",
"Object",
"thing",
")",
"{",
"return",
"(",
"(",
"thing",
"==",
"null",
")",
"||",
"(",
"thing",
"instanceof",
"String",
")",
"||",
"(",
"thing",
"instanceof",
"Integer",
")",
"||",
"(",
"thing",
"instanceof",
"Long",
")",
")",
";",
"}"
] | Checks an object against the collection of supported immutable types for copying.
Only the types that are worth supporting as record types are kept.
@param thing: an Object being checked
@return true if supported immutable type, false otherwise | [
"Contains",
"a",
"collection",
"of",
"supported",
"immutable",
"types",
"for",
"copying",
".",
"Only",
"keep",
"the",
"types",
"that",
"are",
"worth",
"supporting",
"as",
"record",
"types",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-api/src/main/java/org/apache/gobblin/fork/CopyHelper.java#L47-L52 |
25,791 | apache/incubator-gobblin | gobblin-api/src/main/java/org/apache/gobblin/fork/CopyHelper.java | CopyHelper.copy | public static Object copy(Object thing) throws CopyNotSupportedException {
if (!isCopyable(thing)) {
throw new CopyNotSupportedException(thing.getClass().getName() + " cannot be copied. See Copyable");
}
if (thing instanceof Copyable) {
return ((Copyable) thing).copy();
}
// Support for a few primitive types out of the box
if (thing instanceof byte[]) {
byte[] copy = new byte[((byte[]) thing).length];
System.arraycopy(thing, 0, copy, 0, ((byte[]) thing).length);
return copy;
}
// Assume that everything other type is immutable, not checking this again
return thing;
} | java | public static Object copy(Object thing) throws CopyNotSupportedException {
if (!isCopyable(thing)) {
throw new CopyNotSupportedException(thing.getClass().getName() + " cannot be copied. See Copyable");
}
if (thing instanceof Copyable) {
return ((Copyable) thing).copy();
}
// Support for a few primitive types out of the box
if (thing instanceof byte[]) {
byte[] copy = new byte[((byte[]) thing).length];
System.arraycopy(thing, 0, copy, 0, ((byte[]) thing).length);
return copy;
}
// Assume that everything other type is immutable, not checking this again
return thing;
} | [
"public",
"static",
"Object",
"copy",
"(",
"Object",
"thing",
")",
"throws",
"CopyNotSupportedException",
"{",
"if",
"(",
"!",
"isCopyable",
"(",
"thing",
")",
")",
"{",
"throw",
"new",
"CopyNotSupportedException",
"(",
"thing",
".",
"getClass",
"(",
")",
".",
"getName",
"(",
")",
"+",
"\" cannot be copied. See Copyable\"",
")",
";",
"}",
"if",
"(",
"thing",
"instanceof",
"Copyable",
")",
"{",
"return",
"(",
"(",
"Copyable",
")",
"thing",
")",
".",
"copy",
"(",
")",
";",
"}",
"// Support for a few primitive types out of the box",
"if",
"(",
"thing",
"instanceof",
"byte",
"[",
"]",
")",
"{",
"byte",
"[",
"]",
"copy",
"=",
"new",
"byte",
"[",
"(",
"(",
"byte",
"[",
"]",
")",
"thing",
")",
".",
"length",
"]",
";",
"System",
".",
"arraycopy",
"(",
"thing",
",",
"0",
",",
"copy",
",",
"0",
",",
"(",
"(",
"byte",
"[",
"]",
")",
"thing",
")",
".",
"length",
")",
";",
"return",
"copy",
";",
"}",
"// Assume that everything other type is immutable, not checking this again",
"return",
"thing",
";",
"}"
] | Copy this object if needed.
@param thing : this object that needs to be copied
@return: a possibly copied instance
@throws CopyNotSupportedException if thing needs to be copied but cannot be | [
"Copy",
"this",
"object",
"if",
"needed",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-api/src/main/java/org/apache/gobblin/fork/CopyHelper.java#L60-L76 |
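Only `byte[]` gets special treatment in `copy`; `Copyable` things copy themselves and everything else is assumed immutable. The array branch is a standard defensive copy, equivalent to `Arrays.copyOf`:

```java
import java.util.Arrays;

public class DefensiveCopyDemo {
  public static void main(String[] args) {
    byte[] original = {1, 2, 3};

    // Manual copy, mirroring the System.arraycopy branch above
    byte[] manual = new byte[original.length];
    System.arraycopy(original, 0, manual, 0, original.length);

    // Equivalent one-liner
    byte[] viaUtil = Arrays.copyOf(original, original.length);

    original[0] = 9; // mutating the source leaves both copies untouched
    System.out.println(manual[0] + " " + viaUtil[0]); // prints: 1 1
  }
}
```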
25,792 | apache/incubator-gobblin | gobblin-admin/src/main/java/org/apache/gobblin/cli/JobInfoPrintUtils.java | JobInfoPrintUtils.printJobRuns | public static void printJobRuns(List<JobExecutionInfo> jobExecutionInfos) {
if (jobExecutionInfos == null) {
System.err.println("No job executions found.");
System.exit(1);
}
List<String> labels = Arrays.asList("Job Id", "State", "Schedule", "Completed Tasks", "Launched Tasks",
"Start Time", "End Time", "Duration (s)");
List<String> flags = Arrays.asList("-", "-", "-", "", "", "-", "-", "-");
List<List<String>> data = new ArrayList<>();
for (JobExecutionInfo jobInfo : jobExecutionInfos) {
List<String> entry = new ArrayList<>();
entry.add(jobInfo.getJobId());
entry.add(jobInfo.getState().toString());
entry.add(extractJobSchedule(jobInfo));
entry.add(jobInfo.getCompletedTasks().toString());
entry.add(jobInfo.getLaunchedTasks().toString());
entry.add(dateTimeFormatter.print(jobInfo.getStartTime()));
entry.add(dateTimeFormatter.print(jobInfo.getEndTime()));
entry.add(jobInfo.getState() == JobStateEnum.COMMITTED ?
decimalFormatter.format(jobInfo.getDuration() / 1000.0) : "-");
data.add(entry);
}
new CliTablePrinter.Builder()
.labels(labels)
.data(data)
.flags(flags)
.delimiterWidth(2)
.build()
.printTable();
} | java | public static void printJobRuns(List<JobExecutionInfo> jobExecutionInfos) {
if (jobExecutionInfos == null) {
System.err.println("No job executions found.");
System.exit(1);
}
List<String> labels = Arrays.asList("Job Id", "State", "Schedule", "Completed Tasks", "Launched Tasks",
"Start Time", "End Time", "Duration (s)");
List<String> flags = Arrays.asList("-", "-", "-", "", "", "-", "-", "-");
List<List<String>> data = new ArrayList<>();
for (JobExecutionInfo jobInfo : jobExecutionInfos) {
List<String> entry = new ArrayList<>();
entry.add(jobInfo.getJobId());
entry.add(jobInfo.getState().toString());
entry.add(extractJobSchedule(jobInfo));
entry.add(jobInfo.getCompletedTasks().toString());
entry.add(jobInfo.getLaunchedTasks().toString());
entry.add(dateTimeFormatter.print(jobInfo.getStartTime()));
entry.add(dateTimeFormatter.print(jobInfo.getEndTime()));
entry.add(jobInfo.getState() == JobStateEnum.COMMITTED ?
decimalFormatter.format(jobInfo.getDuration() / 1000.0) : "-");
data.add(entry);
}
new CliTablePrinter.Builder()
.labels(labels)
.data(data)
.flags(flags)
.delimiterWidth(2)
.build()
.printTable();
} | [
"public",
"static",
"void",
"printJobRuns",
"(",
"List",
"<",
"JobExecutionInfo",
">",
"jobExecutionInfos",
")",
"{",
"if",
"(",
"jobExecutionInfos",
"==",
"null",
")",
"{",
"System",
".",
"err",
".",
"println",
"(",
"\"No job executions found.\"",
")",
";",
"System",
".",
"exit",
"(",
"1",
")",
";",
"}",
"List",
"<",
"String",
">",
"labels",
"=",
"Arrays",
".",
"asList",
"(",
"\"Job Id\"",
",",
"\"State\"",
",",
"\"Schedule\"",
",",
"\"Completed Tasks\"",
",",
"\"Launched Tasks\"",
",",
"\"Start Time\"",
",",
"\"End Time\"",
",",
"\"Duration (s)\"",
")",
";",
"List",
"<",
"String",
">",
"flags",
"=",
"Arrays",
".",
"asList",
"(",
"\"-\"",
",",
"\"-\"",
",",
"\"-\"",
",",
"\"\"",
",",
"\"\"",
",",
"\"-\"",
",",
"\"-\"",
",",
"\"-\"",
")",
";",
"List",
"<",
"List",
"<",
"String",
">",
">",
"data",
"=",
"new",
"ArrayList",
"<>",
"(",
")",
";",
"for",
"(",
"JobExecutionInfo",
"jobInfo",
":",
"jobExecutionInfos",
")",
"{",
"List",
"<",
"String",
">",
"entry",
"=",
"new",
"ArrayList",
"<>",
"(",
")",
";",
"entry",
".",
"add",
"(",
"jobInfo",
".",
"getJobId",
"(",
")",
")",
";",
"entry",
".",
"add",
"(",
"jobInfo",
".",
"getState",
"(",
")",
".",
"toString",
"(",
")",
")",
";",
"entry",
".",
"add",
"(",
"extractJobSchedule",
"(",
"jobInfo",
")",
")",
";",
"entry",
".",
"add",
"(",
"jobInfo",
".",
"getCompletedTasks",
"(",
")",
".",
"toString",
"(",
")",
")",
";",
"entry",
".",
"add",
"(",
"jobInfo",
".",
"getLaunchedTasks",
"(",
")",
".",
"toString",
"(",
")",
")",
";",
"entry",
".",
"add",
"(",
"dateTimeFormatter",
".",
"print",
"(",
"jobInfo",
".",
"getStartTime",
"(",
")",
")",
")",
";",
"entry",
".",
"add",
"(",
"dateTimeFormatter",
".",
"print",
"(",
"jobInfo",
".",
"getEndTime",
"(",
")",
")",
")",
";",
"entry",
".",
"add",
"(",
"jobInfo",
".",
"getState",
"(",
")",
"==",
"JobStateEnum",
".",
"COMMITTED",
"?",
"decimalFormatter",
".",
"format",
"(",
"jobInfo",
".",
"getDuration",
"(",
")",
"/",
"1000.0",
")",
":",
"\"-\"",
")",
";",
"data",
".",
"add",
"(",
"entry",
")",
";",
"}",
"new",
"CliTablePrinter",
".",
"Builder",
"(",
")",
".",
"labels",
"(",
"labels",
")",
".",
"data",
"(",
"data",
")",
".",
"flags",
"(",
"flags",
")",
".",
"delimiterWidth",
"(",
"2",
")",
".",
"build",
"(",
")",
".",
"printTable",
"(",
")",
";",
"}"
] | Print a table describing a bunch of individual job executions.
@param jobExecutionInfos Job execution status to print | [
"Print",
"a",
"table",
"describing",
"a",
"bunch",
"of",
"individual",
"job",
"executions",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-admin/src/main/java/org/apache/gobblin/cli/JobInfoPrintUtils.java#L71-L101 |
25,793 | apache/incubator-gobblin | gobblin-admin/src/main/java/org/apache/gobblin/cli/JobInfoPrintUtils.java | JobInfoPrintUtils.printAllJobs | public static void printAllJobs(List<JobExecutionInfo> jobExecutionInfos, int resultsLimit) {
if (jobExecutionInfos == null) {
System.err.println("No jobs found.");
System.exit(1);
}
List<String> labels = Arrays.asList("Job Name", "State", "Last Run Started", "Last Run Completed",
"Schedule", "Last Run Records Processed", "Last Run Records Failed");
List<String> flags = Arrays.asList("-", "-", "-", "-", "-", "", "");
List<List<String>> data = new ArrayList<>();
for (JobExecutionInfo jobInfo : jobExecutionInfos) {
List<String> entry = new ArrayList<>();
entry.add(jobInfo.getJobName());
entry.add(jobInfo.getState().toString());
entry.add(dateTimeFormatter.print(jobInfo.getStartTime()));
entry.add(dateTimeFormatter.print(jobInfo.getEndTime()));
entry.add(extractJobSchedule(jobInfo));
// Add metrics
MetricArray metrics = jobInfo.getMetrics();
Double recordsProcessed = null;
Double recordsFailed = null;
try {
for (Metric metric : metrics) {
if (metric.getName().equals(MetricNames.ExtractorMetrics.RECORDS_READ_METER)) {
recordsProcessed = Double.parseDouble(metric.getValue());
} else if (metric.getName().equals(MetricNames.ExtractorMetrics.RECORDS_FAILED_METER)) {
recordsFailed = Double.parseDouble(metric.getValue());
}
}
if (recordsProcessed != null && recordsFailed != null) {
entry.add(recordsProcessed.toString());
entry.add(recordsFailed.toString());
}
} catch (NumberFormatException ex) {
System.err.println("Failed to process metrics");
}
if (recordsProcessed == null || recordsFailed == null) {
entry.add("-");
entry.add("-");
}
data.add(entry);
}
new CliTablePrinter.Builder()
.labels(labels)
.data(data)
.flags(flags)
.delimiterWidth(2)
.build()
.printTable();
if (jobExecutionInfos.size() == resultsLimit) {
System.out.println("\nWARNING: There may be more jobs (# of results is equal to the limit)");
}
} | java | public static void printAllJobs(List<JobExecutionInfo> jobExecutionInfos, int resultsLimit) {
if (jobExecutionInfos == null) {
System.err.println("No jobs found.");
System.exit(1);
}
List<String> labels = Arrays.asList("Job Name", "State", "Last Run Started", "Last Run Completed",
"Schedule", "Last Run Records Processed", "Last Run Records Failed");
List<String> flags = Arrays.asList("-", "-", "-", "-", "-", "", "");
List<List<String>> data = new ArrayList<>();
for (JobExecutionInfo jobInfo : jobExecutionInfos) {
List<String> entry = new ArrayList<>();
entry.add(jobInfo.getJobName());
entry.add(jobInfo.getState().toString());
entry.add(dateTimeFormatter.print(jobInfo.getStartTime()));
entry.add(dateTimeFormatter.print(jobInfo.getEndTime()));
entry.add(extractJobSchedule(jobInfo));
// Add metrics
MetricArray metrics = jobInfo.getMetrics();
Double recordsProcessed = null;
Double recordsFailed = null;
try {
for (Metric metric : metrics) {
if (metric.getName().equals(MetricNames.ExtractorMetrics.RECORDS_READ_METER)) {
recordsProcessed = Double.parseDouble(metric.getValue());
} else if (metric.getName().equals(MetricNames.ExtractorMetrics.RECORDS_FAILED_METER)) {
recordsFailed = Double.parseDouble(metric.getValue());
}
}
if (recordsProcessed != null && recordsFailed != null) {
entry.add(recordsProcessed.toString());
entry.add(recordsFailed.toString());
}
} catch (NumberFormatException ex) {
System.err.println("Failed to process metrics");
}
if (recordsProcessed == null || recordsFailed == null) {
entry.add("-");
entry.add("-");
}
data.add(entry);
}
new CliTablePrinter.Builder()
.labels(labels)
.data(data)
.flags(flags)
.delimiterWidth(2)
.build()
.printTable();
if (jobExecutionInfos.size() == resultsLimit) {
System.out.println("\nWARNING: There may be more jobs (# of results is equal to the limit)");
}
} | [
"public",
"static",
"void",
"printAllJobs",
"(",
"List",
"<",
"JobExecutionInfo",
">",
"jobExecutionInfos",
",",
"int",
"resultsLimit",
")",
"{",
"if",
"(",
"jobExecutionInfos",
"==",
"null",
")",
"{",
"System",
".",
"err",
".",
"println",
"(",
"\"No jobs found.\"",
")",
";",
"System",
".",
"exit",
"(",
"1",
")",
";",
"}",
"List",
"<",
"String",
">",
"labels",
"=",
"Arrays",
".",
"asList",
"(",
"\"Job Name\"",
",",
"\"State\"",
",",
"\"Last Run Started\"",
",",
"\"Last Run Completed\"",
",",
"\"Schedule\"",
",",
"\"Last Run Records Processed\"",
",",
"\"Last Run Records Failed\"",
")",
";",
"List",
"<",
"String",
">",
"flags",
"=",
"Arrays",
".",
"asList",
"(",
"\"-\"",
",",
"\"-\"",
",",
"\"-\"",
",",
"\"-\"",
",",
"\"-\"",
",",
"\"\"",
",",
"\"\"",
")",
";",
"List",
"<",
"List",
"<",
"String",
">",
">",
"data",
"=",
"new",
"ArrayList",
"<>",
"(",
")",
";",
"for",
"(",
"JobExecutionInfo",
"jobInfo",
":",
"jobExecutionInfos",
")",
"{",
"List",
"<",
"String",
">",
"entry",
"=",
"new",
"ArrayList",
"<>",
"(",
")",
";",
"entry",
".",
"add",
"(",
"jobInfo",
".",
"getJobName",
"(",
")",
")",
";",
"entry",
".",
"add",
"(",
"jobInfo",
".",
"getState",
"(",
")",
".",
"toString",
"(",
")",
")",
";",
"entry",
".",
"add",
"(",
"dateTimeFormatter",
".",
"print",
"(",
"jobInfo",
".",
"getStartTime",
"(",
")",
")",
")",
";",
"entry",
".",
"add",
"(",
"dateTimeFormatter",
".",
"print",
"(",
"jobInfo",
".",
"getEndTime",
"(",
")",
")",
")",
";",
"entry",
".",
"add",
"(",
"extractJobSchedule",
"(",
"jobInfo",
")",
")",
";",
"// Add metrics",
"MetricArray",
"metrics",
"=",
"jobInfo",
".",
"getMetrics",
"(",
")",
";",
"Double",
"recordsProcessed",
"=",
"null",
";",
"Double",
"recordsFailed",
"=",
"null",
";",
"try",
"{",
"for",
"(",
"Metric",
"metric",
":",
"metrics",
")",
"{",
"if",
"(",
"metric",
".",
"getName",
"(",
")",
".",
"equals",
"(",
"MetricNames",
".",
"ExtractorMetrics",
".",
"RECORDS_READ_METER",
")",
")",
"{",
"recordsProcessed",
"=",
"Double",
".",
"parseDouble",
"(",
"metric",
".",
"getValue",
"(",
")",
")",
";",
"}",
"else",
"if",
"(",
"metric",
".",
"getName",
"(",
")",
".",
"equals",
"(",
"MetricNames",
".",
"ExtractorMetrics",
".",
"RECORDS_FAILED_METER",
")",
")",
"{",
"recordsFailed",
"=",
"Double",
".",
"parseDouble",
"(",
"metric",
".",
"getValue",
"(",
")",
")",
";",
"}",
"}",
"if",
"(",
"recordsProcessed",
"!=",
"null",
"&&",
"recordsFailed",
"!=",
"null",
")",
"{",
"entry",
".",
"add",
"(",
"recordsProcessed",
".",
"toString",
"(",
")",
")",
";",
"entry",
".",
"add",
"(",
"recordsFailed",
".",
"toString",
"(",
")",
")",
";",
"}",
"}",
"catch",
"(",
"NumberFormatException",
"ex",
")",
"{",
"System",
".",
"err",
".",
"println",
"(",
"\"Failed to process metrics\"",
")",
";",
"}",
"if",
"(",
"recordsProcessed",
"==",
"null",
"||",
"recordsFailed",
"==",
"null",
")",
"{",
"entry",
".",
"add",
"(",
"\"-\"",
")",
";",
"entry",
".",
"add",
"(",
"\"-\"",
")",
";",
"}",
"data",
".",
"add",
"(",
"entry",
")",
";",
"}",
"new",
"CliTablePrinter",
".",
"Builder",
"(",
")",
".",
"labels",
"(",
"labels",
")",
".",
"data",
"(",
"data",
")",
".",
"flags",
"(",
"flags",
")",
".",
"delimiterWidth",
"(",
"2",
")",
".",
"build",
"(",
")",
".",
"printTable",
"(",
")",
";",
"if",
"(",
"jobExecutionInfos",
".",
"size",
"(",
")",
"==",
"resultsLimit",
")",
"{",
"System",
".",
"out",
".",
"println",
"(",
"\"\\nWARNING: There may be more jobs (# of results is equal to the limit)\"",
")",
";",
"}",
"}"
] | Print summary information about a bunch of jobs in the system
@param jobExecutionInfos List of jobs
@param resultsLimit original result limit | [
"Print",
"summary",
"information",
"about",
"a",
"bunch",
"of",
"jobs",
"in",
"the",
"system"
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-admin/src/main/java/org/apache/gobblin/cli/JobInfoPrintUtils.java#L108-L165 |
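A minimal sketch of calling the method above, assuming the `org.apache.gobblin.cli.JobInfoPrintUtils` and `org.apache.gobblin.rest.JobExecutionInfo` packages implied by this record's path; the empty job list and the limit of 10 are illustrative, and a real caller would pass the result of a Gobblin REST query:

import java.util.Collections;
import java.util.List;

import org.apache.gobblin.cli.JobInfoPrintUtils;
import org.apache.gobblin.rest.JobExecutionInfo;

public class PrintAllJobsDemo {
  public static void main(String[] args) {
    // In practice this list comes from the admin REST endpoint; an empty list
    // keeps the sketch self-contained (assuming the table printer tolerates no rows).
    List<JobExecutionInfo> jobs = Collections.emptyList();
    // Pass the same limit used in the query so the truncation warning stays meaningful.
    JobInfoPrintUtils.printAllJobs(jobs, 10);
  }
}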
25,794 | apache/incubator-gobblin | gobblin-admin/src/main/java/org/apache/gobblin/cli/JobInfoPrintUtils.java | JobInfoPrintUtils.printJob | public static void printJob(Optional<JobExecutionInfo> jobExecutionInfoOptional) {
if (!jobExecutionInfoOptional.isPresent()) {
System.err.println("Job id not found.");
return;
}
JobExecutionInfo jobExecutionInfo = jobExecutionInfoOptional.get();
List<List<String>> data = new ArrayList<>();
List<String> flags = Arrays.asList("", "-");
data.add(Arrays.asList("Job Name", jobExecutionInfo.getJobName()));
data.add(Arrays.asList("Job Id", jobExecutionInfo.getJobId()));
data.add(Arrays.asList("State", jobExecutionInfo.getState().toString()));
data.add(Arrays.asList("Completed/Launched Tasks",
String.format("%d/%d", jobExecutionInfo.getCompletedTasks(), jobExecutionInfo.getLaunchedTasks())));
data.add(Arrays.asList("Start Time", dateTimeFormatter.print(jobExecutionInfo.getStartTime())));
data.add(Arrays.asList("End Time", dateTimeFormatter.print(jobExecutionInfo.getEndTime())));
data.add(Arrays.asList("Duration", jobExecutionInfo.getState() == JobStateEnum.COMMITTED ? periodFormatter
.print(new Period(jobExecutionInfo.getDuration().longValue())) : "-"));
data.add(Arrays.asList("Tracking URL", jobExecutionInfo.getTrackingUrl()));
data.add(Arrays.asList("Launcher Type", jobExecutionInfo.getLauncherType().name()));
new CliTablePrinter.Builder()
.data(data)
.flags(flags)
.delimiterWidth(2)
.build()
.printTable();
JobInfoPrintUtils.printMetrics(jobExecutionInfo.getMetrics());
} | java | public static void printJob(Optional<JobExecutionInfo> jobExecutionInfoOptional) {
if (!jobExecutionInfoOptional.isPresent()) {
System.err.println("Job id not found.");
return;
}
JobExecutionInfo jobExecutionInfo = jobExecutionInfoOptional.get();
List<List<String>> data = new ArrayList<>();
List<String> flags = Arrays.asList("", "-");
data.add(Arrays.asList("Job Name", jobExecutionInfo.getJobName()));
data.add(Arrays.asList("Job Id", jobExecutionInfo.getJobId()));
data.add(Arrays.asList("State", jobExecutionInfo.getState().toString()));
data.add(Arrays.asList("Completed/Launched Tasks",
String.format("%d/%d", jobExecutionInfo.getCompletedTasks(), jobExecutionInfo.getLaunchedTasks())));
data.add(Arrays.asList("Start Time", dateTimeFormatter.print(jobExecutionInfo.getStartTime())));
data.add(Arrays.asList("End Time", dateTimeFormatter.print(jobExecutionInfo.getEndTime())));
data.add(Arrays.asList("Duration", jobExecutionInfo.getState() == JobStateEnum.COMMITTED ? periodFormatter
.print(new Period(jobExecutionInfo.getDuration().longValue())) : "-"));
data.add(Arrays.asList("Tracking URL", jobExecutionInfo.getTrackingUrl()));
data.add(Arrays.asList("Launcher Type", jobExecutionInfo.getLauncherType().name()));
new CliTablePrinter.Builder()
.data(data)
.flags(flags)
.delimiterWidth(2)
.build()
.printTable();
JobInfoPrintUtils.printMetrics(jobExecutionInfo.getMetrics());
} | [
"public",
"static",
"void",
"printJob",
"(",
"Optional",
"<",
"JobExecutionInfo",
">",
"jobExecutionInfoOptional",
")",
"{",
"if",
"(",
"!",
"jobExecutionInfoOptional",
".",
"isPresent",
"(",
")",
")",
"{",
"System",
".",
"err",
".",
"println",
"(",
"\"Job id not found.\"",
")",
";",
"return",
";",
"}",
"JobExecutionInfo",
"jobExecutionInfo",
"=",
"jobExecutionInfoOptional",
".",
"get",
"(",
")",
";",
"List",
"<",
"List",
"<",
"String",
">",
">",
"data",
"=",
"new",
"ArrayList",
"<>",
"(",
")",
";",
"List",
"<",
"String",
">",
"flags",
"=",
"Arrays",
".",
"asList",
"(",
"\"\"",
",",
"\"-\"",
")",
";",
"data",
".",
"add",
"(",
"Arrays",
".",
"asList",
"(",
"\"Job Name\"",
",",
"jobExecutionInfo",
".",
"getJobName",
"(",
")",
")",
")",
";",
"data",
".",
"add",
"(",
"Arrays",
".",
"asList",
"(",
"\"Job Id\"",
",",
"jobExecutionInfo",
".",
"getJobId",
"(",
")",
")",
")",
";",
"data",
".",
"add",
"(",
"Arrays",
".",
"asList",
"(",
"\"State\"",
",",
"jobExecutionInfo",
".",
"getState",
"(",
")",
".",
"toString",
"(",
")",
")",
")",
";",
"data",
".",
"add",
"(",
"Arrays",
".",
"asList",
"(",
"\"Completed/Launched Tasks\"",
",",
"String",
".",
"format",
"(",
"\"%d/%d\"",
",",
"jobExecutionInfo",
".",
"getCompletedTasks",
"(",
")",
",",
"jobExecutionInfo",
".",
"getLaunchedTasks",
"(",
")",
")",
")",
")",
";",
"data",
".",
"add",
"(",
"Arrays",
".",
"asList",
"(",
"\"Start Time\"",
",",
"dateTimeFormatter",
".",
"print",
"(",
"jobExecutionInfo",
".",
"getStartTime",
"(",
")",
")",
")",
")",
";",
"data",
".",
"add",
"(",
"Arrays",
".",
"asList",
"(",
"\"End Time\"",
",",
"dateTimeFormatter",
".",
"print",
"(",
"jobExecutionInfo",
".",
"getEndTime",
"(",
")",
")",
")",
")",
";",
"data",
".",
"add",
"(",
"Arrays",
".",
"asList",
"(",
"\"Duration\"",
",",
"jobExecutionInfo",
".",
"getState",
"(",
")",
"==",
"JobStateEnum",
".",
"COMMITTED",
"?",
"periodFormatter",
".",
"print",
"(",
"new",
"Period",
"(",
"jobExecutionInfo",
".",
"getDuration",
"(",
")",
".",
"longValue",
"(",
")",
")",
")",
":",
"\"-\"",
")",
")",
";",
"data",
".",
"add",
"(",
"Arrays",
".",
"asList",
"(",
"\"Tracking URL\"",
",",
"jobExecutionInfo",
".",
"getTrackingUrl",
"(",
")",
")",
")",
";",
"data",
".",
"add",
"(",
"Arrays",
".",
"asList",
"(",
"\"Launcher Type\"",
",",
"jobExecutionInfo",
".",
"getLauncherType",
"(",
")",
".",
"name",
"(",
")",
")",
")",
";",
"new",
"CliTablePrinter",
".",
"Builder",
"(",
")",
".",
"data",
"(",
"data",
")",
".",
"flags",
"(",
"flags",
")",
".",
"delimiterWidth",
"(",
"2",
")",
".",
"build",
"(",
")",
".",
"printTable",
"(",
")",
";",
"JobInfoPrintUtils",
".",
"printMetrics",
"(",
"jobExecutionInfo",
".",
"getMetrics",
"(",
")",
")",
";",
"}"
] | Print information about one specific job.
@param jobExecutionInfoOptional Job info to print | [
"Print",
"information",
"about",
"one",
"specific",
"job",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-admin/src/main/java/org/apache/gobblin/cli/JobInfoPrintUtils.java#L171-L201 |
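A hedged usage sketch for `printJob`: Gobblin code of this era generally uses Guava's `Optional`, so the absent case is written with `Optional.absent()` (swap in `java.util.Optional.empty()` if that is what the signature actually takes):

import com.google.common.base.Optional;

import org.apache.gobblin.cli.JobInfoPrintUtils;
import org.apache.gobblin.rest.JobExecutionInfo;

public class PrintJobDemo {
  public static void main(String[] args) {
    // The absent optional exercises the "Job id not found." branch,
    // so the sketch runs without a live job execution store.
    Optional<JobExecutionInfo> job = Optional.absent();
    JobInfoPrintUtils.printJob(job);
  }
}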
25,795 | apache/incubator-gobblin | gobblin-admin/src/main/java/org/apache/gobblin/cli/JobInfoPrintUtils.java | JobInfoPrintUtils.printJobProperties | public static void printJobProperties(Optional<JobExecutionInfo> jobExecutionInfoOptional) {
if (!jobExecutionInfoOptional.isPresent()) {
System.err.println("Job not found.");
return;
}
List<List<String>> data = new ArrayList<>();
List<String> flags = Arrays.asList("", "-");
List<String> labels = Arrays.asList("Property Key", "Property Value");
for (Map.Entry<String, String> entry : jobExecutionInfoOptional.get().getJobProperties().entrySet()) {
data.add(Arrays.asList(entry.getKey(), entry.getValue()));
}
new CliTablePrinter.Builder()
.labels(labels)
.data(data)
.flags(flags)
.delimiterWidth(2)
.build()
.printTable();
} | java | public static void printJobProperties(Optional<JobExecutionInfo> jobExecutionInfoOptional) {
if (!jobExecutionInfoOptional.isPresent()) {
System.err.println("Job not found.");
return;
}
List<List<String>> data = new ArrayList<>();
List<String> flags = Arrays.asList("", "-");
List<String> labels = Arrays.asList("Property Key", "Property Value");
for (Map.Entry<String, String> entry : jobExecutionInfoOptional.get().getJobProperties().entrySet()) {
data.add(Arrays.asList(entry.getKey(), entry.getValue()));
}
new CliTablePrinter.Builder()
.labels(labels)
.data(data)
.flags(flags)
.delimiterWidth(2)
.build()
.printTable();
} | [
"public",
"static",
"void",
"printJobProperties",
"(",
"Optional",
"<",
"JobExecutionInfo",
">",
"jobExecutionInfoOptional",
")",
"{",
"if",
"(",
"!",
"jobExecutionInfoOptional",
".",
"isPresent",
"(",
")",
")",
"{",
"System",
".",
"err",
".",
"println",
"(",
"\"Job not found.\"",
")",
";",
"return",
";",
"}",
"List",
"<",
"List",
"<",
"String",
">",
">",
"data",
"=",
"new",
"ArrayList",
"<>",
"(",
")",
";",
"List",
"<",
"String",
">",
"flags",
"=",
"Arrays",
".",
"asList",
"(",
"\"\"",
",",
"\"-\"",
")",
";",
"List",
"<",
"String",
">",
"labels",
"=",
"Arrays",
".",
"asList",
"(",
"\"Property Key\"",
",",
"\"Property Value\"",
")",
";",
"for",
"(",
"Map",
".",
"Entry",
"<",
"String",
",",
"String",
">",
"entry",
":",
"jobExecutionInfoOptional",
".",
"get",
"(",
")",
".",
"getJobProperties",
"(",
")",
".",
"entrySet",
"(",
")",
")",
"{",
"data",
".",
"add",
"(",
"Arrays",
".",
"asList",
"(",
"entry",
".",
"getKey",
"(",
")",
",",
"entry",
".",
"getValue",
"(",
")",
")",
")",
";",
"}",
"new",
"CliTablePrinter",
".",
"Builder",
"(",
")",
".",
"labels",
"(",
"labels",
")",
".",
"data",
"(",
"data",
")",
".",
"flags",
"(",
"flags",
")",
".",
"delimiterWidth",
"(",
"2",
")",
".",
"build",
"(",
")",
".",
"printTable",
"(",
")",
";",
"}"
] | Print properties of a specific job
@param jobExecutionInfoOptional | [
"Print",
"properties",
"of",
"a",
"specific",
"job"
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-admin/src/main/java/org/apache/gobblin/cli/JobInfoPrintUtils.java#L207-L227 |
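The properties printer follows the same pattern; a sketch under the same Guava-`Optional` assumption (a present `JobExecutionInfo` would instead render one "Property Key" / "Property Value" row per job property):

import com.google.common.base.Optional;

import org.apache.gobblin.cli.JobInfoPrintUtils;
import org.apache.gobblin.rest.JobExecutionInfo;

public class PrintJobPropertiesDemo {
  public static void main(String[] args) {
    // Absent input hits the "Job not found." guard and returns early.
    JobInfoPrintUtils.printJobProperties(Optional.<JobExecutionInfo>absent());
  }
}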
25,796 | apache/incubator-gobblin | gobblin-admin/src/main/java/org/apache/gobblin/cli/JobInfoPrintUtils.java | JobInfoPrintUtils.printMetrics | private static void printMetrics(MetricArray metrics) {
System.out.println();
if (metrics.size() == 0) {
System.out.println("No metrics found.");
return;
}
List<List<String>> data = new ArrayList<>();
List<String> flags = Arrays.asList("", "-");
for (Metric metric : metrics) {
data.add(Arrays.asList(metric.getName(), metric.getValue()));
}
new CliTablePrinter.Builder()
.data(data)
.flags(flags)
.delimiterWidth(2)
.build()
.printTable();
} | java | private static void printMetrics(MetricArray metrics) {
System.out.println();
if (metrics.size() == 0) {
System.out.println("No metrics found.");
return;
}
List<List<String>> data = new ArrayList<>();
List<String> flags = Arrays.asList("", "-");
for (Metric metric : metrics) {
data.add(Arrays.asList(metric.getName(), metric.getValue()));
}
new CliTablePrinter.Builder()
.data(data)
.flags(flags)
.delimiterWidth(2)
.build()
.printTable();
} | [
"private",
"static",
"void",
"printMetrics",
"(",
"MetricArray",
"metrics",
")",
"{",
"System",
".",
"out",
".",
"println",
"(",
")",
";",
"if",
"(",
"metrics",
".",
"size",
"(",
")",
"==",
"0",
")",
"{",
"System",
".",
"out",
".",
"println",
"(",
"\"No metrics found.\"",
")",
";",
"return",
";",
"}",
"List",
"<",
"List",
"<",
"String",
">",
">",
"data",
"=",
"new",
"ArrayList",
"<>",
"(",
")",
";",
"List",
"<",
"String",
">",
"flags",
"=",
"Arrays",
".",
"asList",
"(",
"\"\"",
",",
"\"-\"",
")",
";",
"for",
"(",
"Metric",
"metric",
":",
"metrics",
")",
"{",
"data",
".",
"add",
"(",
"Arrays",
".",
"asList",
"(",
"metric",
".",
"getName",
"(",
")",
",",
"metric",
".",
"getValue",
"(",
")",
")",
")",
";",
"}",
"new",
"CliTablePrinter",
".",
"Builder",
"(",
")",
".",
"data",
"(",
"data",
")",
".",
"flags",
"(",
"flags",
")",
".",
"delimiterWidth",
"(",
"2",
")",
".",
"build",
"(",
")",
".",
"printTable",
"(",
")",
";",
"}"
] | Print out various metrics
@param metrics Metrics to print | [
"Print",
"out",
"various",
"metrics"
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-admin/src/main/java/org/apache/gobblin/cli/JobInfoPrintUtils.java#L233-L254 |
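`printMetrics` is private, so it cannot be invoked from outside the class; the sketch below reproduces its two-column name/value layout with plain JDK types. The metric names are hypothetical, and the padding stands in for `CliTablePrinter`'s delimiter width of 2:

import java.util.LinkedHashMap;
import java.util.Map;

public class MetricsTableSketch {
  public static void main(String[] args) {
    Map<String, String> metrics = new LinkedHashMap<>();
    metrics.put("gobblin.extractor.records.read", "1024"); // hypothetical metric name
    metrics.put("gobblin.extractor.records.failed", "0");  // hypothetical metric name
    if (metrics.isEmpty()) {
      System.out.println("No metrics found.");
      return;
    }
    // Left-align names, padded to the longest name plus two delimiter spaces.
    int width = metrics.keySet().stream().mapToInt(String::length).max().orElse(0) + 2;
    for (Map.Entry<String, String> e : metrics.entrySet()) {
      System.out.printf("%-" + width + "s%s%n", e.getKey(), e.getValue());
    }
  }
}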
25,797 | apache/incubator-gobblin | gobblin-example/src/main/java/org/apache/gobblin/example/wikipedia/EmbeddedWikipediaExample.java | EmbeddedWikipediaExample.lookback | @CliObjectOption(description = "Sets the period for which articles should be pulled in ISO time format (e.g. P2D, PT1H)")
public EmbeddedWikipediaExample lookback(String isoLookback) {
this.setConfiguration(WikipediaExtractor.BOOTSTRAP_PERIOD, isoLookback);
return this;
} | java | @CliObjectOption(description = "Sets the period for which articles should be pulled in ISO time format (e.g. P2D, PT1H)")
public EmbeddedWikipediaExample lookback(String isoLookback) {
this.setConfiguration(WikipediaExtractor.BOOTSTRAP_PERIOD, isoLookback);
return this;
} | [
"@",
"CliObjectOption",
"(",
"description",
"=",
"\"Sets the period for which articles should be pulled in ISO time format (e.g. P2D, PT1H)\"",
")",
"public",
"EmbeddedWikipediaExample",
"lookback",
"(",
"String",
"isoLookback",
")",
"{",
"this",
".",
"setConfiguration",
"(",
"WikipediaExtractor",
".",
"BOOTSTRAP_PERIOD",
",",
"isoLookback",
")",
";",
"return",
"this",
";",
"}"
] | Set bootstrap lookback, i.e. oldest revision to pull. | [
"Set",
"bootstrap",
"lookback",
"i",
".",
"e",
".",
"oldest",
"revision",
"to",
"pull",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-example/src/main/java/org/apache/gobblin/example/wikipedia/EmbeddedWikipediaExample.java#L83-L87 |
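The option value is an ISO-8601 duration string; a quick way to validate a candidate before handing it to `lookback(...)` is `java.time.Duration` (the extractor itself may parse the period with a different library, e.g. Joda-Time — an assumption):

import java.time.Duration;

public class LookbackValueCheck {
  public static void main(String[] args) {
    // Both example values from the option description parse as ISO-8601 durations.
    System.out.println(Duration.parse("P2D").toHours());    // 48
    System.out.println(Duration.parse("PT1H").toMinutes()); // 60
  }
}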
25,798 | apache/incubator-gobblin | gobblin-example/src/main/java/org/apache/gobblin/example/wikipedia/EmbeddedWikipediaExample.java | EmbeddedWikipediaExample.avroOutput | @CliObjectOption(description = "Write output to Avro files. Specify the output directory as argument.")
public EmbeddedWikipediaExample avroOutput(String outputPath) {
this.setConfiguration(ConfigurationKeys.WRITER_BUILDER_CLASS, AvroDataWriterBuilder.class.getName());
this.setConfiguration(ConfigurationKeys.WRITER_DESTINATION_TYPE_KEY, Destination.DestinationType.HDFS.name());
this.setConfiguration(ConfigurationKeys.WRITER_OUTPUT_FORMAT_KEY, WriterOutputFormat.AVRO.name());
this.setConfiguration(ConfigurationKeys.WRITER_PARTITIONER_CLASS, WikipediaPartitioner.class.getName());
this.setConfiguration(ConfigurationKeys.JOB_DATA_PUBLISHER_TYPE, BaseDataPublisher.class.getName());
this.setConfiguration(ConfigurationKeys.CONVERTER_CLASSES_KEY, WikipediaConverter.class.getName());
this.setConfiguration(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, outputPath);
return this;
} | java | @CliObjectOption(description = "Write output to Avro files. Specify the output directory as argument.")
public EmbeddedWikipediaExample avroOutput(String outputPath) {
this.setConfiguration(ConfigurationKeys.WRITER_BUILDER_CLASS, AvroDataWriterBuilder.class.getName());
this.setConfiguration(ConfigurationKeys.WRITER_DESTINATION_TYPE_KEY, Destination.DestinationType.HDFS.name());
this.setConfiguration(ConfigurationKeys.WRITER_OUTPUT_FORMAT_KEY, WriterOutputFormat.AVRO.name());
this.setConfiguration(ConfigurationKeys.WRITER_PARTITIONER_CLASS, WikipediaPartitioner.class.getName());
this.setConfiguration(ConfigurationKeys.JOB_DATA_PUBLISHER_TYPE, BaseDataPublisher.class.getName());
this.setConfiguration(ConfigurationKeys.CONVERTER_CLASSES_KEY, WikipediaConverter.class.getName());
this.setConfiguration(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, outputPath);
return this;
} | [
"@",
"CliObjectOption",
"(",
"description",
"=",
"\"Write output to Avro files. Specify the output directory as argument.\"",
")",
"public",
"EmbeddedWikipediaExample",
"avroOutput",
"(",
"String",
"outputPath",
")",
"{",
"this",
".",
"setConfiguration",
"(",
"ConfigurationKeys",
".",
"WRITER_BUILDER_CLASS",
",",
"AvroDataWriterBuilder",
".",
"class",
".",
"getName",
"(",
")",
")",
";",
"this",
".",
"setConfiguration",
"(",
"ConfigurationKeys",
".",
"WRITER_DESTINATION_TYPE_KEY",
",",
"Destination",
".",
"DestinationType",
".",
"HDFS",
".",
"name",
"(",
")",
")",
";",
"this",
".",
"setConfiguration",
"(",
"ConfigurationKeys",
".",
"WRITER_OUTPUT_FORMAT_KEY",
",",
"WriterOutputFormat",
".",
"AVRO",
".",
"name",
"(",
")",
")",
";",
"this",
".",
"setConfiguration",
"(",
"ConfigurationKeys",
".",
"WRITER_PARTITIONER_CLASS",
",",
"WikipediaPartitioner",
".",
"class",
".",
"getName",
"(",
")",
")",
";",
"this",
".",
"setConfiguration",
"(",
"ConfigurationKeys",
".",
"JOB_DATA_PUBLISHER_TYPE",
",",
"BaseDataPublisher",
".",
"class",
".",
"getName",
"(",
")",
")",
";",
"this",
".",
"setConfiguration",
"(",
"ConfigurationKeys",
".",
"CONVERTER_CLASSES_KEY",
",",
"WikipediaConverter",
".",
"class",
".",
"getName",
"(",
")",
")",
";",
"this",
".",
"setConfiguration",
"(",
"ConfigurationKeys",
".",
"DATA_PUBLISHER_FINAL_DIR",
",",
"outputPath",
")",
";",
"return",
"this",
";",
"}"
] | Write output to avro files at the given input location. | [
"Write",
"output",
"to",
"avro",
"files",
"at",
"the",
"given",
"input",
"location",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-example/src/main/java/org/apache/gobblin/example/wikipedia/EmbeddedWikipediaExample.java#L92-L102 |
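`avroOutput` is pure configuration wiring, so the same effect can be had through ordinary job properties. The sketch sets two of the keys via the `ConfigurationKeys` constants referenced above; the output path is hypothetical, and the full method also wires the writer builder, partitioner, converter, and publisher classes:

import java.util.Properties;

import org.apache.gobblin.configuration.ConfigurationKeys;

public class AvroOutputConfigSketch {
  public static void main(String[] args) {
    Properties jobProps = new Properties();
    jobProps.setProperty(ConfigurationKeys.WRITER_OUTPUT_FORMAT_KEY, "AVRO");
    jobProps.setProperty(ConfigurationKeys.DATA_PUBLISHER_FINAL_DIR, "/tmp/wikipedia-avro"); // hypothetical path
    jobProps.list(System.out); // inspect the resulting configuration
  }
}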
25,799 | apache/incubator-gobblin | gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/orc/OrcValueMapper.java | OrcValueMapper.convertOrcStructToOrcKey | protected OrcKey convertOrcStructToOrcKey(OrcStruct struct) {
OrcKey orcKey = new OrcKey();
orcKey.key = struct;
return orcKey;
} | java | protected OrcKey convertOrcStructToOrcKey(OrcStruct struct) {
OrcKey orcKey = new OrcKey();
orcKey.key = struct;
return orcKey;
} | [
"protected",
"OrcKey",
"convertOrcStructToOrcKey",
"(",
"OrcStruct",
"struct",
")",
"{",
"OrcKey",
"orcKey",
"=",
"new",
"OrcKey",
"(",
")",
";",
"orcKey",
".",
"key",
"=",
"struct",
";",
"return",
"orcKey",
";",
"}"
] | The output key of mapper needs to be comparable. In the scenarios that we need the orc record itself
to be the output key, this conversion will be necessary. | [
"The",
"output",
"key",
"of",
"mapper",
"needs",
"to",
"be",
"comparable",
".",
"In",
"the",
"scenarios",
"that",
"we",
"need",
"the",
"orc",
"record",
"itself",
"to",
"be",
"the",
"output",
"key",
"this",
"conversion",
"will",
"be",
"necessary",
"."
] | f029b4c0fea0fe4aa62f36dda2512344ff708bae | https://github.com/apache/incubator-gobblin/blob/f029b4c0fea0fe4aa62f36dda2512344ff708bae/gobblin-compaction/src/main/java/org/apache/gobblin/compaction/mapreduce/orc/OrcValueMapper.java#L72-L76 |
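Since `OrcKey.key` is a public field in `org.apache.orc.mapred`, the wrapping shown above is a single assignment; a self-contained sketch with an illustrative two-column schema:

import org.apache.orc.TypeDescription;
import org.apache.orc.mapred.OrcKey;
import org.apache.orc.mapred.OrcStruct;

public class OrcKeyWrapDemo {
  public static void main(String[] args) {
    TypeDescription schema = TypeDescription.fromString("struct<id:int,name:string>");
    OrcStruct struct = new OrcStruct(schema);
    OrcKey orcKey = new OrcKey();
    orcKey.key = struct; // the same assignment the mapper helper performs
    System.out.println(orcKey.key);
  }
}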