| repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
|---|---|---|---|---|---|---|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/ConfigOption.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.slive;
import org.apache.commons.cli.Option;
/**
* Class which extends the basic Option object and adds in the configuration
* key and a default value so that a central place can be used for retrieval
* of these as needed
*/
class ConfigOption<T> extends Option {
private static final long serialVersionUID = 7218954906367671150L;
// config starts with this prefix
private static final String SLIVE_PREFIX = "slive";
// command line options and descriptions and config option name
static final ConfigOption<Integer> MAPS = new ConfigOption<Integer>(
"maps", true, "Number of maps", SLIVE_PREFIX + ".maps", 10);
static final ConfigOption<Integer> REDUCES = new ConfigOption<Integer>(
"reduces", true, "Number of reduces", SLIVE_PREFIX + ".reduces", 1);
static final ConfigOption<Integer> OPS = new ConfigOption<Integer>(
"ops", true, "Max number of operations per map", SLIVE_PREFIX
+ ".map.ops", 1000);
static final ConfigOption<Integer> DURATION = new ConfigOption<Integer>(
"duration", true,
"Duration of a map task in seconds (MAX_INT for no limit)", SLIVE_PREFIX
+ ".duration", Integer.MAX_VALUE);
static final ConfigOption<Boolean> EXIT_ON_ERROR = new ConfigOption<Boolean>(
"exitOnError", false, "Exit on first error", SLIVE_PREFIX
+ ".exit.on.error", false);
static final ConfigOption<Integer> FILES = new ConfigOption<Integer>(
"files", true, "Max total number of files",
SLIVE_PREFIX + ".total.files", 10);
static final ConfigOption<Integer> DIR_SIZE = new ConfigOption<Integer>(
"dirSize", true, "Max files per directory", SLIVE_PREFIX + ".dir.size",
32);
static final ConfigOption<String> BASE_DIR = new ConfigOption<String>(
"baseDir", true, "Base directory path", SLIVE_PREFIX + ".base.dir",
"/test/slive");
static final ConfigOption<String> RESULT_FILE = new ConfigOption<String>(
"resFile", true, "Result file name", SLIVE_PREFIX + ".result.file",
"part-0000");
static final ConfigOption<Short> REPLICATION_AM = new ConfigOption<Short>(
"replication", true, "Min,max value for replication amount", SLIVE_PREFIX
+ ".file.replication", (short) 3);
static final ConfigOption<Long> BLOCK_SIZE = new ConfigOption<Long>(
"blockSize", true, "Min,max for dfs file block size", SLIVE_PREFIX
+ ".block.size", 64L * Constants.MEGABYTES);
static final ConfigOption<Long> READ_SIZE = new ConfigOption<Long>(
"readSize", true,
"Min,max for size to read (min=max=MAX_LONG=read entire file)",
SLIVE_PREFIX + ".op.read.size", null);
static final ConfigOption<Long> WRITE_SIZE = new ConfigOption<Long>(
"writeSize", true,
"Min,max for size to write (min=max=MAX_LONG=blocksize)", SLIVE_PREFIX
+ ".op.write.size", null);
static final ConfigOption<Long> SLEEP_TIME = new ConfigOption<Long>(
"sleep",
true,
"Min,max for millisecond of random sleep to perform (between operations)",
SLIVE_PREFIX + ".op.sleep.range", null);
static final ConfigOption<Long> APPEND_SIZE = new ConfigOption<Long>(
"appendSize", true,
"Min,max for size to append (min=max=MAX_LONG=blocksize)", SLIVE_PREFIX
+ ".op.append.size", null);
static final ConfigOption<Boolean> TRUNCATE_WAIT = new ConfigOption<Boolean>(
"truncateWait", true, "Should wait for truncate recovery", SLIVE_PREFIX
+ ".op.truncate.wait", true);
static final ConfigOption<Long> TRUNCATE_SIZE = new ConfigOption<Long>(
"truncateSize", true,
"Min,max for size to truncate (min=max=MAX_LONG=blocksize)", SLIVE_PREFIX
+ ".op.truncate.size", null);
static final ConfigOption<Long> RANDOM_SEED = new ConfigOption<Long>(
"seed", true, "Random number seed", SLIVE_PREFIX + ".seed", null);
// command line only options
static final Option HELP = new Option("help", false,
"Usage information");
static final Option CLEANUP = new Option("cleanup", true,
"Cleanup & remove directory after reporting");
// non slive specific settings
static final ConfigOption<String> QUEUE_NAME = new ConfigOption<String>(
"queue", true, "Queue name", "mapred.job.queue.name", "default");
static final ConfigOption<String> PACKET_SIZE = new ConfigOption<String>(
"packetSize", true, "Dfs write packet size", "dfs.write.packet.size",
null);
/**
* Hadoop configuration property name
*/
private String cfgOption;
/**
* Default value if no value is located by other means
*/
private T defaultValue;
ConfigOption(String cliOption, boolean hasArg, String description,
String cfgOption, T def) {
super(cliOption, hasArg, description);
this.cfgOption = cfgOption;
this.defaultValue = def;
}
/**
* @return the configuration option name to lookup in Configuration objects
* for this option
*/
String getCfgOption() {
return cfgOption;
}
/**
* @return the default object for this option
*/
T getDefault() {
return defaultValue;
}
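// Illustrative usage sketch (not part of the original source): a value for an
// option is typically resolved from a Hadoop Configuration by falling back to
// the option's default, for example:
//   Configuration conf = new Configuration();
//   int maps = conf.getInt(ConfigOption.MAPS.getCfgOption(),
//       ConfigOption.MAPS.getDefault());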
}
| 5,938
| 36.352201
| 80
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/CreateOp.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.slive;
import java.io.IOException;
import java.util.List;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.slive.DataWriter.GenerateOutput;
import org.apache.hadoop.fs.slive.OperationOutput.OutputType;
/**
* Operation which selects a random file, a random number of bytes to create
* that file with (from the write size option), a random block size (from the
* block size option) and a random replication amount (from the replication
* option), and attempts to create a file with those options.
*
* This operation will capture statistics on success for bytes written, time
* taken (milliseconds), and success count and on failure it will capture the
* number of failures and the time taken (milliseconds) to fail.
*/
class CreateOp extends Operation {
private static final Log LOG = LogFactory.getLog(CreateOp.class);
private static int DEF_IO_BUFFER_SIZE = 4096;
private static final String IO_BUF_CONFIG = ("io.file.buffer.size");
CreateOp(ConfigExtractor cfg, Random rnd) {
super(CreateOp.class.getSimpleName(), cfg, rnd);
}
/**
* Returns the block size to use, aligned to the nearest multiple of
* BYTES_PER_CHECKSUM if the configuration specifies a checksum size. This
* avoids the warnings caused by misaligned block sizes and the file creation
* failures that occur when the block size is not a valid multiple.
*
* @return long
*/
private long determineBlockSize() {
Range<Long> blockSizeRange = getConfig().getBlockSize();
long blockSize = Range.betweenPositive(getRandom(), blockSizeRange);
Long byteChecksum = getConfig().getByteCheckSum();
if (byteChecksum == null) {
return blockSize;
}
// adjust to nearest multiple
long full = (blockSize / byteChecksum) * byteChecksum;
long toFull = blockSize - full;
if (toFull >= (byteChecksum / 2)) {
full += byteChecksum;
}
// adjust if over extended
if (full > blockSizeRange.getUpper()) {
full = blockSizeRange.getUpper();
}
if (full < blockSizeRange.getLower()) {
full = blockSizeRange.getLower();
}
return full;
}
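// Worked example (illustrative, assuming a 512 byte checksum size): a random
// block size of 1300 bytes is rounded to the nearest multiple of 512 as
// follows: full = (1300 / 512) * 512 = 1024 and toFull = 1300 - 1024 = 276;
// since 276 >= 512 / 2, full becomes 1024 + 512 = 1536, which is then clamped
// back into the configured block size range if it falls outside of it.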
/**
* Gets the replication amount
*
* @return short
*/
private short determineReplication() {
Range<Short> replicationAmountRange = getConfig().getReplication();
Range<Long> repRange = new Range<Long>(replicationAmountRange.getLower()
.longValue(), replicationAmountRange.getUpper().longValue());
short replicationAmount = (short) Range.betweenPositive(getRandom(),
repRange);
return replicationAmount;
}
/**
* Gets the output buffering size to use
*
* @return int
*/
private int getBufferSize() {
return getConfig().getConfig().getInt(IO_BUF_CONFIG, DEF_IO_BUFFER_SIZE);
}
/**
* Gets the file to create
*
* @return Path
*/
protected Path getCreateFile() {
Path fn = getFinder().getFile();
return fn;
}
@Override // Operation
List<OperationOutput> run(FileSystem fs) {
List<OperationOutput> out = super.run(fs);
FSDataOutputStream os = null;
try {
Path fn = getCreateFile();
Range<Long> writeSizeRange = getConfig().getWriteSize();
long writeSize = 0;
long blockSize = determineBlockSize();
short replicationAmount = determineReplication();
if (getConfig().shouldWriteUseBlockSize()) {
writeSizeRange = getConfig().getBlockSize();
}
writeSize = Range.betweenPositive(getRandom(), writeSizeRange);
long bytesWritten = 0;
long timeTaken = 0;
int bufSize = getBufferSize();
boolean overWrite = false;
DataWriter writer = new DataWriter(getRandom());
LOG.info("Attempting to create file at " + fn + " of size "
+ Helper.toByteInfo(writeSize) + " using blocksize "
+ Helper.toByteInfo(blockSize) + " and replication amount "
+ replicationAmount);
{
// open & create
long startTime = Timer.now();
os = fs.create(fn, overWrite, bufSize, replicationAmount, blockSize);
timeTaken += Timer.elapsed(startTime);
// write the given length
GenerateOutput stats = writer.writeSegment(writeSize, os);
bytesWritten += stats.getBytesWritten();
timeTaken += stats.getTimeTaken();
// capture close time
startTime = Timer.now();
os.close();
os = null;
timeTaken += Timer.elapsed(startTime);
}
LOG.info("Created file at " + fn + " of size "
+ Helper.toByteInfo(bytesWritten) + " bytes using blocksize "
+ Helper.toByteInfo(blockSize) + " and replication amount "
+ replicationAmount + " in " + timeTaken + " milliseconds");
// collect all the stats
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.OK_TIME_TAKEN, timeTaken));
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.BYTES_WRITTEN, bytesWritten));
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.SUCCESSES, 1L));
} catch (IOException e) {
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.FAILURES, 1L));
LOG.warn("Error with creating", e);
} finally {
if (os != null) {
try {
os.close();
} catch (IOException e) {
LOG.warn("Error closing create stream", e);
}
}
}
return out;
}
}
| 6,462
| 34.31694
| 80
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/Helper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.slive;
/**
* Simple slive helper methods (may not exist in 0.20)
*/
class Helper {
private Helper() {
}
private static final String[] emptyStringArray = {};
/**
* Splits strings on comma and trims accordingly
*
* @param str
* @return array of split and trimmed strings
*/
static String[] getTrimmedStrings(String str) {
if (null == str || "".equals(str.trim())) {
return emptyStringArray;
}
return str.trim().split("\\s*,\\s*");
}
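// Example (illustrative): getTrimmedStrings(" 10 , 90, uniform ") returns
// {"10", "90", "uniform"}, while null or blank input returns an empty array.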
/**
* Converts a byte value into a useful string for output
*
* @param bytes
*
* @return String
*/
static String toByteInfo(long bytes) {
StringBuilder str = new StringBuilder();
if (bytes < 0) {
bytes = 0;
}
str.append(bytes);
str.append(" bytes or ");
str.append(bytes / 1024);
str.append(" kilobytes or ");
str.append(bytes / (1024 * 1024));
str.append(" megabytes or ");
str.append(bytes / (1024 * 1024 * 1024));
str.append(" gigabytes");
return str.toString();
}
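// Example (illustrative): toByteInfo(1048576) returns
// "1048576 bytes or 1024 kilobytes or 1 megabytes or 0 gigabytes".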
/**
* Stringifies an array using the given separator.
*
* @param args
* the array to format
* @param sep
* the separator string to use (i.e. comma or space)
*
* @return String representing that array
*/
static String stringifyArray(Object[] args, String sep) {
StringBuilder optStr = new StringBuilder();
for (int i = 0; i < args.length; ++i) {
optStr.append(args[i]);
if ((i + 1) != args.length) {
optStr.append(sep);
}
}
return optStr.toString();
}
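// Example (illustrative): stringifyArray(new Object[] {"a", "b", "c"}, ",")
// returns "a,b,c".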
}
| 2,397
| 25.644444
| 75
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/WeightSelector.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.slive;
import java.text.NumberFormat;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.TreeMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.slive.Constants.Distribution;
import org.apache.hadoop.fs.slive.Constants.OperationType;
import org.apache.hadoop.fs.slive.Weights.UniformWeight;
import org.apache.hadoop.fs.slive.ObserveableOp.Observer;
/**
* This class is the main handler that selects operations to run using the
* currently held selection object. It configures and weights each operation and
* then hands the operations + weights off to the selector object to determine
* which one should be run. If no operations are left to be run then it will
* return null.
*/
class WeightSelector {
// what a weight calculation means
interface Weightable {
Double weight(int elapsed, int duration);
}
private static final Log LOG = LogFactory.getLog(WeightSelector.class);
private static class OperationInfo {
Integer amountLeft;
Operation operation;
Distribution distribution;
}
private Map<OperationType, OperationInfo> operations;
private Map<Distribution, Weightable> weights;
private RouletteSelector selector;
private OperationFactory factory;
WeightSelector(ConfigExtractor cfg, Random rnd) {
selector = new RouletteSelector(rnd);
factory = new OperationFactory(cfg, rnd);
configureOperations(cfg);
configureWeights(cfg);
}
protected RouletteSelector getSelector() {
return selector;
}
private void configureWeights(ConfigExtractor e) {
weights = new HashMap<Distribution, Weightable>();
weights.put(Distribution.UNIFORM, new UniformWeight());
// weights.put(Distribution.BEG, new BeginWeight());
// weights.put(Distribution.END, new EndWeight());
// weights.put(Distribution.MID, new MidWeight());
}
/**
* Determines how many initial operations a given operation data should have
*
* @param totalAm
* the total amount of operations allowed
*
* @param opData
* the given operation information (with a valid percentage >= 0)
*
* @return the number of items to allow to run
*
* @throws IllegalArgumentException
* if a negative operation count is determined
*/
static int determineHowMany(int totalAm, OperationData opData,
OperationType type) {
if (totalAm <= 0) {
return 0;
}
int amLeft = (int) Math.floor(opData.getPercent() * totalAm);
if (amLeft < 0) {
throw new IllegalArgumentException("Invalid amount " + amLeft
+ " determined for operation type " + type.name());
}
return amLeft;
}
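// Worked example (illustrative): with a total of 1000 operations and an
// operation whose ratio is 0.25, amLeft = floor(0.25 * 1000) = 250 initial
// operations are allotted to that operation type.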
/**
* Sets up the operation using the given configuration by setting up the
* number of operations to perform (and how many are left) and setting up the
* operation objects to be used throughout selection.
*
* @param cfg
* ConfigExtractor.
*/
private void configureOperations(ConfigExtractor cfg) {
operations = new TreeMap<OperationType, OperationInfo>();
Map<OperationType, OperationData> opinfo = cfg.getOperations();
int totalAm = cfg.getOpCount();
int opsLeft = totalAm;
NumberFormat formatter = Formatter.getPercentFormatter();
for (final OperationType type : opinfo.keySet()) {
OperationData opData = opinfo.get(type);
OperationInfo info = new OperationInfo();
info.distribution = opData.getDistribution();
int amLeft = determineHowMany(totalAm, opData, type);
opsLeft -= amLeft;
LOG
.info(type.name() + " has " + amLeft + " initial operations out of "
+ totalAm + " for its ratio "
+ formatter.format(opData.getPercent()));
info.amountLeft = amLeft;
Operation op = factory.getOperation(type);
// wrap operation in an observer so that the amount left gets decremented when
// it is done
if (op != null) {
Observer fn = new Observer() {
public void notifyFinished(Operation op) {
OperationInfo opInfo = operations.get(type);
if (opInfo != null) {
--opInfo.amountLeft;
}
}
public void notifyStarting(Operation op) {
}
};
info.operation = new ObserveableOp(op, fn);
operations.put(type, info);
}
}
if (opsLeft > 0) {
LOG
.info(opsLeft
+ " left over operations found (due to inability to support partial operations)");
}
}
/**
* Selects an operation from the known operation set or returns null if none
* are available by applying the weighting algorithms and then handing off the
* weighted operations to the selection object.
*
* @param elapsed
* the currently elapsed time (milliseconds) of the running program
* @param duration
* the maximum amount of milliseconds of the running program
*
* @return operation or null if none left
*/
Operation select(int elapsed, int duration) {
List<OperationWeight> validOps = new ArrayList<OperationWeight>(operations
.size());
for (OperationType type : operations.keySet()) {
OperationInfo opinfo = operations.get(type);
if (opinfo == null || opinfo.amountLeft <= 0) {
continue;
}
Weightable weighter = weights.get(opinfo.distribution);
if (weighter != null) {
OperationWeight weightOp = new OperationWeight(opinfo.operation,
weighter.weight(elapsed, duration));
validOps.add(weightOp);
} else {
throw new RuntimeException("Unable to get weight for distribution "
+ opinfo.distribution);
}
}
if (validOps.isEmpty()) {
return null;
}
return getSelector().select(validOps);
}
}
| 6,762
| 33.505102
| 96
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/OperationData.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.slive;
import org.apache.hadoop.fs.slive.Constants.Distribution;
import org.apache.hadoop.util.StringUtils;
/**
* This class holds the data representing an operation's distribution and its
* percentage (between 0 and 1) and provides methods to access those values
* and to parse them from and format them into strings
*/
class OperationData {
private static final String SEP = ",";
private Distribution distribution;
private Double percent;
OperationData(Distribution d, Double p) {
this.distribution = d;
this.percent = p;
}
/**
* Expects a comma separated list where the first element is the ratio
* (between 0 and 100) and the second element is the distribution (if
* non-existent then uniform will be selected). If an empty list is passed in
* then only the distribution will be set (to uniform) and the percent will be
* left as null.
*/
OperationData(String data) {
String pieces[] = Helper.getTrimmedStrings(data);
distribution = Distribution.UNIFORM;
percent = null;
if (pieces.length == 1) {
percent = (Double.parseDouble(pieces[0]) / 100.0d);
} else if (pieces.length >= 2) {
percent = (Double.parseDouble(pieces[0]) / 100.0d);
distribution = Distribution.valueOf(StringUtils.toUpperCase(pieces[1]));
}
}
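// Examples (illustrative): new OperationData("35,uniform") yields a percent of
// 0.35 and the UNIFORM distribution; new OperationData("35") yields 0.35 with
// the default UNIFORM distribution; new OperationData("") leaves the percent
// as null and uses the UNIFORM distribution.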
/**
* Gets the distribution this operation represents
*
* @return Distribution
*/
Distribution getDistribution() {
return distribution;
}
/**
* Gets the 0 - 1 percent that this operations run ratio should be
*
* @return Double (or null if not given)
*/
Double getPercent() {
return percent;
}
/**
* Returns a string representation of this object; if the percent is null then
* NaN will be output instead. Format is percent,distribution.
*/
public String toString() {
StringBuilder str = new StringBuilder();
if (getPercent() != null) {
str.append(getPercent() * 100.0d);
} else {
str.append(Double.NaN);
}
str.append(SEP);
str.append(getDistribution().lowerName());
return str.toString();
}
}
| 2,954
| 30.774194
| 80
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/DataHasher.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.slive;
import java.util.Random;
/**
* Class which is used to create the data to write at a given offset into a
* file and to later verify that the expected value is read back at that byte
* offset
*/
class DataHasher {
private Random rnd;
DataHasher(long mixIn) {
this.rnd = new Random(mixIn);
}
/**
* @param offSet
* the byte offset into the file
*
* @return the data to be expected at that offset
*/
long generate(long offSet) {
return ((offSet * 47) ^ (rnd.nextLong() * 97)) * 37;
}
}
| 1,409
| 29
| 79
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/Weights.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.slive;
import org.apache.hadoop.fs.slive.WeightSelector.Weightable;
/**
* Class to isolate the various weight algorithms we use.
*/
class Weights {
private Weights() {
}
/**
* A weight which always returns the same weight (1/3 by default), which will
* have an overall area of (1/3) unless another value is provided.
*/
static class UniformWeight implements Weightable {
private static Double DEFAULT_WEIGHT = (1.0d / 3.0d);
private Double weight;
UniformWeight(double w) {
weight = w;
}
UniformWeight() {
this(DEFAULT_WEIGHT);
}
@Override // Weightable
public Double weight(int elapsed, int duration) {
return weight;
}
}
/**
* A weight which normalizes the elapsed time and the duration to a value
* between 0 and 1 and forms an output using the function (-2 * (x-0.5)^2) +
* 0.5, which has a value close to 0 near input 0, a value close to 0 near
* input 1, and a value close to 0.5 near input 0.5 (with overall area 1/3).
*/
static class MidWeight implements Weightable {
@Override // Weightable
public Double weight(int elapsed, int duration) {
double normalized = (double) elapsed / (double) duration;
double result = (-2.0d * Math.pow(normalized - 0.5, 2)) + 0.5d;
if (result < 0) {
result = 0;
}
if (result > 1) {
result = 1;
}
return result;
}
}
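// Worked example (illustrative): halfway through the run (elapsed / duration =
// 0.5) MidWeight yields -2 * (0.5 - 0.5)^2 + 0.5 = 0.5, while a quarter of the
// way through (0.25) it yields -2 * (0.25 - 0.5)^2 + 0.5 = 0.375.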
/**
* A weight which normalizes the elapsed time and the duration to a value
* between 0 and 1 and forms an output using the function (x)^2, which has a
* value close to 0 near input 0 and a value close to 1 near input 1 (with
* overall area 1/3).
*/
static class EndWeight implements Weightable {
@Override // Weightable
public Double weight(int elapsed, int duration) {
double normalized = (double) elapsed / (double) duration;
double result = Math.pow(normalized, 2);
if (result < 0) {
result = 0;
}
if (result > 1) {
result = 1;
}
return result;
}
}
/**
* A weight which normalizes the elapsed time and the duration to a value
* between 0 and 1 and forms an output using the function (x-1)^2, which has a
* value close to 1 near input 0 and a value close to 0 near input 1 (with
* overall area 1/3).
*/
static class BeginWeight implements Weightable {
@Override // Weightable
public Double weight(int elapsed, int duration) {
double normalized = (double) elapsed / (double) duration;
double result = Math.pow((normalized - 1), 2);
if (result < 0) {
result = 0;
}
if (result > 1) {
result = 1;
}
return result;
}
}
}
| 3,696
| 28.110236
| 80
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/Range.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.slive;
import java.util.Random;
/**
* Class that represents a numeric minimum and a maximum
*
* @param <T>
* the type of number being used
*/
class Range<T extends Number> {
private static final String SEP = ",";
private T min;
private T max;
Range(T min, T max) {
this.min = min;
this.max = max;
}
/**
* @return the minimum value
*/
T getLower() {
return min;
}
/**
* @return the maximum value
*/
T getUpper() {
return max;
}
public String toString() {
return min + SEP + max;
}
/**
* Gets a long number between two values
*
* @param rnd
* @param range
*
* @return long
*/
static long betweenPositive(Random rnd, Range<Long> range) {
if (range.getLower().equals(range.getUpper())) {
return range.getLower();
}
long nextRnd = rnd.nextLong();
long normRange = (range.getUpper() - range.getLower() + 1);
return Math.abs(nextRnd % normRange) + range.getLower();
}
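// Example (illustrative): for a Range<Long> of (0, 100), normRange is 101 and
// the result is 0 + Math.abs(nextRnd % 101), i.e. a value in [0, 100].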
}
| 1,838
| 22.883117
| 75
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/ReadOp.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.slive;
import java.io.DataInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.List;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.slive.DataVerifier.VerifyOutput;
import org.apache.hadoop.fs.slive.OperationOutput.OutputType;
/**
* Operation which selects a random file and selects a random read size (from
* the read size option) and reads from the start of that file to the read size
* (or the full file) and verifies the bytes that were written there.
*
* On success this operation will capture statistics on the time taken to read
* that file, the number of successful readings that occurred, the number of
* bytes read, and the number of chunks verified and the number of chunks
* which failed verification; on failure or error it will capture the number
* of failures and the amount of time taken to fail.
*/
class ReadOp extends Operation {
private static final Log LOG = LogFactory.getLog(ReadOp.class);
ReadOp(ConfigExtractor cfg, Random rnd) {
super(ReadOp.class.getSimpleName(), cfg, rnd);
}
/**
* Gets the file name to read
*
* @return Path
*/
protected Path getReadFile() {
Path fn = getFinder().getFile();
return fn;
}
@Override // Operation
List<OperationOutput> run(FileSystem fs) {
List<OperationOutput> out = super.run(fs);
DataInputStream is = null;
try {
Path fn = getReadFile();
Range<Long> readSizeRange = getConfig().getReadSize();
long readSize = 0;
String readStrAm = "";
if (getConfig().shouldReadFullFile()) {
readSize = Long.MAX_VALUE;
readStrAm = "full file";
} else {
readSize = Range.betweenPositive(getRandom(), readSizeRange);
readStrAm = Helper.toByteInfo(readSize);
}
long timeTaken = 0;
long chunkSame = 0;
long chunkDiff = 0;
long bytesRead = 0;
long startTime = 0;
DataVerifier vf = new DataVerifier();
LOG.info("Attempting to read file at " + fn + " of size (" + readStrAm
+ ")");
{
// open
startTime = Timer.now();
is = fs.open(fn);
timeTaken += Timer.elapsed(startTime);
// read & verify
VerifyOutput vo = vf.verifyFile(readSize, is);
timeTaken += vo.getReadTime();
chunkSame += vo.getChunksSame();
chunkDiff += vo.getChunksDifferent();
bytesRead += vo.getBytesRead();
// capture close time
startTime = Timer.now();
is.close();
is = null;
timeTaken += Timer.elapsed(startTime);
}
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.OK_TIME_TAKEN, timeTaken));
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.BYTES_READ, bytesRead));
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.SUCCESSES, 1L));
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.CHUNKS_VERIFIED, chunkSame));
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.CHUNKS_UNVERIFIED, chunkDiff));
LOG.info("Read " + Helper.toByteInfo(bytesRead) + " of " + fn + " with "
+ chunkSame + " chunks being same as expected and " + chunkDiff
+ " chunks being different than expected in " + timeTaken
+ " milliseconds");
} catch (FileNotFoundException e) {
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.NOT_FOUND, 1L));
LOG.warn("Error with reading", e);
} catch (BadFileException e) {
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.BAD_FILES, 1L));
LOG.warn("Error reading bad file", e);
} catch (IOException e) {
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.FAILURES, 1L));
LOG.warn("Error reading", e);
} finally {
if (is != null) {
try {
is.close();
} catch (IOException e) {
LOG.warn("Error closing read stream", e);
}
}
}
return out;
}
}
| 5,155
| 35.309859
| 80
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/ListOp.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.slive;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.List;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.slive.OperationOutput.OutputType;
/**
* Operation which selects a random directory and attempts to list that
* directory (if it exists)
*
* On success this operation will capture statistics on the time taken to list
* that directory, the number of successful listings that occurred, and the
* number of entries in the selected directory; on failure or error it will
* capture the number of failures and the amount of time taken to fail.
*/
class ListOp extends Operation {
private static final Log LOG = LogFactory.getLog(ListOp.class);
ListOp(ConfigExtractor cfg, Random rnd) {
super(ListOp.class.getSimpleName(), cfg, rnd);
}
/**
* Gets the directory to list
*
* @return Path
*/
protected Path getDirectory() {
Path dir = getFinder().getDirectory();
return dir;
}
@Override // Operation
List<OperationOutput> run(FileSystem fs) {
List<OperationOutput> out = super.run(fs);
try {
Path dir = getDirectory();
long dirEntries = 0;
long timeTaken = 0;
{
long startTime = Timer.now();
FileStatus[] files = fs.listStatus(dir);
timeTaken = Timer.elapsed(startTime);
dirEntries = files.length;
}
// log stats
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.OK_TIME_TAKEN, timeTaken));
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.SUCCESSES, 1L));
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.DIR_ENTRIES, dirEntries));
LOG.info("Directory " + dir + " has " + dirEntries + " entries");
} catch (FileNotFoundException e) {
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.NOT_FOUND, 1L));
LOG.warn("Error with listing", e);
} catch (IOException e) {
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.FAILURES, 1L));
LOG.warn("Error with listing", e);
}
return out;
}
}
| 3,211
| 33.170213
| 80
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/OperationOutput.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.slive;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.util.StringUtils;
/**
* An operation output has the following object format whereby simple types are
* represented as a key of dataType:operationType*measurementType and these
* simple types can be combined (mainly in the reducer) using their given types
* into a single operation output.
*
* Combination is done based on the data types and the following convention is
* followed (in the following order). If one is a string then the other will be
* concatenated as a string with a ";" separator. If one is a double then the
* other will be added as a double and the output will be a double. If one is a
* float then the other will be added as a float and the output will be a
* float. Following this, if one is a long the other will be added as a long
* and the output type will be a long, and if one is an integer the other will
* be added as an integer and the output type will be an integer.
*/
class OperationOutput {
private OutputType dataType;
private String opType, measurementType;
private Object value;
private static final String TYPE_SEP = ":";
private static final String MEASUREMENT_SEP = "*";
private static final String STRING_SEP = ";";
static enum OutputType {
STRING, FLOAT, LONG, DOUBLE, INTEGER
}
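// Example (illustrative, with a hypothetical measurement name): a key such as
// "long:ReadOp*bytes_read" parses into the LONG data type, the operation type
// "ReadOp" and the measurement type "bytes_read".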
/**
* Parses a given key according to the expected key format and forms the given
* segments.
*
* @param key
* the key in expected dataType:operationType*measurementType format
* @param value
* a generic value expected to match the output type
* @throws IllegalArgumentException
* if invalid format
*/
OperationOutput(String key, Object value) {
int place = key.indexOf(TYPE_SEP);
if (place == -1) {
throw new IllegalArgumentException(
"Invalid key format - no type seperator - " + TYPE_SEP);
}
try {
dataType = OutputType.valueOf(
StringUtils.toUpperCase(key.substring(0, place)));
} catch (Exception e) {
throw new IllegalArgumentException(
"Invalid key format - invalid output type", e);
}
key = key.substring(place + 1);
place = key.indexOf(MEASUREMENT_SEP);
if (place == -1) {
throw new IllegalArgumentException(
"Invalid key format - no measurement seperator - " + MEASUREMENT_SEP);
}
opType = key.substring(0, place);
measurementType = key.substring(place + 1);
this.value = value;
}
OperationOutput(Text key, Object value) {
this(key.toString(), value);
}
public String toString() {
return getKeyString() + " (" + this.value + ")";
}
OperationOutput(OutputType dataType, String opType, String measurementType,
Object value) {
this.dataType = dataType;
this.opType = opType;
this.measurementType = measurementType;
this.value = value;
}
/**
* Merges according to the documented rules for merging. Merging will only
* occur if the measurement type and operation type are the same.
*
* @param o1
* the first object to merge with the second
* @param o2
* the second object.
*
* @return OperationOutput merged output.
*
* @throws IllegalArgumentException
* if unable to merge due to incompatible formats/types
*/
static OperationOutput merge(OperationOutput o1, OperationOutput o2) {
if (o1.getMeasurementType().equals(o2.getMeasurementType())
&& o1.getOperationType().equals(o2.getOperationType())) {
Object newvalue = null;
OutputType newtype = null;
String opType = o1.getOperationType();
String mType = o1.getMeasurementType();
if (o1.getOutputType() == OutputType.STRING
|| o2.getOutputType() == OutputType.STRING) {
newtype = OutputType.STRING;
StringBuilder str = new StringBuilder();
str.append(o1.getValue());
str.append(STRING_SEP);
str.append(o2.getValue());
newvalue = str.toString();
} else if (o1.getOutputType() == OutputType.DOUBLE
|| o2.getOutputType() == OutputType.DOUBLE) {
newtype = OutputType.DOUBLE;
try {
newvalue = Double.parseDouble(o1.getValue().toString())
+ Double.parseDouble(o2.getValue().toString());
} catch (NumberFormatException e) {
throw new IllegalArgumentException(
"Unable to combine a type with a double " + o1 + " & " + o2, e);
}
} else if (o1.getOutputType() == OutputType.FLOAT
|| o2.getOutputType() == OutputType.FLOAT) {
newtype = OutputType.FLOAT;
try {
newvalue = Float.parseFloat(o1.getValue().toString())
+ Float.parseFloat(o2.getValue().toString());
} catch (NumberFormatException e) {
throw new IllegalArgumentException(
"Unable to combine a type with a float " + o1 + " & " + o2, e);
}
} else if (o1.getOutputType() == OutputType.LONG
|| o2.getOutputType() == OutputType.LONG) {
newtype = OutputType.LONG;
try {
newvalue = Long.parseLong(o1.getValue().toString())
+ Long.parseLong(o2.getValue().toString());
} catch (NumberFormatException e) {
throw new IllegalArgumentException(
"Unable to combine a type with a long " + o1 + " & " + o2, e);
}
} else if (o1.getOutputType() == OutputType.INTEGER
|| o2.getOutputType() == OutputType.INTEGER) {
newtype = OutputType.INTEGER;
try {
newvalue = Integer.parseInt(o1.getValue().toString())
+ Integer.parseInt(o2.getValue().toString());
} catch (NumberFormatException e) {
throw new IllegalArgumentException(
"Unable to combine a type with an int " + o1 + " & " + o2, e);
}
}
return new OperationOutput(newtype, opType, mType, newvalue);
} else {
throw new IllegalArgumentException("Unable to combine dissimilar types "
+ o1 + " & " + o2);
}
}
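// Worked example (illustrative): merging two outputs with the same operation
// and measurement type, such as (LONG, "ReadOp", "bytes_read", 10L) and
// (LONG, "ReadOp", "bytes_read", 32L), sums the values into a LONG of 42;
// if either side were a STRING the values would instead be concatenated with
// the ";" separator.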
/**
* Formats the key for output
*
* @return String
*/
private String getKeyString() {
StringBuilder str = new StringBuilder();
str.append(getOutputType().name());
str.append(TYPE_SEP);
str.append(getOperationType());
str.append(MEASUREMENT_SEP);
str.append(getMeasurementType());
return str.toString();
}
/**
* Retrieves the key in a hadoop text object
*
* @return Text text output
*/
Text getKey() {
return new Text(getKeyString());
}
/**
* Gets the output value in text format
*
* @return Text
*/
Text getOutputValue() {
StringBuilder valueStr = new StringBuilder();
valueStr.append(getValue());
return new Text(valueStr.toString());
}
/**
* Gets the object that represents this value (expected to match the output
* data type)
*
* @return Object
*/
Object getValue() {
return value;
}
/**
* Gets the output data type of this class.
*/
OutputType getOutputType() {
return dataType;
}
/**
* Gets the operation type this object represents.
*
* @return String
*/
String getOperationType() {
return opType;
}
/**
* Gets the measurement type this object represents.
*
* @return String
*/
String getMeasurementType() {
return measurementType;
}
}
| 8,253
| 31.753968
| 80
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/SlivePartitioner.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.slive;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Partitioner;
/**
* The partitioner partitions the map output according to the operation type.
* The partition number is the hash of the operation type modulo the total
* number of reducers.
*/
public class SlivePartitioner implements Partitioner<Text, Text> {
@Override // JobConfigurable
public void configure(JobConf conf) {}
@Override // Partitioner
public int getPartition(Text key, Text value, int numPartitions) {
OperationOutput oo = new OperationOutput(key, value);
return (oo.getOperationType().hashCode() & Integer.MAX_VALUE) % numPartitions;
}
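// Worked example (illustrative): with 4 reducers, an output whose operation
// type string is "ReadOp" is assigned partition
// ("ReadOp".hashCode() & Integer.MAX_VALUE) % 4; masking with
// Integer.MAX_VALUE keeps the hash non-negative before the modulo.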
}
| 1,537
| 38.435897
| 82
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/Operation.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.slive;
import java.util.LinkedList;
import java.util.List;
import java.util.Random;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.slive.OperationOutput.OutputType;
/**
* An operation provides these abstractions; if it desires to perform any
* operations it must implement an override of the run() function to provide
* varying output to be captured.
*/
abstract class Operation {
private ConfigExtractor config;
private PathFinder finder;
private String type;
private Random rnd;
protected Operation(String type, ConfigExtractor cfg, Random rnd) {
this.config = cfg;
this.type = type;
this.rnd = rnd;
// Use a new Random instance so that the sequence of file names produced is
// the same even in case of unsuccessful operations
this.finder = new PathFinder(cfg, new Random(rnd.nextInt()));
}
/**
* Gets the configuration object this class is using
*
* @return ConfigExtractor
*/
protected ConfigExtractor getConfig() {
return this.config;
}
/**
* Gets the random number generator to use for this operation
*
* @return Random
*/
protected Random getRandom() {
return this.rnd;
}
/**
* Gets the type of operation that this class belongs to
*
* @return String
*/
String getType() {
return type;
}
/**
* Gets the path finding/generating instance that this class is using
*
* @return PathFinder
*/
protected PathFinder getFinder() {
return this.finder;
}
/*
* (non-Javadoc)
*
* @see java.lang.Object#toString()
*/
public String toString() {
return getType();
}
/**
* This run() method simply sets up the default output container and adds in a
* data member to keep track of the number of operations that occurred
*
* @param fs
* FileSystem object to perform operations with
*
* @return List of operation outputs to be collected and output in the overall
* map reduce operation (or empty or null if none)
*/
List<OperationOutput> run(FileSystem fs) {
List<OperationOutput> out = new LinkedList<OperationOutput>();
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.OP_COUNT, 1L));
return out;
}
}
| 3,110
| 26.776786
| 80
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/DummyInputFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.slive;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
/**
* An input format which returns one dummy key and value
*/
class DummyInputFormat implements InputFormat<Object, Object> {
static class EmptySplit implements InputSplit {
public void write(DataOutput out) throws IOException {
}
public void readFields(DataInput in) throws IOException {
}
public long getLength() {
return 0L;
}
public String[] getLocations() {
return new String[0];
}
}
public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
InputSplit[] splits = new InputSplit[numSplits];
for (int i = 0; i < splits.length; ++i) {
splits[i] = new EmptySplit();
}
return splits;
}
public RecordReader<Object, Object> getRecordReader(InputSplit split,
JobConf job, Reporter reporter) throws IOException {
return new RecordReader<Object, Object>() {
boolean once = false;
public boolean next(Object key, Object value) throws IOException {
if (!once) {
once = true;
return true;
}
return false;
}
public Object createKey() {
return new Object();
}
public Object createValue() {
return new Object();
}
public long getPos() throws IOException {
return 0L;
}
public void close() throws IOException {
}
public float getProgress() throws IOException {
return 0.0f;
}
};
}
}
| 2,600
| 26.378947
| 80
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/OperationFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.slive;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;
import org.apache.hadoop.fs.slive.Constants.OperationType;
/**
* Factory class which returns instances of operations given their operation
* type enumeration (in string or enumeration format).
*/
class OperationFactory {
private Map<OperationType, Operation> typedOperations;
private ConfigExtractor config;
private Random rnd;
OperationFactory(ConfigExtractor cfg, Random rnd) {
this.typedOperations = new HashMap<OperationType, Operation>();
this.config = cfg;
this.rnd = rnd;
}
/**
* Gets an operation instance (cached) for a given operation type
*
* @param type
* the operation type to fetch for
*
* @return Operation operation instance or null if it can not be fetched.
*/
Operation getOperation(OperationType type) {
Operation op = typedOperations.get(type);
if (op != null) {
return op;
}
switch (type) {
case READ:
op = new ReadOp(this.config, rnd);
break;
case LS:
op = new ListOp(this.config, rnd);
break;
case MKDIR:
op = new MkdirOp(this.config, rnd);
break;
case APPEND:
op = new AppendOp(this.config, rnd);
break;
case RENAME:
op = new RenameOp(this.config, rnd);
break;
case DELETE:
op = new DeleteOp(this.config, rnd);
break;
case CREATE:
op = new CreateOp(this.config, rnd);
break;
case TRUNCATE:
op = new TruncateOp(this.config, rnd);
break;
}
typedOperations.put(type, op);
return op;
}
}
| 2,456
| 27.569767
| 76
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/SliveReducer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.slive;
import java.io.IOException;
import java.util.Iterator;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.util.StringUtils;
/**
* The slive reducer which iterates over the given input values and merges them
* together into a final output value.
*/
public class SliveReducer extends MapReduceBase implements
Reducer<Text, Text, Text, Text> {
private static final Log LOG = LogFactory.getLog(SliveReducer.class);
private ConfigExtractor config;
/**
* Logs to the given reporter and logs to the internal logger at info level
*
* @param r
* the reporter to set status on
* @param msg
* the message to log
*/
private void logAndSetStatus(Reporter r, String msg) {
r.setStatus(msg);
LOG.info(msg);
}
/**
* Fetches the config this object uses
*
* @return ConfigExtractor
*/
private ConfigExtractor getConfig() {
return config;
}
/*
* (non-Javadoc)
*
* @see org.apache.hadoop.mapred.Reducer#reduce(java.lang.Object,
* java.util.Iterator, org.apache.hadoop.mapred.OutputCollector,
* org.apache.hadoop.mapred.Reporter)
*/
@Override // Reducer
public void reduce(Text key, Iterator<Text> values,
OutputCollector<Text, Text> output, Reporter reporter) throws IOException {
OperationOutput collector = null;
int reduceAm = 0;
int errorAm = 0;
logAndSetStatus(reporter, "Iterating over reduction values for key " + key);
while (values.hasNext()) {
Text value = values.next();
try {
OperationOutput val = new OperationOutput(key, value);
if (collector == null) {
collector = val;
} else {
collector = OperationOutput.merge(collector, val);
}
LOG.info("Combined " + val + " into/with " + collector);
++reduceAm;
} catch (Exception e) {
++errorAm;
logAndSetStatus(reporter, "Error iterating over reduction input "
+ value + " due to : " + StringUtils.stringifyException(e));
if (getConfig().shouldExitOnFirstError()) {
break;
}
}
}
logAndSetStatus(reporter, "Reduced " + reduceAm + " values with " + errorAm
+ " errors");
if (collector != null) {
logAndSetStatus(reporter, "Writing output " + collector.getKey() + " : "
+ collector.getOutputValue());
output.collect(collector.getKey(), collector.getOutputValue());
}
}
/*
* (non-Javadoc)
*
* @see
* org.apache.hadoop.mapred.MapReduceBase#configure(org.apache.hadoop.mapred
* .JobConf)
*/
@Override // MapReduceBase
public void configure(JobConf conf) {
config = new ConfigExtractor(conf);
ConfigExtractor.dumpOptions(config);
}
}
| 3,898
| 30.443548
| 81
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/ObserveableOp.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.slive;
import java.util.List;
import org.apache.hadoop.fs.FileSystem;
/**
* Operation which wraps a given operation and allows an observer to be notified
* when the operation is about to start and when the operation has finished
*/
class ObserveableOp extends Operation {
/**
   * The observation interface which classes that wish to monitor starting and
   * ending events must implement.
*/
interface Observer {
void notifyStarting(Operation op);
void notifyFinished(Operation op);
}
private Operation op;
private Observer observer;
ObserveableOp(Operation op, Observer observer) {
super(op.getType(), op.getConfig(), op.getRandom());
this.op = op;
this.observer = observer;
}
/**
* Proxy to underlying operation toString()
*/
public String toString() {
return op.toString();
}
@Override // Operation
List<OperationOutput> run(FileSystem fs) {
List<OperationOutput> result = null;
try {
if (observer != null) {
observer.notifyStarting(op);
}
result = op.run(fs);
} finally {
if (observer != null) {
observer.notifyFinished(op);
}
}
return result;
}
}
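// Editorial sketch, not part of the original source: a minimal Observer that
// times a wrapped operation. The class name and timing approach are hypothetical;
// only the Observer interface above is taken from this file.
class TimingObserverSketch implements ObserveableOp.Observer {
  private long startedAt;
  @Override
  public void notifyStarting(Operation op) {
    startedAt = System.currentTimeMillis();
  }
  @Override
  public void notifyFinished(Operation op) {
    System.out.println(op + " took " + (System.currentTimeMillis() - startedAt)
        + " milliseconds");
  }
}
// Wiring would then look like: new ObserveableOp(wrapped, new TimingObserverSketch())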
| 2,025
| 26.753425
| 80
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/AppendOp.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.slive;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.OutputStream;
import java.util.List;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.slive.DataWriter.GenerateOutput;
import org.apache.hadoop.fs.slive.OperationOutput.OutputType;
/**
* Operation which selects a random file and appends a random amount of bytes
* (selected from the configuration for append size) to that file if it exists.
*
* This operation will capture statistics on success for bytes written, time
* taken (milliseconds), and success count and on failure it will capture the
* number of failures and the time taken (milliseconds) to fail.
*/
class AppendOp extends Operation {
private static final Log LOG = LogFactory.getLog(AppendOp.class);
AppendOp(ConfigExtractor cfg, Random rnd) {
super(AppendOp.class.getSimpleName(), cfg, rnd);
}
/**
* Gets the file to append to
*
* @return Path
*/
protected Path getAppendFile() {
Path fn = getFinder().getFile();
return fn;
}
@Override // Operation
List<OperationOutput> run(FileSystem fs) {
List<OperationOutput> out = super.run(fs);
OutputStream os = null;
try {
Path fn = getAppendFile();
// determine file status for file length requirement
// to know if should fill in partial bytes
Range<Long> appendSizeRange = getConfig().getAppendSize();
if (getConfig().shouldAppendUseBlockSize()) {
appendSizeRange = getConfig().getBlockSize();
}
long appendSize = Range.betweenPositive(getRandom(), appendSizeRange);
long timeTaken = 0, bytesAppended = 0;
DataWriter writer = new DataWriter(getRandom());
LOG.info("Attempting to append to file at " + fn + " of size "
+ Helper.toByteInfo(appendSize));
{
// open
long startTime = Timer.now();
os = fs.append(fn);
timeTaken += Timer.elapsed(startTime);
// append given length
GenerateOutput stats = writer.writeSegment(appendSize, os);
timeTaken += stats.getTimeTaken();
bytesAppended += stats.getBytesWritten();
// capture close time
startTime = Timer.now();
os.close();
os = null;
timeTaken += Timer.elapsed(startTime);
}
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.BYTES_WRITTEN, bytesAppended));
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.OK_TIME_TAKEN, timeTaken));
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.SUCCESSES, 1L));
LOG.info("Appended " + Helper.toByteInfo(bytesAppended) + " to file "
+ fn + " in " + timeTaken + " milliseconds");
} catch (FileNotFoundException e) {
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.NOT_FOUND, 1L));
LOG.warn("Error with appending", e);
} catch (IOException e) {
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.FAILURES, 1L));
LOG.warn("Error with appending", e);
} finally {
if (os != null) {
try {
os.close();
} catch (IOException e) {
LOG.warn("Error with closing append stream", e);
}
}
}
return out;
}
}
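// Editorial sketch, not part of the original source: runs a single AppendOp
// against the local file system. It assumes the default slive configuration; if
// the randomly chosen target file does not exist the op simply reports NOT_FOUND.
class AppendOpSketch {
  public static void main(String[] args) throws IOException {
    org.apache.hadoop.conf.Configuration conf =
        new org.apache.hadoop.conf.Configuration();
    FileSystem fs = FileSystem.getLocal(conf);
    AppendOp op = new AppendOp(new ConfigExtractor(conf), new Random(0L));
    // each OperationOutput is one measurement, e.g. successes, failures or time taken
    for (OperationOutput measurement : op.run(fs)) {
      System.out.println(measurement);
    }
  }
}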
| 4,320
| 35.008333
| 79
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/SliveTest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.slive;
import java.io.BufferedReader;
import java.io.DataInputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.slive.ArgumentParser.ParsedOutput;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TextOutputFormat;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* Slive test entry point + main program
*
 * Given -help, this program outputs a help message which can be used to
 * determine the program options and configuration that affect its runtime.
 * The program takes these options from the configuration and the command
 * line, merges them, and then establishes a job which runs a set of mappers
 * & reducers; the output of the reduction is then reported on.
*
* The number of maps is specified by "slive.maps".
* The number of reduces is specified by "slive.reduces".
*/
public class SliveTest implements Tool {
private static final Log LOG = LogFactory.getLog(SliveTest.class);
// ensures the hdfs configurations are loaded if they exist
static {
Configuration.addDefaultResource("hdfs-default.xml");
Configuration.addDefaultResource("hdfs-site.xml");
}
private Configuration base;
public SliveTest(Configuration base) {
this.base = base;
}
public int run(String[] args) {
ParsedOutput parsedOpts = null;
try {
ArgumentParser argHolder = new ArgumentParser(args);
parsedOpts = argHolder.parse();
if (parsedOpts.shouldOutputHelp()) {
parsedOpts.outputHelp();
return 1;
}
} catch (Exception e) {
LOG.error("Unable to parse arguments due to error: ", e);
return 1;
}
LOG.info("Running with option list " + Helper.stringifyArray(args, " "));
ConfigExtractor config = null;
try {
ConfigMerger cfgMerger = new ConfigMerger();
Configuration cfg = cfgMerger.getMerged(parsedOpts,
new Configuration(base));
if (cfg != null) {
config = new ConfigExtractor(cfg);
}
} catch (Exception e) {
LOG.error("Unable to merge config due to error: ", e);
return 1;
}
if (config == null) {
LOG.error("Unable to merge config & options!");
return 1;
}
try {
LOG.info("Options are:");
ConfigExtractor.dumpOptions(config);
} catch (Exception e) {
LOG.error("Unable to dump options due to error: ", e);
return 1;
}
boolean jobOk = false;
try {
LOG.info("Running job:");
runJob(config);
jobOk = true;
} catch (Exception e) {
LOG.error("Unable to run job due to error: ", e);
}
if (jobOk) {
try {
LOG.info("Reporting on job:");
writeReport(config);
} catch (Exception e) {
LOG.error("Unable to report on job due to error: ", e);
}
}
// attempt cleanup (not critical)
boolean cleanUp = getBool(parsedOpts
.getValue(ConfigOption.CLEANUP.getOpt()));
if (cleanUp) {
try {
LOG.info("Cleaning up job:");
cleanup(config);
} catch (Exception e) {
LOG.error("Unable to cleanup job due to error: ", e);
}
}
// all mostly worked
if (jobOk) {
return 0;
}
// maybe didn't work
return 1;
}
/**
   * Parses a string into a boolean, treating "true" and "1" (case-insensitive)
   * as true and anything else (including null) as false
*
* @param val
* val to check
* @return boolean
*/
private boolean getBool(String val) {
if (val == null) {
return false;
}
String cleanupOpt = StringUtils.toLowerCase(val).trim();
if (cleanupOpt.equals("true") || cleanupOpt.equals("1")) {
return true;
} else {
return false;
}
}
/**
   * Sets up a job conf for the given job using the given config object. Ensures
   * that the correct input format is set, along with the mapper, partitioner and
   * reducer classes and the output key and value classes, and applies any other
   * job configuration.
*
* @param config
   * @return JobConf representing the job to be run
* @throws IOException
*/
private JobConf getJob(ConfigExtractor config) throws IOException {
JobConf job = new JobConf(config.getConfig(), SliveTest.class);
job.setInputFormat(DummyInputFormat.class);
FileOutputFormat.setOutputPath(job, config.getOutputPath());
job.setMapperClass(SliveMapper.class);
job.setPartitionerClass(SlivePartitioner.class);
job.setReducerClass(SliveReducer.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
job.setOutputFormat(TextOutputFormat.class);
TextOutputFormat.setCompressOutput(job, false);
job.setNumReduceTasks(config.getReducerAmount());
job.setNumMapTasks(config.getMapAmount());
return job;
}
/**
* Runs the job given the provided config
*
* @param config
* the config to run the job with
*
* @throws IOException
   *           if the given job can not be run
*/
private void runJob(ConfigExtractor config) throws IOException {
JobClient.runJob(getJob(config));
}
/**
* Attempts to write the report to the given output using the specified
   * config. It opens the expected reducer output files, reads their contents,
   * splits each line into an operation output sorted by operation type, and then
   * generates a report for each operation type to the specified result file and
   * the console.
*
* @param cfg
* the config specifying the files and output
*
* @throws Exception
* if files can not be opened/closed/read or invalid format
*/
private void writeReport(ConfigExtractor cfg) throws Exception {
Path dn = cfg.getOutputPath();
LOG.info("Writing report using contents of " + dn);
FileSystem fs = dn.getFileSystem(cfg.getConfig());
FileStatus[] reduceFiles = fs.listStatus(dn);
BufferedReader fileReader = null;
PrintWriter reportWriter = null;
try {
List<OperationOutput> noOperations = new ArrayList<OperationOutput>();
Map<String, List<OperationOutput>> splitTypes = new TreeMap<String, List<OperationOutput>>();
for(FileStatus fn : reduceFiles) {
if(!fn.getPath().getName().startsWith("part")) continue;
fileReader = new BufferedReader(new InputStreamReader(
new DataInputStream(fs.open(fn.getPath()))));
String line;
while ((line = fileReader.readLine()) != null) {
String pieces[] = line.split("\t", 2);
if (pieces.length == 2) {
OperationOutput data = new OperationOutput(pieces[0], pieces[1]);
String op = (data.getOperationType());
if (op != null) {
List<OperationOutput> opList = splitTypes.get(op);
if (opList == null) {
opList = new ArrayList<OperationOutput>();
}
opList.add(data);
splitTypes.put(op, opList);
} else {
noOperations.add(data);
}
} else {
throw new IOException("Unparseable line " + line);
}
}
fileReader.close();
fileReader = null;
}
File resFile = null;
if (cfg.getResultFile() != null) {
resFile = new File(cfg.getResultFile());
}
if (resFile != null) {
LOG.info("Report results being placed to logging output and to file "
+ resFile.getCanonicalPath());
reportWriter = new PrintWriter(new FileOutputStream(resFile));
} else {
LOG.info("Report results being placed to logging output");
}
ReportWriter reporter = new ReportWriter();
if (!noOperations.isEmpty()) {
reporter.basicReport(noOperations, reportWriter);
}
for (String opType : splitTypes.keySet()) {
reporter.opReport(opType, splitTypes.get(opType), reportWriter);
}
} finally {
if (fileReader != null) {
fileReader.close();
}
if (reportWriter != null) {
reportWriter.close();
}
}
}
/**
* Cleans up the base directory by removing it
*
* @param cfg
* ConfigExtractor which has location of base directory
*
* @throws IOException
*/
private void cleanup(ConfigExtractor cfg) throws IOException {
Path base = cfg.getBaseDirectory();
if (base != null) {
LOG.info("Attempting to recursively delete " + base);
FileSystem fs = base.getFileSystem(cfg.getConfig());
fs.delete(base, true);
}
}
/**
* The main program entry point. Sets up and parses the command line options,
* then merges those options and then dumps those options and the runs the
* corresponding map/reduce job that those operations represent and then
* writes the report for the output of the run that occurred.
*
* @param args
* command line options
*/
public static void main(String[] args) throws Exception {
Configuration startCfg = new Configuration(true);
SliveTest runner = new SliveTest(startCfg);
int ec = ToolRunner.run(runner, args);
System.exit(ec);
}
@Override // Configurable
public Configuration getConf() {
return this.base;
}
@Override // Configurable
public void setConf(Configuration conf) {
this.base = conf;
}
}
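// Editorial sketch, not part of the original source: a programmatic launch that
// mirrors main() above with a few explicit options. The "-maps"/"-reduces"/
// "-ops"/"-baseDir" flags correspond to the ConfigOption entries, but the exact
// flag syntax is decided by ArgumentParser, so treat these values as examples.
class SliveTestLaunchSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration(true);
    String[] sliveArgs = {
        "-maps", "2", "-reduces", "1", "-ops", "100", "-baseDir", "/test/slive"
    };
    System.exit(ToolRunner.run(new SliveTest(conf), sliveArgs));
  }
}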
| 10,853
| 31.990881
| 99
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/PathFinder.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.slive;
import java.util.Random;
import org.apache.hadoop.fs.Path;
/**
* Class which generates a file or directory path using a simple random
* generation algorithm stated in http://issues.apache.org/jira/browse/HDFS-708
*/
class PathFinder {
private static enum Type {
FILE, DIRECTORY
}
private static final String DIR_PREFIX = "sl_dir_";
private static final String FILE_PREFIX = "sl_file_";
private Path basePath;
private ConfigExtractor config;
private Random rnd;
PathFinder(ConfigExtractor cfg, Random rnd) {
this.basePath = cfg.getDataPath();
this.config = cfg;
this.rnd = rnd;
}
/**
* This function uses a simple recursive algorithm to generate a path name
* using the current id % limitPerDir and using current id / limitPerDir to
* form the rest of the tree segments
*
* @param curId
* the current id to use for determining the current directory id %
* per directory limit and then used for determining the next segment
* of the path to use, if <= zero this will return the base path
* @param limitPerDir
* the per directory file limit used in modulo and division
* operations to calculate the file name and path tree
* @param type
* directory or file enumeration
* @return Path
*/
private Path getPath(int curId, int limitPerDir, Type type) {
if (curId <= 0) {
return basePath;
}
String name = "";
switch (type) {
case FILE:
      name = FILE_PREFIX + Integer.toString(curId % limitPerDir);
break;
case DIRECTORY:
      name = DIR_PREFIX + Integer.toString(curId % limitPerDir);
break;
}
Path base = getPath((curId / limitPerDir), limitPerDir, Type.DIRECTORY);
return new Path(base, name);
}
/**
   * Gets a file path using the total files and files per directory provided by
   * the given configuration
*
* @return path
*/
Path getFile() {
int fileLimit = config.getTotalFiles();
int dirLimit = config.getDirSize();
int startPoint = 1 + rnd.nextInt(fileLimit);
return getPath(startPoint, dirLimit, Type.FILE);
}
/**
   * Gets a directory path using the total files and files per directory
   * provided by the given configuration
*
* @return path
*/
Path getDirectory() {
int fileLimit = config.getTotalFiles();
int dirLimit = config.getDirSize();
int startPoint = rnd.nextInt(fileLimit);
return getPath(startPoint, dirLimit, Type.DIRECTORY);
}
}
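// Editorial sketch, not part of the original source: traces the id -> path
// arithmetic used by getPath() above for a per-directory limit of 4, with no
// randomness. The base path below is an arbitrary example value.
class PathFinderTraceSketch {
  public static void main(String[] args) {
    int curId = 9;
    int limitPerDir = 4;
    // file segment : sl_file_ + (9 % 4) -> sl_file_1
    // parent dir id: 9 / 4 = 2 -> sl_dir_2; then 2 / 4 = 0 stops the recursion
    Path base = new Path("/test/slive");
    Path dir = new Path(base, "sl_dir_" + (curId / limitPerDir));
    Path file = new Path(dir, "sl_file_" + (curId % limitPerDir));
    System.out.println(file); // /test/slive/sl_dir_2/sl_file_1
  }
}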
| 3,366
| 30.175926
| 80
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/BadFileException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.slive;
import java.io.IOException;
/**
* Exception used to signify file reading failures where headers are bad or an
* unexpected EOF occurs when it should not.
*/
class BadFileException extends IOException {
private static final long serialVersionUID = 463201983951298129L;
BadFileException(String msg) {
super(msg);
}
BadFileException(String msg, Throwable e) {
super(msg, e);
}
}
| 1,247
| 30.2
| 78
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/DeleteOp.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.slive;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.List;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.slive.OperationOutput.OutputType;
/**
* Operation which selects a random file and attempts to delete that file (if it
* exists)
*
 * On success this operation will capture statistics on the time taken to delete
 * and the number of successful deletions that occurred; on failure or error it
 * will capture the number of failures and the amount of time taken to fail
*/
class DeleteOp extends Operation {
private static final Log LOG = LogFactory.getLog(DeleteOp.class);
DeleteOp(ConfigExtractor cfg, Random rnd) {
super(DeleteOp.class.getSimpleName(), cfg, rnd);
}
/**
* Gets the file to delete
*/
protected Path getDeleteFile() {
Path fn = getFinder().getFile();
return fn;
}
@Override // Operation
List<OperationOutput> run(FileSystem fs) {
List<OperationOutput> out = super.run(fs);
try {
Path fn = getDeleteFile();
long timeTaken = 0;
boolean deleteStatus = false;
{
long startTime = Timer.now();
deleteStatus = fs.delete(fn, false);
timeTaken = Timer.elapsed(startTime);
}
// collect the stats
if (!deleteStatus) {
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.FAILURES, 1L));
LOG.info("Could not delete " + fn);
} else {
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.OK_TIME_TAKEN, timeTaken));
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.SUCCESSES, 1L));
LOG.info("Could delete " + fn);
}
} catch (FileNotFoundException e) {
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.NOT_FOUND, 1L));
LOG.warn("Error with deleting", e);
} catch (IOException e) {
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.FAILURES, 1L));
LOG.warn("Error with deleting", e);
}
return out;
}
}
| 3,107
| 32.419355
| 80
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/RouletteSelector.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.slive;
import java.util.List;
import java.util.Random;
/**
 * A selection object which simulates a roulette wheel: every operation has a
 * weight, the total value of the wheel is the combined weight, and during
 * selection a random number in (0, total weight) is drawn and the operation
 * sitting at that value is selected. So for a set of operations
* with uniform weight they will all have the same probability of being
* selected. Operations which choose to have higher weights will have higher
* likelihood of being selected (and the same goes for lower weights).
*/
class RouletteSelector {
private Random picker;
RouletteSelector(Random rnd) {
picker = rnd;
}
Operation select(List<OperationWeight> ops) {
if (ops.isEmpty()) {
return null;
}
double totalWeight = 0;
for (OperationWeight w : ops) {
if (w.getWeight() < 0) {
throw new IllegalArgumentException("Negative weights not allowed");
}
totalWeight += w.getWeight();
}
// roulette wheel selection
double sAm = picker.nextDouble() * totalWeight;
int index = 0;
for (int i = 0; i < ops.size(); ++i) {
sAm -= ops.get(i).getWeight();
if (sAm <= 0) {
index = i;
break;
}
}
return ops.get(index).getOperation();
}
}
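// Editorial sketch, not part of the original source: repeats the wheel walk from
// select() above over plain double weights so the resulting probabilities are
// easy to see. OperationWeight itself is not used here since its constructor is
// not shown in this file.
class RouletteMathSketch {
  public static void main(String[] args) {
    double[] weights = { 1.0, 1.0, 2.0 }; // last entry should win about half the time
    int[] hits = new int[weights.length];
    Random rnd = new Random(0L);
    double totalWeight = 0;
    for (double w : weights) {
      totalWeight += w;
    }
    for (int trial = 0; trial < 100000; ++trial) {
      double sAm = rnd.nextDouble() * totalWeight;
      int index = 0;
      for (int i = 0; i < weights.length; ++i) {
        sAm -= weights[i];
        if (sAm <= 0) {
          index = i;
          break;
        }
      }
      ++hits[index];
    }
    // expect roughly 25000, 25000, 50000
    System.out.println(java.util.Arrays.toString(hits));
  }
}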
| 2,164
| 31.80303
| 79
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/TruncateOp.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.slive;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.List;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.slive.OperationOutput.OutputType;
/**
* Operation which selects a random file and truncates a random amount of bytes
* (selected from the configuration for truncate size) from that file,
* if it exists.
*
* This operation will capture statistics on success for bytes written, time
* taken (milliseconds), and success count and on failure it will capture the
* number of failures and the time taken (milliseconds) to fail.
*/
class TruncateOp extends Operation {
private static final Log LOG = LogFactory.getLog(TruncateOp.class);
TruncateOp(ConfigExtractor cfg, Random rnd) {
super(TruncateOp.class.getSimpleName(), cfg, rnd);
}
/**
* Gets the file to truncate from
*
* @return Path
*/
protected Path getTruncateFile() {
Path fn = getFinder().getFile();
return fn;
}
@Override // Operation
List<OperationOutput> run(FileSystem fs) {
List<OperationOutput> out = super.run(fs);
try {
Path fn = getTruncateFile();
boolean waitOnTruncate = getConfig().shouldWaitOnTruncate();
long currentSize = fs.getFileStatus(fn).getLen();
// determine file status for file length requirement
// to know if should fill in partial bytes
Range<Long> truncateSizeRange = getConfig().getTruncateSize();
if (getConfig().shouldTruncateUseBlockSize()) {
truncateSizeRange = getConfig().getBlockSize();
}
long truncateSize = Math.max(0L,
currentSize - Range.betweenPositive(getRandom(), truncateSizeRange));
long timeTaken = 0;
LOG.info("Attempting to truncate file at " + fn + " to size "
+ Helper.toByteInfo(truncateSize));
{
// truncate
long startTime = Timer.now();
boolean completed = fs.truncate(fn, truncateSize);
if(!completed && waitOnTruncate)
waitForRecovery(fs, fn, truncateSize);
timeTaken += Timer.elapsed(startTime);
}
out.add(new OperationOutput(OutputType.LONG, getType(),
          ReportWriter.BYTES_WRITTEN, 0L));
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.OK_TIME_TAKEN, timeTaken));
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.SUCCESSES, 1L));
LOG.info("Truncate file " + fn + " to " + Helper.toByteInfo(truncateSize)
+ " in " + timeTaken + " milliseconds");
} catch (FileNotFoundException e) {
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.NOT_FOUND, 1L));
LOG.warn("Error with truncating", e);
} catch (IOException e) {
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.FAILURES, 1L));
LOG.warn("Error with truncating", e);
}
return out;
}
private void waitForRecovery(FileSystem fs, Path fn, long newLength)
throws IOException {
LOG.info("Waiting on truncate file recovery for " + fn);
for(;;) {
FileStatus stat = fs.getFileStatus(fn);
if(stat.getLen() == newLength) break;
try {Thread.sleep(1000);} catch(InterruptedException ignored) {}
}
}
}
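// Editorial sketch, not part of the original source: the truncate-size arithmetic
// used in run() above, shown without a FileSystem. The new length never goes
// below zero because the random slice is subtracted from the current length.
class TruncateSizeSketch {
  public static void main(String[] args) {
    long currentSize = 10L * 1024 * 1024; // pretend the file is 10 MB
    long randomSlice = 12L * 1024 * 1024; // random slice drawn larger than the file
    long truncateSize = Math.max(0L, currentSize - randomSlice);
    System.out.println("new length = " + truncateSize); // prints 0
  }
}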
| 4,298
| 36.382609
| 79
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/DataWriter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.slive;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.util.Random;
import static org.apache.hadoop.fs.slive.Constants.BYTES_PER_LONG;
/**
* Class which handles generating data (creating and appending) along with
* ensuring the correct data is written out for the given path name so that it
* can be later verified
*/
class DataWriter {
/**
* Header size in bytes
*/
private static final int HEADER_LENGTH = (BYTES_PER_LONG * 2);
private int bufferSize;
private Random rnd;
/**
* Class used to hold the number of bytes written and time taken for write
* operations for callers to use
*/
static class GenerateOutput {
private long bytes;
private long time;
GenerateOutput(long bytesWritten, long timeTaken) {
this.bytes = bytesWritten;
this.time = timeTaken;
}
long getBytesWritten() {
return bytes;
}
long getTimeTaken() {
return time;
}
public String toString() {
return "Wrote " + getBytesWritten() + " bytes " + " which took "
+ getTimeTaken() + " milliseconds";
}
}
/**
* Class used to hold a byte buffer and offset position for generating data
*/
private static class GenerateResult {
private long offset;
private ByteBuffer buffer;
GenerateResult(long offset, ByteBuffer buffer) {
this.offset = offset;
this.buffer = buffer;
}
long getOffset() {
return offset;
}
ByteBuffer getBuffer() {
return buffer;
}
}
/**
   * Holds what a header write returns: the hash value to use, the time taken to
   * perform the write and the bytes written
*/
private static class WriteInfo {
private long hashValue;
private long bytesWritten;
private long timeTaken;
WriteInfo(long hashValue, long bytesWritten, long timeTaken) {
this.hashValue = hashValue;
this.bytesWritten = bytesWritten;
this.timeTaken = timeTaken;
}
long getHashValue() {
return hashValue;
}
long getTimeTaken() {
return timeTaken;
}
long getBytesWritten() {
return bytesWritten;
}
}
/**
   * Inits with given buffer size (must be greater than or equal to bytes per
   * long and a multiple of bytes per long)
*
* @param rnd
* random number generator to use for hash value creation
*
* @param bufferSize
   *          size which must be greater than or equal to BYTES_PER_LONG and
   *          which also must be a multiple of BYTES_PER_LONG
*/
DataWriter(Random rnd, int bufferSize) {
if (bufferSize < BYTES_PER_LONG) {
throw new IllegalArgumentException(
"Buffer size must be greater than or equal to " + BYTES_PER_LONG);
}
if ((bufferSize % BYTES_PER_LONG) != 0) {
throw new IllegalArgumentException("Buffer size must be a multiple of "
+ BYTES_PER_LONG);
}
this.bufferSize = bufferSize;
this.rnd = rnd;
}
/**
* Inits with default buffer size
*/
DataWriter(Random rnd) {
this(rnd, Constants.BUFFERSIZE);
}
/**
   * Generates a partial segment which is at most bytes per long in size
   *
   * @param byteAm
   *          the number of bytes to generate (at most bytes per long)
   * @param offset
   *          the starting offset
* @param hasher
* hasher to use for generating data given an offset
*
* @return GenerateResult containing new offset and byte buffer
*/
private GenerateResult generatePartialSegment(int byteAm, long offset,
DataHasher hasher) {
if (byteAm > BYTES_PER_LONG) {
throw new IllegalArgumentException(
"Partial bytes must be less or equal to " + BYTES_PER_LONG);
}
if (byteAm <= 0) {
throw new IllegalArgumentException(
"Partial bytes must be greater than zero and not " + byteAm);
}
ByteBuffer buf = ByteBuffer.wrap(new byte[BYTES_PER_LONG]);
buf.putLong(hasher.generate(offset));
ByteBuffer allBytes = ByteBuffer.wrap(new byte[byteAm]);
buf.rewind();
for (int i = 0; i < byteAm; ++i) {
allBytes.put(buf.get());
}
allBytes.rewind();
return new GenerateResult(offset, allBytes);
}
/**
* Generates a full segment (aligned to bytes per long) of the given byte
* amount size
*
* @param byteAm
* long aligned size
* @param startOffset
* starting hash offset
* @param hasher
* hasher to use for generating data given an offset
* @return GenerateResult containing new offset and byte buffer
*/
private GenerateResult generateFullSegment(int byteAm, long startOffset,
DataHasher hasher) {
if (byteAm <= 0) {
throw new IllegalArgumentException(
"Byte amount must be greater than zero and not " + byteAm);
}
if ((byteAm % BYTES_PER_LONG) != 0) {
throw new IllegalArgumentException("Byte amount " + byteAm
+ " must be a multiple of " + BYTES_PER_LONG);
}
// generate all the segments
ByteBuffer allBytes = ByteBuffer.wrap(new byte[byteAm]);
long offset = startOffset;
ByteBuffer buf = ByteBuffer.wrap(new byte[BYTES_PER_LONG]);
for (long i = 0; i < byteAm; i += BYTES_PER_LONG) {
buf.rewind();
buf.putLong(hasher.generate(offset));
buf.rewind();
allBytes.put(buf);
offset += BYTES_PER_LONG;
}
allBytes.rewind();
return new GenerateResult(offset, allBytes);
}
/**
   * Writes a set of bytes to the output stream. For full segments it writes out
   * the complete segment; for partial segments, i.e. when the last position does
   * not fill up a full long, a partial set is written out containing only the
   * needed bytes from the expected full segment
*
* @param byteAm
* the amount of bytes to write
* @param startPos
* a BYTES_PER_LONG aligned start position
* @param hasher
* hasher to use for generating data given an offset
* @param out
* the output stream to write to
   * @return GenerateOutput containing how many bytes were written and the time
   *         taken
* @throws IOException
*/
private GenerateOutput writePieces(long byteAm, long startPos,
DataHasher hasher, OutputStream out) throws IOException {
if (byteAm <= 0) {
return new GenerateOutput(0, 0);
}
if (startPos < 0) {
startPos = 0;
}
int leftOver = (int) (byteAm % bufferSize);
long fullPieces = byteAm / bufferSize;
long offset = startPos;
long bytesWritten = 0;
long timeTaken = 0;
// write the full pieces that fit in the buffer size
for (long i = 0; i < fullPieces; ++i) {
GenerateResult genData = generateFullSegment(bufferSize, offset, hasher);
offset = genData.getOffset();
ByteBuffer gBuf = genData.getBuffer();
{
byte[] buf = gBuf.array();
long startTime = Timer.now();
out.write(buf);
if (Constants.FLUSH_WRITES) {
out.flush();
}
timeTaken += Timer.elapsed(startTime);
bytesWritten += buf.length;
}
}
if (leftOver > 0) {
ByteBuffer leftOverBuf = ByteBuffer.wrap(new byte[leftOver]);
int bytesLeft = leftOver % BYTES_PER_LONG;
leftOver = leftOver - bytesLeft;
      // collect the piece which does not fit in the buffer size but is
      // also greater than or equal to BYTES_PER_LONG and a multiple of it
if (leftOver > 0) {
GenerateResult genData = generateFullSegment(leftOver, offset, hasher);
offset = genData.getOffset();
leftOverBuf.put(genData.getBuffer());
}
// collect any single partial byte segment
if (bytesLeft > 0) {
GenerateResult genData = generatePartialSegment(bytesLeft, offset,
hasher);
offset = genData.getOffset();
leftOverBuf.put(genData.getBuffer());
}
// do the write of both
leftOverBuf.rewind();
{
byte[] buf = leftOverBuf.array();
long startTime = Timer.now();
out.write(buf);
if (Constants.FLUSH_WRITES) {
out.flush();
}
timeTaken += Timer.elapsed(startTime);
bytesWritten += buf.length;
}
}
return new GenerateOutput(bytesWritten, timeTaken);
}
/**
   * Writes the given number of bytes to a stream
*
* @param byteAm
* the file size in number of bytes to write
*
* @param out
* the outputstream to write to
*
* @return the number of bytes written + time taken
*
* @throws IOException
*/
GenerateOutput writeSegment(long byteAm, OutputStream out)
throws IOException {
long headerLen = getHeaderLength();
if (byteAm < headerLen) {
// not enough bytes to write even the header
return new GenerateOutput(0, 0);
}
// adjust for header length
byteAm -= headerLen;
if (byteAm < 0) {
byteAm = 0;
}
WriteInfo header = writeHeader(out, byteAm);
DataHasher hasher = new DataHasher(header.getHashValue());
GenerateOutput pRes = writePieces(byteAm, 0, hasher, out);
long bytesWritten = pRes.getBytesWritten() + header.getBytesWritten();
long timeTaken = header.getTimeTaken() + pRes.getTimeTaken();
return new GenerateOutput(bytesWritten, timeTaken);
}
/**
* Gets the header length
*
* @return int
*/
static int getHeaderLength() {
return HEADER_LENGTH;
}
/**
* Writes a header to the given output stream
*
* @param os
* output stream to write to
*
* @param fileSize
* the file size to write
*
* @return WriteInfo
*
* @throws IOException
* if a write failure occurs
*/
WriteInfo writeHeader(OutputStream os, long fileSize) throws IOException {
int headerLen = getHeaderLength();
ByteBuffer buf = ByteBuffer.wrap(new byte[headerLen]);
long hash = rnd.nextLong();
buf.putLong(hash);
buf.putLong(fileSize);
buf.rewind();
byte[] headerData = buf.array();
long elapsed = 0;
{
long startTime = Timer.now();
os.write(headerData);
if (Constants.FLUSH_WRITES) {
os.flush();
}
elapsed += Timer.elapsed(startTime);
}
return new WriteInfo(hash, headerLen, elapsed);
}
}
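// Editorial sketch, not part of the original source: writes one segment to an
// in-memory stream and decodes the two-long header (hash, then payload length)
// back out, mirroring writeHeader() above. The 1024-byte segment size is arbitrary.
class DataWriterHeaderSketch {
  public static void main(String[] args) throws IOException {
    java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
    DataWriter writer = new DataWriter(new Random(0L));
    DataWriter.GenerateOutput result = writer.writeSegment(1024, out);
    ByteBuffer header = ByteBuffer.wrap(out.toByteArray(), 0,
        DataWriter.getHeaderLength());
    long hash = header.getLong();
    long payloadBytes = header.getLong(); // 1024 minus the header length
    System.out.println(result + "; hash=" + hash + " payload=" + payloadBytes);
  }
}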
| 11,135
| 28.617021
| 80
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/ConfigExtractor.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.slive;
import java.text.NumberFormat;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.slive.Constants.OperationType;
import org.apache.hadoop.util.StringUtils;
/**
 * Simple access layer on top of a configuration object that extracts the
 * slive specific configuration values needed for running slive
*/
class ConfigExtractor {
private static final Log LOG = LogFactory.getLog(ConfigExtractor.class);
private Configuration config;
ConfigExtractor(Configuration cfg) {
this.config = cfg;
}
/**
* @return the wrapped configuration that this extractor will use
*/
Configuration getConfig() {
return this.config;
}
/**
* @return the location of where data should be written to
*/
Path getDataPath() {
Path base = getBaseDirectory();
if (base == null) {
return null;
}
return new Path(base, Constants.DATA_DIR);
}
/**
* @return the location of where the reducer should write its data to
*/
Path getOutputPath() {
Path base = getBaseDirectory();
if (base == null) {
return null;
}
return new Path(base, Constants.OUTPUT_DIR);
}
/**
* @param primary
* primary the initial string to be used for the value of this
* configuration option (if not provided then config and then the
* default are used)
*
* @return the base directory where output & data should be stored using
* primary,config,default (in that order)
*/
Path getBaseDirectory(String primary) {
String path = primary;
if (path == null) {
path = config.get(ConfigOption.BASE_DIR.getCfgOption());
}
if (path == null) {
path = ConfigOption.BASE_DIR.getDefault();
}
if (path == null) {
return null;
}
return new Path(path);
}
/**
* @return the base directory using only config and default values
*/
Path getBaseDirectory() {
return getBaseDirectory(null);
}
/**
   * @return whether the mapper or reducer should exit when they get their first
* error using only config and default values
*/
boolean shouldExitOnFirstError() {
return shouldExitOnFirstError(null);
}
/**
* @param primary
* primary the initial string to be used for the value of this
* configuration option (if not provided then config and then the
* default are used)
*
   * @return the boolean of whether the mapper/reducer should exit on their
   *         first error, from primary,config,default (in that order)
*/
boolean shouldExitOnFirstError(String primary) {
String val = primary;
if (val == null) {
val = config.get(ConfigOption.EXIT_ON_ERROR.getCfgOption());
}
if (val == null) {
return ConfigOption.EXIT_ON_ERROR.getDefault();
}
return Boolean.parseBoolean(val);
}
/**
* @return whether the mapper or reducer should wait for truncate recovery
*/
boolean shouldWaitOnTruncate() {
return shouldWaitOnTruncate(null);
}
/**
* @param primary
* primary the initial string to be used for the value of this
* configuration option (if not provided then config and then the
* default are used)
*
* @return whether the mapper or reducer should wait for truncate recovery
*/
boolean shouldWaitOnTruncate(String primary) {
String val = primary;
    if (val == null) {
      // note: the original lookup reused EXIT_ON_ERROR here; the truncate wait
      // option (assumed to be ConfigOption.TRUNCATE_WAIT) is the intended one
      val = config.get(ConfigOption.TRUNCATE_WAIT.getCfgOption());
    }
    if (val == null) {
      return ConfigOption.TRUNCATE_WAIT.getDefault();
    }
return Boolean.parseBoolean(val);
}
/**
* @return the number of reducers to use
*/
Integer getReducerAmount() {
// should be slive.reduces
return getInteger(null, ConfigOption.REDUCES);
}
/**
* @return the number of mappers to use using config and default values for
* lookup
*/
Integer getMapAmount() {
return getMapAmount(null);
}
/**
* @param primary
* primary the initial string to be used for the value of this
* configuration option (if not provided then config and then the
* default are used)
* @return the reducer amount to use
*/
Integer getMapAmount(String primary) {
return getInteger(primary, ConfigOption.MAPS);
}
/**
* @return the duration in seconds (or null or Integer.MAX for no limit) using
* the configuration and default as lookup
*/
Integer getDuration() {
return getDuration(null);
}
/**
   * @return the duration in milliseconds (Integer.MAX_VALUE if no limit) using
   *         config and default as lookup
*/
Integer getDurationMilliseconds() {
Integer seconds = getDuration();
if (seconds == null || seconds == Integer.MAX_VALUE) {
return Integer.MAX_VALUE;
}
int milliseconds = (seconds * 1000);
if (milliseconds < 0) {
milliseconds = 0;
}
return milliseconds;
}
/**
* @param primary
* primary the initial string to be used for the value of this
* configuration option (if not provided then config and then the
* default are used)
* @return the duration in seconds (or null or Integer.MAX for no limit)
*/
Integer getDuration(String primary) {
return getInteger(primary, ConfigOption.DURATION);
}
/**
* @return the total number of operations to run using config and default as
* lookup
*/
Integer getOpCount() {
return getOpCount(null);
}
/**
* @param primary
* primary the initial string to be used for the value of this
* configuration option (if not provided then config and then the
* default are used)
* @return the total number of operations to run
*/
Integer getOpCount(String primary) {
return getInteger(primary, ConfigOption.OPS);
}
/**
* @return the total number of files per directory using config and default as
* lookup
*/
Integer getDirSize() {
return getDirSize(null);
}
/**
* @param primary
* primary the initial string to be used for the value of this
* configuration option (if not provided then config and then the
* default are used)
* @return the total number of files per directory
*/
Integer getDirSize(String primary) {
return getInteger(primary, ConfigOption.DIR_SIZE);
}
/**
* @param primary
   *          the primary string to attempt to convert into an integer
* @param opt
* the option to use as secondary + default if no primary given
* @return a parsed integer
*/
private Integer getInteger(String primary, ConfigOption<Integer> opt) {
String value = primary;
if (value == null) {
value = config.get(opt.getCfgOption());
}
if (value == null) {
return opt.getDefault();
}
return Integer.parseInt(value);
}
/**
* @return the total number of files allowed using configuration and default
* for lookup
*/
Integer getTotalFiles() {
return getTotalFiles(null);
}
/**
* @param primary
* primary the initial string to be used for the value of this
* configuration option (if not provided then config and then the
* default are used)
* @return the total number of files allowed
*/
Integer getTotalFiles(String primary) {
return getInteger(primary, ConfigOption.FILES);
}
/**
* @param primary
* primary the initial string to be used for the value of this
* configuration option (if not provided then config and then the
* default are used)
* @return the random seed start point or null if none
*/
Long getRandomSeed(String primary) {
String seed = primary;
if (seed == null) {
seed = config.get(ConfigOption.RANDOM_SEED.getCfgOption());
}
if (seed == null) {
return null;
}
return Long.parseLong(seed);
}
/**
* @return the random seed start point or null if none using config and then
* default as lookup
*/
Long getRandomSeed() {
return getRandomSeed(null);
}
/**
* @return the result file location or null if none using config and then
* default as lookup
*/
String getResultFile() {
return getResultFile(null);
}
/**
* Gets the grid queue name to run on using config and default only
*
* @return String
*/
String getQueueName() {
return getQueueName(null);
}
/**
* Gets the grid queue name to run on using the primary string or config or
* default
*
* @param primary
*
* @return String
*/
String getQueueName(String primary) {
String q = primary;
if (q == null) {
q = config.get(ConfigOption.QUEUE_NAME.getCfgOption());
}
if (q == null) {
q = ConfigOption.QUEUE_NAME.getDefault();
}
return q;
}
/**
* @param primary
* primary the initial string to be used for the value of this
* configuration option (if not provided then config and then the
* default are used)
* @return the result file location
*/
String getResultFile(String primary) {
String fn = primary;
if (fn == null) {
fn = config.get(ConfigOption.RESULT_FILE.getCfgOption());
}
if (fn == null) {
fn = ConfigOption.RESULT_FILE.getDefault();
}
return fn;
}
/**
* @param primary
* primary the initial string to be used for the value of this
* configuration option (if not provided then config and then the
* default are used)
* @return the integer range allowed for the block size
*/
Range<Long> getBlockSize(String primary) {
return getMinMaxBytes(ConfigOption.BLOCK_SIZE, primary);
}
/**
* @return the integer range allowed for the block size using config and
* default for lookup
*/
Range<Long> getBlockSize() {
return getBlockSize(null);
}
/**
* @param cfgopt
* the configuration option to use for config and default lookup
* @param primary
* the initial string to be used for the value of this configuration
* option (if not provided then config and then the default are used)
* @return the parsed short range from primary, config, default
*/
private Range<Short> getMinMaxShort(ConfigOption<Short> cfgopt, String primary) {
String sval = primary;
if (sval == null) {
sval = config.get(cfgopt.getCfgOption());
}
Range<Short> range = null;
if (sval != null) {
String pieces[] = Helper.getTrimmedStrings(sval);
if (pieces.length == 2) {
String min = pieces[0];
String max = pieces[1];
short minVal = Short.parseShort(min);
short maxVal = Short.parseShort(max);
if (minVal > maxVal) {
short tmp = minVal;
minVal = maxVal;
maxVal = tmp;
}
range = new Range<Short>(minVal, maxVal);
}
}
if (range == null) {
Short def = cfgopt.getDefault();
if (def != null) {
range = new Range<Short>(def, def);
}
}
return range;
}
/**
* @param cfgopt
* the configuration option to use for config and default lookup
* @param primary
* the initial string to be used for the value of this configuration
* option (if not provided then config and then the default are used)
* @return the parsed long range from primary, config, default
*/
private Range<Long> getMinMaxLong(ConfigOption<Long> cfgopt, String primary) {
String sval = primary;
if (sval == null) {
sval = config.get(cfgopt.getCfgOption());
}
Range<Long> range = null;
if (sval != null) {
String pieces[] = Helper.getTrimmedStrings(sval);
if (pieces.length == 2) {
String min = pieces[0];
String max = pieces[1];
long minVal = Long.parseLong(min);
long maxVal = Long.parseLong(max);
if (minVal > maxVal) {
long tmp = minVal;
minVal = maxVal;
maxVal = tmp;
}
range = new Range<Long>(minVal, maxVal);
}
}
if (range == null) {
Long def = cfgopt.getDefault();
if (def != null) {
range = new Range<Long>(def, def);
}
}
return range;
}
/**
* @param cfgopt
* the configuration option to use for config and default lookup
* @param primary
* the initial string to be used for the value of this configuration
* option (if not provided then config and then the default are used)
   * @return the parsed byte range from primary, config, default
*/
private Range<Long> getMinMaxBytes(ConfigOption<Long> cfgopt, String primary) {
String sval = primary;
if (sval == null) {
sval = config.get(cfgopt.getCfgOption());
}
Range<Long> range = null;
if (sval != null) {
String pieces[] = Helper.getTrimmedStrings(sval);
if (pieces.length == 2) {
String min = pieces[0];
String max = pieces[1];
long tMin = StringUtils.TraditionalBinaryPrefix.string2long(min);
long tMax = StringUtils.TraditionalBinaryPrefix.string2long(max);
if (tMin > tMax) {
long tmp = tMin;
tMin = tMax;
tMax = tmp;
}
range = new Range<Long>(tMin, tMax);
}
}
if (range == null) {
Long def = cfgopt.getDefault();
if (def != null) {
range = new Range<Long>(def, def);
}
}
return range;
}
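  // Editorial sketch, not part of the original source and not called anywhere:
  // illustrates the min/max byte parsing convention above. A primary value such
  // as "1m,2m" (binary-prefix suffixes handled by StringUtils) parses to the
  // range [1048576, 2097152]; when nothing is configured the single default
  // collapses to [default, default].
  private Range<Long> exampleBlockSizeRange() {
    return getMinMaxBytes(ConfigOption.BLOCK_SIZE, "1m,2m");
  }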
/**
* @param primary
* the initial string to be used for the value of this configuration
* option (if not provided then config and then the default are used)
* @return the replication range
*/
Range<Short> getReplication(String primary) {
return getMinMaxShort(ConfigOption.REPLICATION_AM, primary);
}
/**
* @return the replication range using config and default for lookup
*/
Range<Short> getReplication() {
return getReplication(null);
}
/**
* @return the map of operations to perform using config (percent may be null
* if unspecified)
*/
Map<OperationType, OperationData> getOperations() {
Map<OperationType, OperationData> operations = new HashMap<OperationType, OperationData>();
for (OperationType type : OperationType.values()) {
String opname = type.lowerName();
String keyname = String.format(Constants.OP, opname);
String kval = config.get(keyname);
if (kval == null) {
continue;
}
operations.put(type, new OperationData(kval));
}
return operations;
}
/**
* @param primary
* the initial string to be used for the value of this configuration
* option (if not provided then config and then the default are used)
* @return the append byte size range (or null if none)
*/
Range<Long> getAppendSize(String primary) {
return getMinMaxBytes(ConfigOption.APPEND_SIZE, primary);
}
/**
* @return the append byte size range (or null if none) using config and
* default for lookup
*/
Range<Long> getAppendSize() {
return getAppendSize(null);
}
/**
* @param primary
* the initial string to be used for the value of this configuration
* option (if not provided then config and then the default are used)
* @return the truncate byte size range (or null if none)
*/
Range<Long> getTruncateSize(String primary) {
return getMinMaxBytes(ConfigOption.TRUNCATE_SIZE, primary);
}
/**
* @return the truncate byte size range (or null if none) using config and
* default for lookup
*/
Range<Long> getTruncateSize() {
return getTruncateSize(null);
}
/**
* @param primary
* the initial string to be used for the value of this configuration
* option (if not provided then config and then the default are used)
* @return the sleep range (or null if none)
*/
Range<Long> getSleepRange(String primary) {
return getMinMaxLong(ConfigOption.SLEEP_TIME, primary);
}
/**
* @return the sleep range (or null if none) using config and default for
* lookup
*/
Range<Long> getSleepRange() {
return getSleepRange(null);
}
/**
* @param primary
* the initial string to be used for the value of this configuration
* option (if not provided then config and then the default are used)
* @return the write byte size range (or null if none)
*/
Range<Long> getWriteSize(String primary) {
return getMinMaxBytes(ConfigOption.WRITE_SIZE, primary);
}
/**
* @return the write byte size range (or null if none) using config and
* default for lookup
*/
Range<Long> getWriteSize() {
return getWriteSize(null);
}
/**
* Returns whether the write range should use the block size range
*
* @return true|false
*/
boolean shouldWriteUseBlockSize() {
Range<Long> writeRange = getWriteSize();
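    // a missing write range, or a degenerate MAX_VALUE..MAX_VALUE range, means
    // the block size range should be used for writes instead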
if (writeRange == null
|| (writeRange.getLower() == writeRange.getUpper() && (writeRange
.getUpper() == Long.MAX_VALUE))) {
return true;
}
return false;
}
/**
* Returns whether the append range should use the block size range
*
* @return true|false
*/
boolean shouldAppendUseBlockSize() {
Range<Long> appendRange = getAppendSize();
if (appendRange == null
|| (appendRange.getLower() == appendRange.getUpper() && (appendRange
.getUpper() == Long.MAX_VALUE))) {
return true;
}
return false;
}
/**
* Returns whether the truncate range should use the block size range
*
* @return true|false
*/
boolean shouldTruncateUseBlockSize() {
Range<Long> truncateRange = getTruncateSize();
if (truncateRange == null
|| (truncateRange.getLower() == truncateRange.getUpper()
&& (truncateRange.getUpper() == Long.MAX_VALUE))) {
return true;
}
return false;
}
/**
* Returns whether the read range should use the entire file
*
* @return true|false
*/
boolean shouldReadFullFile() {
Range<Long> readRange = getReadSize();
if (readRange == null
|| (readRange.getLower() == readRange.getUpper() && (readRange
.getUpper() == Long.MAX_VALUE))) {
return true;
}
return false;
}
/**
* @param primary
* the initial string to be used for the value of this configuration
* option (if not provided then config and then the default are used)
* @return the read byte size range (or null if none)
*/
Range<Long> getReadSize(String primary) {
return getMinMaxBytes(ConfigOption.READ_SIZE, primary);
}
/**
* Gets the bytes per checksum (if it exists or null if not)
*
* @return Long
*/
Long getByteCheckSum() {
String val = config.get(Constants.BYTES_PER_CHECKSUM);
if(val == null) {
return null;
}
return Long.parseLong(val);
}
/**
* @return the read byte size range (or null if none) using config and default
* for lookup
*/
Range<Long> getReadSize() {
return getReadSize(null);
}
/**
* Dumps out the given options for the given config extractor
*
* @param cfg
* the config to write to the log
*/
static void dumpOptions(ConfigExtractor cfg) {
if (cfg == null) {
return;
}
LOG.info("Base directory = " + cfg.getBaseDirectory());
LOG.info("Data directory = " + cfg.getDataPath());
LOG.info("Output directory = " + cfg.getOutputPath());
LOG.info("Result file = " + cfg.getResultFile());
LOG.info("Grid queue = " + cfg.getQueueName());
LOG.info("Should exit on first error = " + cfg.shouldExitOnFirstError());
{
String duration = "Duration = ";
if (cfg.getDurationMilliseconds() == Integer.MAX_VALUE) {
duration += "unlimited";
} else {
duration += cfg.getDurationMilliseconds() + " milliseconds";
}
LOG.info(duration);
}
LOG.info("Map amount = " + cfg.getMapAmount());
LOG.info("Reducer amount = " + cfg.getReducerAmount());
LOG.info("Operation amount = " + cfg.getOpCount());
LOG.info("Total file limit = " + cfg.getTotalFiles());
LOG.info("Total dir file limit = " + cfg.getDirSize());
{
String read = "Read size = ";
if (cfg.shouldReadFullFile()) {
read += "entire file";
} else {
read += cfg.getReadSize() + " bytes";
}
LOG.info(read);
}
{
String write = "Write size = ";
if (cfg.shouldWriteUseBlockSize()) {
write += "blocksize";
} else {
write += cfg.getWriteSize() + " bytes";
}
LOG.info(write);
}
{
String append = "Append size = ";
if (cfg.shouldAppendUseBlockSize()) {
append += "blocksize";
} else {
append += cfg.getAppendSize() + " bytes";
}
LOG.info(append);
}
{
String bsize = "Block size = ";
bsize += cfg.getBlockSize() + " bytes";
LOG.info(bsize);
}
if (cfg.getRandomSeed() != null) {
LOG.info("Random seed = " + cfg.getRandomSeed());
}
if (cfg.getSleepRange() != null) {
LOG.info("Sleep range = " + cfg.getSleepRange() + " milliseconds");
}
LOG.info("Replication amount = " + cfg.getReplication());
LOG.info("Operations are:");
NumberFormat percFormatter = Formatter.getPercentFormatter();
Map<OperationType, OperationData> operations = cfg.getOperations();
for (OperationType type : operations.keySet()) {
String name = type.name();
LOG.info(name);
OperationData opInfo = operations.get(type);
LOG.info(" " + opInfo.getDistribution().name());
if (opInfo.getPercent() != null) {
LOG.info(" " + percFormatter.format(opInfo.getPercent()));
} else {
LOG.info(" ???");
}
}
}
}
| 23,060
| 28.080706
| 95
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/ConfigMerger.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.slive;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.slive.ArgumentParser.ParsedOutput;
import org.apache.hadoop.fs.slive.Constants.Distribution;
import org.apache.hadoop.fs.slive.Constants.OperationType;
import org.apache.hadoop.util.StringUtils;
/**
* Class which merges options given from a config file and the command line and
* performs some basic verification of the data retrieved and sets the verified
* values back into the configuration object for return
*/
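// Typical flow (as exercised by TestSlive#getTestConfig): parse the command
// line with ArgumentParser, merge the resulting ParsedOutput with a base
// Configuration via getMerged(), then read the merged values back through a
// ConfigExtractor.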
class ConfigMerger {
/**
* Exception that represents config problems...
*/
static class ConfigException extends IOException {
private static final long serialVersionUID = 2047129184917444550L;
ConfigException(String msg) {
super(msg);
}
ConfigException(String msg, Throwable e) {
super(msg, e);
}
}
/**
* Merges the given command line parsed output with the given configuration
* object and returns the new configuration object with the correct options
* overwritten
*
* @param opts
* the parsed command line option output
* @param base
* the base configuration to merge with
* @return merged configuration object
* @throws ConfigException
* when configuration errors or verification occur
*/
Configuration getMerged(ParsedOutput opts, Configuration base)
throws ConfigException {
return handleOptions(opts, base);
}
/**
* Gets the base set of operations to use
*
* @return Map
*/
private Map<OperationType, OperationData> getBaseOperations() {
Map<OperationType, OperationData> base = new HashMap<OperationType, OperationData>();
// add in all the operations
// since they will all be applied unless changed
OperationType[] types = OperationType.values();
for (OperationType type : types) {
base.put(type, new OperationData(Distribution.UNIFORM, null));
}
return base;
}
/**
* Handles the specific task of merging operations from the command line or
* extractor object into the base configuration provided
*
* @param opts
* the parsed command line option output
* @param base
* the base configuration to merge with
* @param extractor
* the access object to fetch operations from if none from the
* command line
* @return merged configuration object
* @throws ConfigException
* when verification fails
*/
private Configuration handleOperations(ParsedOutput opts, Configuration base,
ConfigExtractor extractor) throws ConfigException {
// get the base set to start off with
Map<OperationType, OperationData> operations = getBaseOperations();
// merge with what is coming from config
Map<OperationType, OperationData> cfgOperations = extractor.getOperations();
for (OperationType opType : cfgOperations.keySet()) {
operations.put(opType, cfgOperations.get(opType));
}
// see if any coming in from the command line
for (OperationType opType : OperationType.values()) {
String opName = opType.lowerName();
String opVal = opts.getValue(opName);
if (opVal != null) {
operations.put(opType, new OperationData(opVal));
}
}
// remove those with <= zero percent
{
Map<OperationType, OperationData> cleanedOps = new HashMap<OperationType, OperationData>();
for (OperationType opType : operations.keySet()) {
OperationData data = operations.get(opType);
if (data.getPercent() == null || data.getPercent() > 0.0d) {
cleanedOps.put(opType, data);
}
}
operations = cleanedOps;
}
if (operations.isEmpty()) {
throw new ConfigException("No operations provided!");
}
// verify and adjust
double currPct = 0;
int needFill = 0;
for (OperationType type : operations.keySet()) {
OperationData op = operations.get(type);
if (op.getPercent() != null) {
currPct += op.getPercent();
} else {
needFill++;
}
}
if (currPct > 1) {
throw new ConfigException(
"Unable to have accumlative percent greater than 100%");
}
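    // operations without an explicit percentage split the remaining percentage
    // evenly between them; if every operation has a percentage and the total is
    // still under 100%, the leftover is spread evenly across all operations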
if (needFill > 0 && currPct < 1) {
double leftOver = 1.0 - currPct;
Map<OperationType, OperationData> mpcp = new HashMap<OperationType, OperationData>();
for (OperationType type : operations.keySet()) {
OperationData op = operations.get(type);
if (op.getPercent() == null) {
op = new OperationData(op.getDistribution(), (leftOver / needFill));
}
mpcp.put(type, op);
}
operations = mpcp;
} else if (needFill == 0 && currPct < 1) {
// redistribute
double leftOver = 1.0 - currPct;
Map<OperationType, OperationData> mpcp = new HashMap<OperationType, OperationData>();
double each = leftOver / operations.keySet().size();
for (OperationType t : operations.keySet()) {
OperationData op = operations.get(t);
op = new OperationData(op.getDistribution(), (op.getPercent() + each));
mpcp.put(t, op);
}
operations = mpcp;
} else if (needFill > 0 && currPct >= 1) {
throw new ConfigException(needFill
+ " unfilled operations but no percentage left to fill with");
}
// save into base
for (OperationType opType : operations.keySet()) {
String opName = opType.lowerName();
OperationData opData = operations.get(opType);
String distr = opData.getDistribution().lowerName();
      String ratio = Double.toString(opData.getPercent() * 100.0d);
base.set(String.format(Constants.OP, opName), opData.toString());
base.set(String.format(Constants.OP_DISTR, opName), distr);
base.set(String.format(Constants.OP_PERCENT, opName), ratio);
}
return base;
}
/**
* Handles merging all options and verifying from the given command line
* output and the given base configuration and returns the merged
* configuration
*
* @param opts
* the parsed command line option output
* @param base
* the base configuration to merge with
* @return the merged configuration
* @throws ConfigException
*/
private Configuration handleOptions(ParsedOutput opts, Configuration base)
throws ConfigException {
// ensure variables are overwritten and verified
ConfigExtractor extractor = new ConfigExtractor(base);
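    // each block below follows the same pattern: read the value from the
    // command line (falling back to the config and then the defaults via the
    // extractor), validate it, and write the result back into the base config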
// overwrite the map amount and check to ensure > 0
{
Integer mapAmount = null;
try {
mapAmount = extractor.getMapAmount(opts.getValue(ConfigOption.MAPS
.getOpt()));
} catch (Exception e) {
throw new ConfigException("Error extracting & merging map amount", e);
}
if (mapAmount != null) {
if (mapAmount <= 0) {
throw new ConfigException(
"Map amount can not be less than or equal to zero");
}
base.set(ConfigOption.MAPS.getCfgOption(), mapAmount.toString());
}
}
// overwrite the reducer amount and check to ensure > 0
{
Integer reduceAmount = null;
try {
reduceAmount = extractor.getMapAmount(opts.getValue(ConfigOption.REDUCES
.getOpt()));
} catch (Exception e) {
throw new ConfigException(
"Error extracting & merging reducer amount", e);
}
if (reduceAmount != null) {
if (reduceAmount <= 0) {
throw new ConfigException(
"Reducer amount can not be less than or equal to zero");
}
base.set(ConfigOption.REDUCES.getCfgOption(), reduceAmount.toString());
}
}
// overwrite the duration amount and ensure > 0
{
Integer duration = null;
try {
duration = extractor.getDuration(opts.getValue(ConfigOption.DURATION
.getOpt()));
} catch (Exception e) {
throw new ConfigException("Error extracting & merging duration", e);
}
if (duration != null) {
if (duration <= 0) {
throw new ConfigException(
"Duration can not be less than or equal to zero");
}
base.set(ConfigOption.DURATION.getCfgOption(), duration.toString());
}
}
// overwrite the operation amount and ensure > 0
{
Integer operationAmount = null;
try {
operationAmount = extractor.getOpCount(opts.getValue(ConfigOption.OPS
.getOpt()));
} catch (Exception e) {
throw new ConfigException(
"Error extracting & merging operation amount", e);
}
if (operationAmount != null) {
if (operationAmount <= 0) {
throw new ConfigException(
"Operation amount can not be less than or equal to zero");
}
base.set(ConfigOption.OPS.getCfgOption(), operationAmount.toString());
}
}
// overwrite the exit on error setting
{
try {
boolean exitOnError = extractor.shouldExitOnFirstError(opts
.getValue(ConfigOption.EXIT_ON_ERROR.getOpt()));
base.setBoolean(ConfigOption.EXIT_ON_ERROR.getCfgOption(), exitOnError);
} catch (Exception e) {
throw new ConfigException(
"Error extracting & merging exit on error value", e);
}
}
// overwrite the truncate wait setting
{
try {
boolean waitOnTruncate = extractor.shouldWaitOnTruncate(opts
.getValue(ConfigOption.TRUNCATE_WAIT.getOpt()));
base.setBoolean(ConfigOption.TRUNCATE_WAIT.getCfgOption(),
waitOnTruncate);
} catch (Exception e) {
throw new ConfigException(
"Error extracting & merging wait on truncate value", e);
}
}
// verify and set file limit and ensure > 0
{
Integer fileAm = null;
try {
fileAm = extractor.getTotalFiles(opts.getValue(ConfigOption.FILES
.getOpt()));
} catch (Exception e) {
throw new ConfigException(
"Error extracting & merging total file limit amount", e);
}
if (fileAm != null) {
if (fileAm <= 0) {
throw new ConfigException(
"File amount can not be less than or equal to zero");
}
base.set(ConfigOption.FILES.getCfgOption(), fileAm.toString());
}
}
// set the grid queue to run on
{
try {
String qname = extractor.getQueueName(opts
.getValue(ConfigOption.QUEUE_NAME.getOpt()));
if (qname != null) {
base.set(ConfigOption.QUEUE_NAME.getCfgOption(), qname);
}
} catch (Exception e) {
throw new ConfigException("Error extracting & merging queue name", e);
}
}
// verify and set the directory limit and ensure > 0
{
Integer directoryLimit = null;
try {
directoryLimit = extractor.getDirSize(opts
.getValue(ConfigOption.DIR_SIZE.getOpt()));
} catch (Exception e) {
throw new ConfigException(
"Error extracting & merging directory file limit", e);
}
if (directoryLimit != null) {
if (directoryLimit <= 0) {
throw new ConfigException(
"Directory file limit can not be less than or equal to zero");
}
base.set(ConfigOption.DIR_SIZE.getCfgOption(), directoryLimit
.toString());
}
}
// set the base directory
{
Path basedir = null;
try {
basedir = extractor.getBaseDirectory(opts
.getValue(ConfigOption.BASE_DIR.getOpt()));
} catch (Exception e) {
throw new ConfigException("Error extracting & merging base directory",
e);
}
if (basedir != null) {
// always ensure in slive dir
basedir = new Path(basedir, Constants.BASE_DIR);
base.set(ConfigOption.BASE_DIR.getCfgOption(), basedir.toString());
}
}
// set the result file
{
String fn = null;
try {
fn = extractor.getResultFile(opts.getValue(ConfigOption.RESULT_FILE
.getOpt()));
} catch (Exception e) {
throw new ConfigException("Error extracting & merging result file", e);
}
if (fn != null) {
base.set(ConfigOption.RESULT_FILE.getCfgOption(), fn);
}
}
// set the operations
{
try {
base = handleOperations(opts, base, extractor);
} catch (Exception e) {
throw new ConfigException("Error extracting & merging operations", e);
}
}
// set the replication amount range
{
Range<Short> replicationAm = null;
try {
replicationAm = extractor.getReplication(opts
.getValue(ConfigOption.REPLICATION_AM.getOpt()));
} catch (Exception e) {
throw new ConfigException(
"Error extracting & merging replication amount range", e);
}
if (replicationAm != null) {
int minRepl = base.getInt(Constants.MIN_REPLICATION, 1);
if (replicationAm.getLower() < minRepl) {
throw new ConfigException(
"Replication amount minimum is less than property configured minimum "
+ minRepl);
}
if (replicationAm.getLower() > replicationAm.getUpper()) {
throw new ConfigException(
"Replication amount minimum is greater than its maximum");
}
if (replicationAm.getLower() <= 0) {
throw new ConfigException(
"Replication amount minimum must be greater than zero");
}
base.set(ConfigOption.REPLICATION_AM.getCfgOption(), replicationAm
.toString());
}
}
// set the sleep range
{
Range<Long> sleepRange = null;
try {
sleepRange = extractor.getSleepRange(opts
.getValue(ConfigOption.SLEEP_TIME.getOpt()));
} catch (Exception e) {
throw new ConfigException(
"Error extracting & merging sleep size range", e);
}
if (sleepRange != null) {
if (sleepRange.getLower() > sleepRange.getUpper()) {
throw new ConfigException(
"Sleep range minimum is greater than its maximum");
}
if (sleepRange.getLower() <= 0) {
throw new ConfigException(
"Sleep range minimum must be greater than zero");
}
base.set(ConfigOption.SLEEP_TIME.getCfgOption(), sleepRange.toString());
}
}
// set the packet size if given
{
String pSize = opts.getValue(ConfigOption.PACKET_SIZE.getOpt());
if (pSize == null) {
pSize = ConfigOption.PACKET_SIZE.getDefault();
}
if (pSize != null) {
try {
Long packetSize = StringUtils.TraditionalBinaryPrefix
.string2long(pSize);
base.set(ConfigOption.PACKET_SIZE.getCfgOption(), packetSize
.toString());
} catch (Exception e) {
throw new ConfigException(
"Error extracting & merging write packet size", e);
}
}
}
// set the block size range
{
Range<Long> blockSize = null;
try {
blockSize = extractor.getBlockSize(opts
.getValue(ConfigOption.BLOCK_SIZE.getOpt()));
} catch (Exception e) {
throw new ConfigException(
"Error extracting & merging block size range", e);
}
if (blockSize != null) {
if (blockSize.getLower() > blockSize.getUpper()) {
throw new ConfigException(
"Block size minimum is greater than its maximum");
}
if (blockSize.getLower() <= 0) {
throw new ConfigException(
"Block size minimum must be greater than zero");
}
// ensure block size is a multiple of BYTES_PER_CHECKSUM
// if a value is set in the configuration
Long bytesPerChecksum = extractor.getByteCheckSum();
if (bytesPerChecksum != null) {
if ((blockSize.getLower() % bytesPerChecksum) != 0) {
throw new ConfigException(
"Blocksize lower bound must be a multiple of "
+ bytesPerChecksum);
}
if ((blockSize.getUpper() % bytesPerChecksum) != 0) {
throw new ConfigException(
"Blocksize upper bound must be a multiple of "
+ bytesPerChecksum);
}
}
base.set(ConfigOption.BLOCK_SIZE.getCfgOption(), blockSize.toString());
}
}
// set the read size range
{
Range<Long> readSize = null;
try {
readSize = extractor.getReadSize(opts.getValue(ConfigOption.READ_SIZE
.getOpt()));
} catch (Exception e) {
throw new ConfigException("Error extracting & merging read size range",
e);
}
if (readSize != null) {
if (readSize.getLower() > readSize.getUpper()) {
throw new ConfigException(
"Read size minimum is greater than its maximum");
}
if (readSize.getLower() < 0) {
throw new ConfigException(
"Read size minimum must be greater than or equal to zero");
}
base.set(ConfigOption.READ_SIZE.getCfgOption(), readSize.toString());
}
}
// set the write size range
{
Range<Long> writeSize = null;
try {
writeSize = extractor.getWriteSize(opts
.getValue(ConfigOption.WRITE_SIZE.getOpt()));
} catch (Exception e) {
throw new ConfigException(
"Error extracting & merging write size range", e);
}
if (writeSize != null) {
if (writeSize.getLower() > writeSize.getUpper()) {
throw new ConfigException(
"Write size minimum is greater than its maximum");
}
if (writeSize.getLower() < 0) {
throw new ConfigException(
"Write size minimum must be greater than or equal to zero");
}
base.set(ConfigOption.WRITE_SIZE.getCfgOption(), writeSize.toString());
}
}
// set the append size range
{
Range<Long> appendSize = null;
try {
appendSize = extractor.getAppendSize(opts
.getValue(ConfigOption.APPEND_SIZE.getOpt()));
} catch (Exception e) {
throw new ConfigException(
"Error extracting & merging append size range", e);
}
if (appendSize != null) {
if (appendSize.getLower() > appendSize.getUpper()) {
throw new ConfigException(
"Append size minimum is greater than its maximum");
}
if (appendSize.getLower() < 0) {
throw new ConfigException(
"Append size minimum must be greater than or equal to zero");
}
base
.set(ConfigOption.APPEND_SIZE.getCfgOption(), appendSize.toString());
}
}
// set the truncate size range
{
Range<Long> truncateSize = null;
try {
truncateSize = extractor.getTruncateSize(opts
.getValue(ConfigOption.TRUNCATE_SIZE.getOpt()));
} catch (Exception e) {
throw new ConfigException(
"Error extracting & merging truncate size range", e);
}
if (truncateSize != null) {
if (truncateSize.getLower() > truncateSize.getUpper()) {
throw new ConfigException(
"Truncate size minimum is greater than its maximum");
}
if (truncateSize.getLower() < 0) {
throw new ConfigException(
"Truncate size minimum must be greater than or equal to zero");
}
base
.set(ConfigOption.TRUNCATE_SIZE.getCfgOption(), truncateSize.toString());
}
}
// set the seed
{
Long seed = null;
try {
seed = extractor.getRandomSeed(opts.getValue(ConfigOption.RANDOM_SEED
.getOpt()));
} catch (Exception e) {
throw new ConfigException(
"Error extracting & merging random number seed", e);
}
if (seed != null) {
base.set(ConfigOption.RANDOM_SEED.getCfgOption(), seed.toString());
}
}
return base;
}
}
| 21,471
| 34.315789
| 97
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/OperationWeight.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.slive;
/**
* Class which holds an operation and its weight (used in operation selection)
*/
class OperationWeight {
private double weight;
private Operation operation;
OperationWeight(Operation op, double weight) {
this.operation = op;
this.weight = weight;
}
/**
* Fetches the given operation weight
*
* @return Double
*/
double getWeight() {
return weight;
}
/**
* Gets the operation
*
* @return Operation
*/
Operation getOperation() {
return operation;
}
}
| 1,366
| 25.288462
| 78
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/ReportWriter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.slive;
import java.io.PrintWriter;
import java.text.NumberFormat;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
* Class which provides a report for the given operation output
*/
class ReportWriter {
// simple measurement types
// expect long values
// these will be reported on + rates by this reporter
static final String OK_TIME_TAKEN = "milliseconds_taken";
static final String FAILURES = "failures";
static final String SUCCESSES = "successes";
static final String BYTES_WRITTEN = "bytes_written";
static final String FILES_CREATED = "files_created";
static final String DIR_ENTRIES = "dir_entries";
static final String OP_COUNT = "op_count";
static final String CHUNKS_VERIFIED = "chunks_verified";
static final String CHUNKS_UNVERIFIED = "chunks_unverified";
static final String BYTES_READ = "bytes_read";
static final String NOT_FOUND = "files_not_found";
static final String BAD_FILES = "bad_files";
private static final Log LOG = LogFactory.getLog(ReportWriter.class);
private static final String SECTION_DELIM = "-------------";
/**
 * @return String to be used as a section delimiter
*/
private String getSectionDelimiter() {
return SECTION_DELIM;
}
/**
 * Writes a message to the logging library and the given print writer (if it
* is not null)
*
* @param msg
* the message to write
* @param os
* the print writer if specified to also write to
*/
private void writeMessage(String msg, PrintWriter os) {
LOG.info(msg);
if (os != null) {
os.println(msg);
}
}
/**
* Provides a simple report showing only the input size, and for each
* operation the operation type, measurement type and its values.
*
* @param input
* the list of operations to report on
* @param os
* any print writer for which output should be written to (along with
* the logging library)
*/
void basicReport(List<OperationOutput> input, PrintWriter os) {
writeMessage("Default report for " + input.size() + " operations ", os);
writeMessage(getSectionDelimiter(), os);
for (OperationOutput data : input) {
writeMessage("Operation \"" + data.getOperationType() + "\" measuring \""
+ data.getMeasurementType() + "\" = " + data.getValue(), os);
}
writeMessage(getSectionDelimiter(), os);
}
/**
 * Provides a more detailed report for a given operation. This will output the
 * keys and values for all input, sort by measurement type, and attempt to show
 * rates for the metrics whose types allow a rate to be computed. Currently this
 * will show rates for bytes written, success count, files created, directory
 * entries, op count and bytes read, provided the time taken measurement is
 * available for that operation.
*
* @param operation
* the operation that is being reported on.
* @param input
 *          the set of data for that operation.
* @param os
* any print writer for which output should be written to (along with
* the logging library)
*/
void opReport(String operation, List<OperationOutput> input,
PrintWriter os) {
writeMessage("Basic report for operation type " + operation, os);
writeMessage(getSectionDelimiter(), os);
for (OperationOutput data : input) {
writeMessage("Measurement \"" + data.getMeasurementType() + "\" = "
+ data.getValue(), os);
}
// split up into measurement types for rates...
Map<String, OperationOutput> combined = new TreeMap<String, OperationOutput>();
for (OperationOutput data : input) {
if (combined.containsKey(data.getMeasurementType())) {
OperationOutput curr = combined.get(data.getMeasurementType());
combined.put(data.getMeasurementType(), OperationOutput.merge(curr,
data));
} else {
combined.put(data.getMeasurementType(), data);
}
}
// handle the known types
OperationOutput timeTaken = combined.get(OK_TIME_TAKEN);
if (timeTaken != null) {
Long mTaken = Long.parseLong(timeTaken.getValue().toString());
if (mTaken > 0) {
NumberFormat formatter = Formatter.getDecimalFormatter();
for (String measurementType : combined.keySet()) {
Double rate = null;
String rateType = "";
if (measurementType.equals(BYTES_WRITTEN)) {
Long mbWritten = Long.parseLong(combined.get(measurementType)
.getValue().toString())
/ (Constants.MEGABYTES);
rate = (double) mbWritten / (double) (mTaken / 1000.0d);
rateType = "MB/sec";
} else if (measurementType.equals(SUCCESSES)) {
Long succ = Long.parseLong(combined.get(measurementType).getValue()
.toString());
rate = (double) succ / (double) (mTaken / 1000.0d);
rateType = "successes/sec";
} else if (measurementType.equals(FILES_CREATED)) {
Long filesCreated = Long.parseLong(combined.get(measurementType)
.getValue().toString());
rate = (double) filesCreated / (double) (mTaken / 1000.0d);
rateType = "files created/sec";
} else if (measurementType.equals(DIR_ENTRIES)) {
Long entries = Long.parseLong(combined.get(measurementType)
.getValue().toString());
rate = (double) entries / (double) (mTaken / 1000.0d);
rateType = "directory entries/sec";
} else if (measurementType.equals(OP_COUNT)) {
Long opCount = Long.parseLong(combined.get(measurementType)
.getValue().toString());
rate = (double) opCount / (double) (mTaken / 1000.0d);
rateType = "operations/sec";
} else if (measurementType.equals(BYTES_READ)) {
Long mbRead = Long.parseLong(combined.get(measurementType)
.getValue().toString())
/ (Constants.MEGABYTES);
rate = (double) mbRead / (double) (mTaken / 1000.0d);
rateType = "MB/sec";
}
if (rate != null) {
writeMessage("Rate for measurement \"" + measurementType + "\" = "
+ formatter.format(rate) + " " + rateType, os);
}
}
}
}
writeMessage(getSectionDelimiter(), os);
}
}
| 7,401
| 38.582888
| 83
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/MkdirOp.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.slive;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.List;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.slive.OperationOutput.OutputType;
/**
* Operation which selects a random directory and attempts to create that
* directory.
*
 * On success this operation captures the time taken to create the directory
 * and the number of successful creations; on failure or error it captures the
 * number of failures and the amount of time taken to fail.
*/
class MkdirOp extends Operation {
private static final Log LOG = LogFactory.getLog(MkdirOp.class);
MkdirOp(ConfigExtractor cfg, Random rnd) {
super(MkdirOp.class.getSimpleName(), cfg, rnd);
}
/**
* Gets the directory name to try to make
*
* @return Path
*/
protected Path getDirectory() {
Path dir = getFinder().getDirectory();
return dir;
}
@Override // Operation
List<OperationOutput> run(FileSystem fs) {
List<OperationOutput> out = super.run(fs);
try {
Path dir = getDirectory();
boolean mkRes = false;
long timeTaken = 0;
{
long startTime = Timer.now();
mkRes = fs.mkdirs(dir);
timeTaken = Timer.elapsed(startTime);
}
// log stats
if (mkRes) {
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.OK_TIME_TAKEN, timeTaken));
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.SUCCESSES, 1L));
LOG.info("Made directory " + dir);
} else {
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.FAILURES, 1L));
LOG.warn("Could not make " + dir);
}
} catch (FileNotFoundException e) {
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.NOT_FOUND, 1L));
LOG.warn("Error with mkdir", e);
} catch (IOException e) {
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.FAILURES, 1L));
LOG.warn("Error with mkdir", e);
}
return out;
}
}
| 3,124
| 31.552083
| 77
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/TestSlive.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.slive;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.DataInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Random;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.slive.ArgumentParser.ParsedOutput;
import org.apache.hadoop.fs.slive.Constants.OperationType;
import org.apache.hadoop.fs.slive.DataVerifier.VerifyOutput;
import org.apache.hadoop.fs.slive.DataWriter.GenerateOutput;
import org.apache.hadoop.util.ToolRunner;
import org.junit.Before;
import org.junit.Test;
/**
* Junit 4 test for slive
*/
public class TestSlive {
private static final Log LOG = LogFactory.getLog(TestSlive.class);
private static final Random rnd = new Random(1L);
private static final String TEST_DATA_PROP = "test.build.data";
private static Configuration getBaseConfig() {
Configuration conf = new Configuration();
return conf;
}
/** gets the test write location according to the coding guidelines */
private static File getWriteLoc() {
String writeLoc = System.getProperty(TEST_DATA_PROP, "build/test/data/");
File writeDir = new File(writeLoc, "slive");
writeDir.mkdirs();
return writeDir;
}
/** gets where the MR job places its data + output + results */
private static File getFlowLocation() {
return new File(getWriteLoc(), "flow");
}
/** gets the test directory which is created by the mkdir op */
private static File getTestDir() {
return new File(getWriteLoc(), "slivedir");
}
/**
* gets the test file location
 * which is used for reading, appending and creating
*/
private static File getTestFile() {
return new File(getWriteLoc(), "slivefile");
}
/**
* gets the rename file which is used in combination
* with the test file to do a rename operation
*/
private static File getTestRenameFile() {
return new File(getWriteLoc(), "slivefile1");
}
/** gets the MR result file name */
private static File getResultFile() {
return new File(getWriteLoc(), "sliveresfile");
}
private static File getImaginaryFile() {
return new File(getWriteLoc(), "slivenofile");
}
/** gets the test program arguments used for merging and main MR running */
private String[] getTestArgs(boolean sleep) {
List<String> args = new LinkedList<String>();
// setup the options
{
args.add("-" + ConfigOption.WRITE_SIZE.getOpt());
args.add("1M,2M");
args.add("-" + ConfigOption.OPS.getOpt());
args.add(Constants.OperationType.values().length + "");
args.add("-" + ConfigOption.MAPS.getOpt());
args.add("2");
args.add("-" + ConfigOption.REDUCES.getOpt());
args.add("2");
args.add("-" + ConfigOption.APPEND_SIZE.getOpt());
args.add("1M,2M");
args.add("-" + ConfigOption.BLOCK_SIZE.getOpt());
args.add("1M,2M");
args.add("-" + ConfigOption.REPLICATION_AM.getOpt());
args.add("1,1");
if (sleep) {
args.add("-" + ConfigOption.SLEEP_TIME.getOpt());
args.add("10,10");
}
args.add("-" + ConfigOption.RESULT_FILE.getOpt());
args.add(getResultFile().toString());
args.add("-" + ConfigOption.BASE_DIR.getOpt());
args.add(getFlowLocation().toString());
args.add("-" + ConfigOption.DURATION.getOpt());
args.add("10");
args.add("-" + ConfigOption.DIR_SIZE.getOpt());
args.add("10");
args.add("-" + ConfigOption.FILES.getOpt());
args.add("10");
args.add("-" + ConfigOption.TRUNCATE_SIZE.getOpt());
args.add("0,1M");
}
return args.toArray(new String[args.size()]);
}
@Test
public void testFinder() throws Exception {
ConfigExtractor extractor = getTestConfig(false);
PathFinder fr = new PathFinder(extractor, rnd);
// should only be able to select 10 files
// attempt for a given amount of iterations
int maxIterations = 10000;
Set<Path> files = new HashSet<Path>();
for (int i = 0; i < maxIterations; i++) {
files.add(fr.getFile());
}
assertTrue(files.size() == 10);
Set<Path> dirs = new HashSet<Path>();
for (int i = 0; i < maxIterations; i++) {
dirs.add(fr.getDirectory());
}
assertTrue(dirs.size() == 10);
}
@Test
public void testSelection() throws Exception {
ConfigExtractor extractor = getTestConfig(false);
WeightSelector selector = new WeightSelector(extractor, rnd);
// should be 1 of each type - uniform
int expected = OperationType.values().length;
Operation op = null;
Set<String> types = new HashSet<String>();
FileSystem fs = FileSystem.get(extractor.getConfig());
while (true) {
op = selector.select(1, 1);
if (op == null) {
break;
}
// doesn't matter if they work or not
op.run(fs);
types.add(op.getType());
}
assertEquals(types.size(), expected);
}
// gets the config merged with the arguments
private ConfigExtractor getTestConfig(boolean sleep) throws Exception {
ArgumentParser parser = new ArgumentParser(getTestArgs(sleep));
ParsedOutput out = parser.parse();
assertTrue(!out.shouldOutputHelp());
ConfigMerger merge = new ConfigMerger();
Configuration cfg = merge.getMerged(out, getBaseConfig());
ConfigExtractor extractor = new ConfigExtractor(cfg);
return extractor;
}
@Before
public void ensureDeleted() throws Exception {
rDelete(getTestFile());
rDelete(getTestDir());
rDelete(getTestRenameFile());
rDelete(getResultFile());
rDelete(getFlowLocation());
rDelete(getImaginaryFile());
}
/** cleans up a file or directory recursively if need be */
private void rDelete(File place) throws Exception {
if (place.isFile()) {
LOG.info("Deleting file " + place);
assertTrue(place.delete());
} else if (place.isDirectory()) {
deleteDir(place);
}
}
/** deletes a dir and its contents */
private void deleteDir(File dir) throws Exception {
String fns[] = dir.list();
// delete contents first
for (String afn : fns) {
File fn = new File(dir, afn);
rDelete(fn);
}
LOG.info("Deleting directory " + dir);
// now delete the dir
assertTrue(dir.delete());
}
@Test
public void testArguments() throws Exception {
ConfigExtractor extractor = getTestConfig(true);
assertEquals(extractor.getOpCount().intValue(), Constants.OperationType
.values().length);
assertEquals(extractor.getMapAmount().intValue(), 2);
assertEquals(extractor.getReducerAmount().intValue(), 2);
Range<Long> apRange = extractor.getAppendSize();
assertEquals(apRange.getLower().intValue(), Constants.MEGABYTES * 1);
assertEquals(apRange.getUpper().intValue(), Constants.MEGABYTES * 2);
Range<Long> wRange = extractor.getWriteSize();
assertEquals(wRange.getLower().intValue(), Constants.MEGABYTES * 1);
assertEquals(wRange.getUpper().intValue(), Constants.MEGABYTES * 2);
Range<Long> trRange = extractor.getTruncateSize();
assertEquals(trRange.getLower().intValue(), 0);
assertEquals(trRange.getUpper().intValue(), Constants.MEGABYTES * 1);
Range<Long> bRange = extractor.getBlockSize();
assertEquals(bRange.getLower().intValue(), Constants.MEGABYTES * 1);
assertEquals(bRange.getUpper().intValue(), Constants.MEGABYTES * 2);
String resfile = extractor.getResultFile();
assertEquals(resfile, getResultFile().toString());
int durationMs = extractor.getDurationMilliseconds();
assertEquals(durationMs, 10 * 1000);
}
@Test
public void testDataWriting() throws Exception {
long byteAm = 100;
File fn = getTestFile();
DataWriter writer = new DataWriter(rnd);
FileOutputStream fs = new FileOutputStream(fn);
GenerateOutput ostat = writer.writeSegment(byteAm, fs);
LOG.info(ostat);
fs.close();
assertTrue(ostat.getBytesWritten() == byteAm);
DataVerifier vf = new DataVerifier();
FileInputStream fin = new FileInputStream(fn);
VerifyOutput vfout = vf.verifyFile(byteAm, new DataInputStream(fin));
LOG.info(vfout);
fin.close();
assertEquals(vfout.getBytesRead(), byteAm);
assertTrue(vfout.getChunksDifferent() == 0);
}
@Test
public void testRange() {
Range<Long> r = new Range<Long>(10L, 20L);
assertEquals(r.getLower().longValue(), 10L);
assertEquals(r.getUpper().longValue(), 20L);
}
@Test
public void testCreateOp() throws Exception {
// setup a valid config
ConfigExtractor extractor = getTestConfig(false);
final Path fn = new Path(getTestFile().getCanonicalPath());
CreateOp op = new CreateOp(extractor, rnd) {
protected Path getCreateFile() {
return fn;
}
};
runOperationOk(extractor, op, true);
}
@Test
public void testOpFailures() throws Exception {
ConfigExtractor extractor = getTestConfig(false);
final Path fn = new Path(getImaginaryFile().getCanonicalPath());
ReadOp rop = new ReadOp(extractor, rnd) {
protected Path getReadFile() {
return fn;
}
};
runOperationBad(extractor, rop);
DeleteOp dop = new DeleteOp(extractor, rnd) {
protected Path getDeleteFile() {
return fn;
}
};
runOperationBad(extractor, dop);
RenameOp reop = new RenameOp(extractor, rnd) {
protected SrcTarget getRenames() {
return new SrcTarget(fn, fn);
}
};
runOperationBad(extractor, reop);
AppendOp aop = new AppendOp(extractor, rnd) {
protected Path getAppendFile() {
return fn;
}
};
runOperationBad(extractor, aop);
}
private void runOperationBad(ConfigExtractor cfg, Operation op)
throws Exception {
FileSystem fs = FileSystem.get(cfg.getConfig());
List<OperationOutput> data = op.run(fs);
assertTrue(!data.isEmpty());
boolean foundFail = false;
for (OperationOutput d : data) {
if (d.getMeasurementType().equals(ReportWriter.FAILURES)) {
foundFail = true;
}
if (d.getMeasurementType().equals(ReportWriter.NOT_FOUND)) {
foundFail = true;
}
}
assertTrue(foundFail);
}
private void runOperationOk(ConfigExtractor cfg, Operation op, boolean checkOk)
throws Exception {
FileSystem fs = FileSystem.get(cfg.getConfig());
List<OperationOutput> data = op.run(fs);
assertTrue(!data.isEmpty());
if (checkOk) {
boolean foundSuc = false;
boolean foundOpCount = false;
boolean foundTime = false;
for (OperationOutput d : data) {
assertTrue(!d.getMeasurementType().equals(ReportWriter.FAILURES));
if (d.getMeasurementType().equals(ReportWriter.SUCCESSES)) {
foundSuc = true;
}
if (d.getMeasurementType().equals(ReportWriter.OP_COUNT)) {
foundOpCount = true;
}
if (d.getMeasurementType().equals(ReportWriter.OK_TIME_TAKEN)) {
foundTime = true;
}
}
assertTrue(foundSuc);
assertTrue(foundOpCount);
assertTrue(foundTime);
}
}
@Test
public void testDelete() throws Exception {
ConfigExtractor extractor = getTestConfig(false);
final Path fn = new Path(getTestFile().getCanonicalPath());
// ensure file created before delete
CreateOp op = new CreateOp(extractor, rnd) {
protected Path getCreateFile() {
return fn;
}
};
runOperationOk(extractor, op, true);
// now delete
DeleteOp dop = new DeleteOp(extractor, rnd) {
protected Path getDeleteFile() {
return fn;
}
};
runOperationOk(extractor, dop, true);
}
@Test
public void testRename() throws Exception {
ConfigExtractor extractor = getTestConfig(false);
final Path src = new Path(getTestFile().getCanonicalPath());
final Path tgt = new Path(getTestRenameFile().getCanonicalPath());
// ensure file created before rename
CreateOp op = new CreateOp(extractor, rnd) {
protected Path getCreateFile() {
return src;
}
};
runOperationOk(extractor, op, true);
RenameOp rop = new RenameOp(extractor, rnd) {
protected SrcTarget getRenames() {
return new SrcTarget(src, tgt);
}
};
runOperationOk(extractor, rop, true);
}
@Test
public void testMRFlow() throws Exception {
ConfigExtractor extractor = getTestConfig(false);
SliveTest s = new SliveTest(getBaseConfig());
int ec = ToolRunner.run(s, getTestArgs(false));
assertTrue(ec == 0);
String resFile = extractor.getResultFile();
File fn = new File(resFile);
assertTrue(fn.exists());
    // can't validate completely since operations may fail (mainly any but
    // create + mkdir) because they may not find their files
}
@Test
public void testRead() throws Exception {
ConfigExtractor extractor = getTestConfig(false);
final Path fn = new Path(getTestFile().getCanonicalPath());
// ensure file created before read
CreateOp op = new CreateOp(extractor, rnd) {
protected Path getCreateFile() {
return fn;
}
};
runOperationOk(extractor, op, true);
ReadOp rop = new ReadOp(extractor, rnd) {
protected Path getReadFile() {
return fn;
}
};
runOperationOk(extractor, rop, true);
}
@Test
public void testSleep() throws Exception {
ConfigExtractor extractor = getTestConfig(true);
SleepOp op = new SleepOp(extractor, rnd);
runOperationOk(extractor, op, true);
}
@Test
public void testList() throws Exception {
// ensure dir made
ConfigExtractor extractor = getTestConfig(false);
final Path dir = new Path(getTestDir().getCanonicalPath());
MkdirOp op = new MkdirOp(extractor, rnd) {
protected Path getDirectory() {
return dir;
}
};
runOperationOk(extractor, op, true);
// list it
ListOp lop = new ListOp(extractor, rnd) {
protected Path getDirectory() {
return dir;
}
};
runOperationOk(extractor, lop, true);
}
@Test
public void testBadChunks() throws Exception {
File fn = getTestFile();
int byteAm = 10000;
FileOutputStream fout = new FileOutputStream(fn);
byte[] bytes = new byte[byteAm];
rnd.nextBytes(bytes);
fout.write(bytes);
fout.close();
// attempt to read it
DataVerifier vf = new DataVerifier();
VerifyOutput vout = new VerifyOutput(0, 0, 0, 0);
DataInputStream in = null;
try {
in = new DataInputStream(new FileInputStream(fn));
vout = vf.verifyFile(byteAm, in);
} catch (Exception e) {
} finally {
if(in != null) in.close();
}
assertTrue(vout.getChunksSame() == 0);
}
@Test
public void testMkdir() throws Exception {
ConfigExtractor extractor = getTestConfig(false);
final Path dir = new Path(getTestDir().getCanonicalPath());
MkdirOp op = new MkdirOp(extractor, rnd) {
protected Path getDirectory() {
return dir;
}
};
runOperationOk(extractor, op, true);
}
@Test
public void testSelector() throws Exception {
ConfigExtractor extractor = getTestConfig(false);
RouletteSelector selector = new RouletteSelector(rnd);
List<OperationWeight> sList = new LinkedList<OperationWeight>();
Operation op = selector.select(sList);
assertTrue(op == null);
CreateOp cop = new CreateOp(extractor, rnd);
sList.add(new OperationWeight(cop, 1.0d));
AppendOp aop = new AppendOp(extractor, rnd);
sList.add(new OperationWeight(aop, 0.01d));
op = selector.select(sList);
assertTrue(op == cop);
}
@Test
public void testAppendOp() throws Exception {
// setup a valid config
ConfigExtractor extractor = getTestConfig(false);
// ensure file created before append
final Path fn = new Path(getTestFile().getCanonicalPath());
CreateOp op = new CreateOp(extractor, rnd) {
protected Path getCreateFile() {
return fn;
}
};
runOperationOk(extractor, op, true);
// local file system (ChecksumFileSystem) currently doesn't support append -
    // but we'll leave this test here anyway; we just can't check the results.
AppendOp aop = new AppendOp(extractor, rnd) {
protected Path getAppendFile() {
return fn;
}
};
runOperationOk(extractor, aop, false);
}
@Test
public void testTruncateOp() throws Exception {
// setup a valid config
ConfigExtractor extractor = getTestConfig(false);
// ensure file created before append
final Path fn = new Path(getTestFile().getCanonicalPath());
CreateOp op = new CreateOp(extractor, rnd) {
protected Path getCreateFile() {
return fn;
}
};
runOperationOk(extractor, op, true);
// local file system (ChecksumFileSystem) currently doesn't support truncate -
    // but we'll leave this test here anyway; we just can't check the results.
TruncateOp top = new TruncateOp(extractor, rnd) {
protected Path getTruncateFile() {
return fn;
}
};
runOperationOk(extractor, top, false);
}
}
| 18,228
| 31.263717
| 82
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/Timer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.slive;
/**
* Simple timer class that abstracts time access
*/
class Timer {
// no construction allowed
private Timer() {
}
/**
* The current time in milliseconds
*
* @return long (milliseconds)
*/
static long now() {
return System.currentTimeMillis();
}
/**
* Calculates how much time in milliseconds elapsed from given start time to
* the current time in milliseconds
*
* @param startTime
* @return elapsed time (milliseconds)
*/
static long elapsed(long startTime) {
long elapsedTime = now() - startTime;
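    // guard against a clock that moves backwards so callers never see a
    // negative elapsed time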
if (elapsedTime < 0) {
elapsedTime = 0;
}
return elapsedTime;
}
}
| 1,494
| 25.696429
| 78
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/DataVerifier.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.slive;
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.IOException;
import java.nio.ByteBuffer;
/**
* Class which reads in and verifies bytes that have been read in
*/
class DataVerifier {
private static final int BYTES_PER_LONG = Constants.BYTES_PER_LONG;
private int bufferSize;
/**
 * The output from verification includes the number of chunks that matched
 * what was expected, the number of chunks that differed from what was
 * expected, and the total number of bytes read
*/
static class VerifyOutput {
private long same;
private long different;
private long read;
private long readTime;
VerifyOutput(long sameChunks, long differentChunks, long readBytes,
long readTime) {
this.same = sameChunks;
this.different = differentChunks;
this.read = readBytes;
this.readTime = readTime;
}
long getReadTime() {
return this.readTime;
}
long getBytesRead() {
return this.read;
}
long getChunksSame() {
return same;
}
long getChunksDifferent() {
return different;
}
public String toString() {
return "Bytes read = " + getBytesRead() + " same = " + getChunksSame()
+ " different = " + getChunksDifferent() + " in " + getReadTime()
+ " milliseconds";
}
}
/**
* Class used to hold the result of a read on a header
*/
private static class ReadInfo {
private long byteAm;
private long hash;
private long timeTaken;
private long bytesRead;
ReadInfo(long byteAm, long hash, long timeTaken, long bytesRead) {
this.byteAm = byteAm;
this.hash = hash;
this.timeTaken = timeTaken;
this.bytesRead = bytesRead;
}
long getByteAm() {
return byteAm;
}
long getHashValue() {
return hash;
}
long getTimeTaken() {
return timeTaken;
}
long getBytesRead() {
return bytesRead;
}
}
/**
* Storage class used to hold the chunks same and different for buffered reads
* and the resultant verification
*/
private static class VerifyInfo {
VerifyInfo(long same, long different) {
this.same = same;
this.different = different;
}
long getSame() {
return same;
}
long getDifferent() {
return different;
}
private long same;
private long different;
}
/**
 * Inits with the given buffer size (must be at least BYTES_PER_LONG and a
 * multiple of BYTES_PER_LONG)
 *
 * @param bufferSize
 *          size which must be at least BYTES_PER_LONG and which also must
 *          be a multiple of BYTES_PER_LONG
*/
DataVerifier(int bufferSize) {
if (bufferSize < BYTES_PER_LONG) {
throw new IllegalArgumentException(
"Buffer size must be greater than or equal to " + BYTES_PER_LONG);
}
if ((bufferSize % BYTES_PER_LONG) != 0) {
throw new IllegalArgumentException("Buffer size must be a multiple of "
+ BYTES_PER_LONG);
}
this.bufferSize = bufferSize;
}
/**
* Inits with the default buffer size
*/
DataVerifier() {
this(Constants.BUFFERSIZE);
}
/**
* Verifies a buffer of a given size using the given start hash offset
*
* @param buf
* the buffer to verify
* @param size
* the number of bytes to be used in that buffer
* @param startOffset
* the start hash offset
* @param hasher
* the hasher to use for calculating expected values
*
 * @return VerifyInfo holding the number of chunks that matched and differed
*/
private VerifyInfo verifyBuffer(ByteBuffer buf, int size, long startOffset,
DataHasher hasher) {
ByteBuffer cmpBuf = ByteBuffer.wrap(new byte[BYTES_PER_LONG]);
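    // cmpBuf accumulates BYTES_PER_LONG bytes at a time; each completed long is
    // compared against the value the hasher generates for that offset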
long hashOffset = startOffset;
long chunksSame = 0;
long chunksDifferent = 0;
for (long i = 0; i < size; ++i) {
cmpBuf.put(buf.get());
if (!cmpBuf.hasRemaining()) {
cmpBuf.rewind();
long receivedData = cmpBuf.getLong();
cmpBuf.rewind();
long expected = hasher.generate(hashOffset);
hashOffset += BYTES_PER_LONG;
if (receivedData == expected) {
++chunksSame;
} else {
++chunksDifferent;
}
}
}
// any left over??
if (cmpBuf.hasRemaining() && cmpBuf.position() != 0) {
// partial capture
// zero fill and compare with zero filled
int curSize = cmpBuf.position();
while (cmpBuf.hasRemaining()) {
cmpBuf.put((byte) 0);
}
long expected = hasher.generate(hashOffset);
ByteBuffer tempBuf = ByteBuffer.wrap(new byte[BYTES_PER_LONG]);
tempBuf.putLong(expected);
tempBuf.position(curSize);
while (tempBuf.hasRemaining()) {
tempBuf.put((byte) 0);
}
cmpBuf.rewind();
tempBuf.rewind();
if (cmpBuf.equals(tempBuf)) {
++chunksSame;
} else {
++chunksDifferent;
}
}
return new VerifyInfo(chunksSame, chunksDifferent);
}
/**
* Determines the offset to use given a byte counter
*
* @param byteRead
*
* @return offset position
*/
private long determineOffset(long byteRead) {
if (byteRead < 0) {
byteRead = 0;
}
return (byteRead / BYTES_PER_LONG) * BYTES_PER_LONG;
}
/**
 * Verifies a given number of bytes from a file - fewer bytes may be
* read if a header can not be read in due to the byte limit
*
* @param byteAm
* the byte amount to limit to (should be less than or equal to file
* size)
*
* @param in
* the input stream to read from
*
* @return VerifyOutput with data about reads
*
* @throws IOException
* if a read failure occurs
*
* @throws BadFileException
* if a header can not be read or end of file is reached
* unexpectedly
*/
VerifyOutput verifyFile(long byteAm, DataInputStream in)
throws IOException, BadFileException {
return verifyBytes(byteAm, 0, in);
}
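  // Minimal usage sketch (illustrative only; fs, path and fileLength below are
  // hypothetical and not part of this class):
  //   DataVerifier verifier = new DataVerifier();
  //   DataInputStream in = fs.open(path);
  //   VerifyOutput result = verifier.verifyFile(fileLength, in);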
/**
   * Verifies a given number of bytes from a file - fewer bytes may be read if
   * a header cannot be read in due to the byte limit
*
* @param byteAm
* the byte amount to limit to (should be less than or equal to file
* size)
*
* @param bytesRead
* the starting byte location
*
* @param in
* the input stream to read from
*
* @return VerifyOutput with data about reads
*
* @throws IOException
* if a read failure occurs
*
* @throws BadFileException
* if a header can not be read or end of file is reached
* unexpectedly
*/
private VerifyOutput verifyBytes(long byteAm, long bytesRead,
DataInputStream in) throws IOException, BadFileException {
if (byteAm <= 0) {
return new VerifyOutput(0, 0, 0, 0);
}
long chunksSame = 0;
long chunksDifferent = 0;
long readTime = 0;
long bytesLeft = byteAm;
long bufLeft = 0;
long bufRead = 0;
long seqNum = 0;
DataHasher hasher = null;
ByteBuffer readBuf = ByteBuffer.wrap(new byte[bufferSize]);
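    // Expected stream layout (as implied by readHeader below): repeated
    // records, each beginning with a fixed-length header holding a hash seed
    // long and a data-length long, followed by that many bytes of generated
    // data to verify.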
while (bytesLeft > 0) {
if (bufLeft <= 0) {
if (bytesLeft < DataWriter.getHeaderLength()) {
// no bytes left to read a header
break;
}
// time to read a new header
ReadInfo header = null;
try {
header = readHeader(in);
} catch (EOFException e) {
// eof ok on header reads
// but not on data readers
break;
}
++seqNum;
hasher = new DataHasher(header.getHashValue());
bufLeft = header.getByteAm();
readTime += header.getTimeTaken();
bytesRead += header.getBytesRead();
bytesLeft -= header.getBytesRead();
bufRead = 0;
        // the chunk claims more bytes than we still want to read; clamp it
if (bufLeft > bytesLeft) {
bufLeft = bytesLeft;
}
// does the buffer amount have anything??
if (bufLeft <= 0) {
continue;
}
}
// figure out the buffer size to read
int bufSize = bufferSize;
if (bytesLeft < bufSize) {
bufSize = (int) bytesLeft;
}
if (bufLeft < bufSize) {
bufSize = (int) bufLeft;
}
// read it in
try {
readBuf.rewind();
long startTime = Timer.now();
in.readFully(readBuf.array(), 0, bufSize);
readTime += Timer.elapsed(startTime);
} catch (EOFException e) {
throw new BadFileException(
"Could not read the number of expected data bytes " + bufSize
+ " due to unexpected end of file during sequence " + seqNum, e);
}
// update the counters
bytesRead += bufSize;
bytesLeft -= bufSize;
bufLeft -= bufSize;
// verify what we read
readBuf.rewind();
// figure out the expected hash offset start point
long vOffset = determineOffset(bufRead);
// now update for new position
bufRead += bufSize;
// verify
VerifyInfo verifyRes = verifyBuffer(readBuf, bufSize, vOffset, hasher);
// update the verification counters
chunksSame += verifyRes.getSame();
chunksDifferent += verifyRes.getDifferent();
}
return new VerifyOutput(chunksSame, chunksDifferent, bytesRead, readTime);
}
/**
* Reads a header from the given input stream
*
* @param in
* input stream to read from
*
* @return ReadInfo
*
* @throws IOException
* if a read error occurs or EOF occurs
*
* @throws BadFileException
* if end of file occurs or the byte amount read is invalid
*/
ReadInfo readHeader(DataInputStream in) throws IOException,
BadFileException {
int headerLen = DataWriter.getHeaderLength();
ByteBuffer headerBuf = ByteBuffer.wrap(new byte[headerLen]);
long elapsed = 0;
{
long startTime = Timer.now();
in.readFully(headerBuf.array());
elapsed += Timer.elapsed(startTime);
}
headerBuf.rewind();
long hashValue = headerBuf.getLong();
long byteAvailable = headerBuf.getLong();
if (byteAvailable < 0) {
throw new BadFileException("Invalid negative amount " + byteAvailable
+ " determined for header data amount");
}
return new ReadInfo(byteAvailable, hashValue, elapsed, headerLen);
}
}
| 11,426
| 27.425373
| 81
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/SleepOp.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.slive;
import java.util.List;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.slive.OperationOutput.OutputType;
/**
* Operation which sleeps for a given number of milliseconds according to the
* config given, and reports on the sleep time overall
*/
class SleepOp extends Operation {
private static final Log LOG = LogFactory.getLog(SleepOp.class);
SleepOp(ConfigExtractor cfg, Random rnd) {
super(SleepOp.class.getSimpleName(), cfg, rnd);
}
protected long getSleepTime(Range<Long> sleepTime) {
long sleepMs = Range.betweenPositive(getRandom(), sleepTime);
return sleepMs;
}
/**
   * Sleeps for a random amount of time chosen from the given positive range
*
* @param sleepTime
* positive long range for times to choose
*
* @return output data on operation
*/
List<OperationOutput> run(Range<Long> sleepTime) {
List<OperationOutput> out = super.run(null);
try {
if (sleepTime != null) {
long sleepMs = getSleepTime(sleepTime);
long startTime = Timer.now();
sleep(sleepMs);
long elapsedTime = Timer.elapsed(startTime);
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.OK_TIME_TAKEN, elapsedTime));
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.SUCCESSES, 1L));
}
} catch (InterruptedException e) {
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.FAILURES, 1L));
LOG.warn("Error with sleeping", e);
}
return out;
}
@Override // Operation
List<OperationOutput> run(FileSystem fs) {
Range<Long> sleepTime = getConfig().getSleepRange();
return run(sleepTime);
}
/**
* Sleeps the current thread for X milliseconds
*
* @param ms
* milliseconds to sleep for
*
* @throws InterruptedException
*/
private void sleep(long ms) throws InterruptedException {
if (ms <= 0) {
return;
}
Thread.sleep(ms);
}
}
| 2,979
| 30.041667
| 77
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/ArgumentParser.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.slive;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.PosixParser;
import org.apache.hadoop.fs.slive.Constants.Distribution;
import org.apache.hadoop.fs.slive.Constants.OperationType;
import org.apache.hadoop.util.StringUtils;
/**
 * Class which abstracts the parsing of command line arguments for the slive test
*/
class ArgumentParser {
private Options optList;
private String[] argumentList;
private ParsedOutput parsed;
/**
* Result of a parse is the following object
*/
static class ParsedOutput {
private CommandLine parsedData;
private ArgumentParser source;
private boolean needHelp;
ParsedOutput(CommandLine parsedData, ArgumentParser source,
boolean needHelp) {
this.parsedData = parsedData;
this.source = source;
this.needHelp = needHelp;
}
/**
* @return whether the calling object should call output help and exit
*/
boolean shouldOutputHelp() {
return needHelp;
}
/**
* Outputs the formatted help to standard out
*/
void outputHelp() {
if (!shouldOutputHelp()) {
return;
}
if (source != null) {
HelpFormatter hlp = new HelpFormatter();
hlp.printHelp(Constants.PROG_NAME + " " + Constants.PROG_VERSION,
source.getOptionList());
}
}
/**
* @param optName
* the option name to get the value for
*
* @return the option value or null if it does not exist
*/
String getValue(String optName) {
if (parsedData == null) {
return null;
}
return parsedData.getOptionValue(optName);
}
public String toString() {
StringBuilder s = new StringBuilder();
if (parsedData != null) {
Option[] ops = parsedData.getOptions();
for (int i = 0; i < ops.length; ++i) {
          s.append(ops[i].getOpt()).append(" = ").append(ops[i].getValue()).append(",");
}
}
return s.toString();
}
}
ArgumentParser(String[] args) {
optList = getOptions();
if (args == null) {
args = new String[] {};
}
argumentList = args;
parsed = null;
}
private Options getOptionList() {
return optList;
}
/**
* Parses the command line options
*
   * @return ParsedOutput; call shouldOutputHelp() to see if help is needed
*
* @throws Exception
* when parsing fails
*/
ParsedOutput parse() throws Exception {
if (parsed == null) {
PosixParser parser = new PosixParser();
CommandLine popts = parser.parse(getOptionList(), argumentList, true);
if (popts.hasOption(ConfigOption.HELP.getOpt())) {
parsed = new ParsedOutput(null, this, true);
} else {
parsed = new ParsedOutput(popts, this, false);
}
}
return parsed;
}
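  // Minimal usage sketch (illustrative only):
  //   ArgumentParser parser = new ArgumentParser(args);
  //   ArgumentParser.ParsedOutput out = parser.parse();
  //   if (out.shouldOutputHelp()) {
  //     out.outputHelp();
  //   }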
/**
* @return the option set to be used in command line parsing
*/
private Options getOptions() {
Options cliopt = new Options();
cliopt.addOption(ConfigOption.MAPS);
cliopt.addOption(ConfigOption.REDUCES);
cliopt.addOption(ConfigOption.PACKET_SIZE);
cliopt.addOption(ConfigOption.OPS);
cliopt.addOption(ConfigOption.DURATION);
cliopt.addOption(ConfigOption.EXIT_ON_ERROR);
cliopt.addOption(ConfigOption.SLEEP_TIME);
cliopt.addOption(ConfigOption.TRUNCATE_WAIT);
cliopt.addOption(ConfigOption.FILES);
cliopt.addOption(ConfigOption.DIR_SIZE);
cliopt.addOption(ConfigOption.BASE_DIR);
cliopt.addOption(ConfigOption.RESULT_FILE);
cliopt.addOption(ConfigOption.CLEANUP);
{
String distStrs[] = new String[Distribution.values().length];
Distribution distValues[] = Distribution.values();
for (int i = 0; i < distValues.length; ++i) {
distStrs[i] = distValues[i].lowerName();
}
String opdesc = String.format(Constants.OP_DESCR, StringUtils
.arrayToString(distStrs));
for (OperationType type : OperationType.values()) {
String opname = type.lowerName();
cliopt.addOption(new Option(opname, true, opdesc));
}
}
cliopt.addOption(ConfigOption.REPLICATION_AM);
cliopt.addOption(ConfigOption.BLOCK_SIZE);
cliopt.addOption(ConfigOption.READ_SIZE);
cliopt.addOption(ConfigOption.WRITE_SIZE);
cliopt.addOption(ConfigOption.APPEND_SIZE);
cliopt.addOption(ConfigOption.TRUNCATE_SIZE);
cliopt.addOption(ConfigOption.RANDOM_SEED);
cliopt.addOption(ConfigOption.QUEUE_NAME);
cliopt.addOption(ConfigOption.HELP);
return cliopt;
}
}
| 5,472
| 29.575419
| 80
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/SliveMapper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.slive;
import java.io.IOException;
import java.util.List;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.slive.OperationOutput.OutputType;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.util.StringUtils;
/**
 * The slive mapper, which receives a single dummy key and value and then, in a
 * loop, runs the operations that have been selected, outputting the collected
 * results of each operation as it completes (repeating until finished).
*/
public class SliveMapper extends MapReduceBase implements
Mapper<Object, Object, Text, Text> {
private static final Log LOG = LogFactory.getLog(SliveMapper.class);
private static final String OP_TYPE = SliveMapper.class.getSimpleName();
private FileSystem filesystem;
private ConfigExtractor config;
private int taskId;
/*
* (non-Javadoc)
*
* @see
* org.apache.hadoop.mapred.MapReduceBase#configure(org.apache.hadoop.mapred
* .JobConf)
*/
@Override // MapReduceBase
public void configure(JobConf conf) {
try {
config = new ConfigExtractor(conf);
ConfigExtractor.dumpOptions(config);
filesystem = config.getBaseDirectory().getFileSystem(conf);
} catch (Exception e) {
LOG.error("Unable to setup slive " + StringUtils.stringifyException(e));
throw new RuntimeException("Unable to setup slive configuration", e);
}
if(conf.get(MRJobConfig.TASK_ATTEMPT_ID) != null ) {
this.taskId = TaskAttemptID.forName(conf.get(MRJobConfig.TASK_ATTEMPT_ID))
.getTaskID().getId();
} else {
// So that branch-1/0.20 can run this same code as well
this.taskId = TaskAttemptID.forName(conf.get("mapred.task.id"))
.getTaskID().getId();
}
}
/**
* Fetches the config this object uses
*
* @return ConfigExtractor
*/
private ConfigExtractor getConfig() {
return config;
}
/**
* Logs to the given reporter and logs to the internal logger at info level
*
* @param r
* the reporter to set status on
* @param msg
* the message to log
*/
private void logAndSetStatus(Reporter r, String msg) {
r.setStatus(msg);
LOG.info(msg);
}
/**
* Runs the given operation and reports on its results
*
* @param op
* the operation to run
* @param reporter
* the status reporter to notify
* @param output
* the output to write to
* @throws IOException
*/
private void runOperation(Operation op, Reporter reporter,
OutputCollector<Text, Text> output, long opNum) throws IOException {
if (op == null) {
return;
}
logAndSetStatus(reporter, "Running operation #" + opNum + " (" + op + ")");
List<OperationOutput> opOut = op.run(filesystem);
logAndSetStatus(reporter, "Finished operation #" + opNum + " (" + op + ")");
if (opOut != null && !opOut.isEmpty()) {
for (OperationOutput outData : opOut) {
output.collect(outData.getKey(), outData.getOutputValue());
}
}
}
/*
* (non-Javadoc)
*
* @see org.apache.hadoop.mapred.Mapper#map(java.lang.Object,
* java.lang.Object, org.apache.hadoop.mapred.OutputCollector,
* org.apache.hadoop.mapred.Reporter)
*/
@Override // Mapper
public void map(Object key, Object value, OutputCollector<Text, Text> output,
Reporter reporter) throws IOException {
logAndSetStatus(reporter, "Running slive mapper for dummy key " + key
+ " and dummy value " + value);
//Add taskID to randomSeed to deterministically seed rnd.
Random rnd = config.getRandomSeed() != null ?
new Random(this.taskId + config.getRandomSeed()) : new Random();
WeightSelector selector = new WeightSelector(config, rnd);
long startTime = Timer.now();
long opAm = 0;
long sleepOps = 0;
int duration = getConfig().getDurationMilliseconds();
Range<Long> sleepRange = getConfig().getSleepRange();
Operation sleeper = null;
if (sleepRange != null) {
sleeper = new SleepOp(getConfig(), rnd);
}
while (Timer.elapsed(startTime) < duration) {
try {
logAndSetStatus(reporter, "Attempting to select operation #"
+ (opAm + 1));
int currElapsed = (int) (Timer.elapsed(startTime));
Operation op = selector.select(currElapsed, duration);
if (op == null) {
// no ops left
break;
} else {
// got a good op
++opAm;
runOperation(op, reporter, output, opAm);
}
// do a sleep??
if (sleeper != null) {
// these don't count against the number of operations
++sleepOps;
runOperation(sleeper, reporter, output, sleepOps);
}
} catch (Exception e) {
logAndSetStatus(reporter, "Failed at running due to "
+ StringUtils.stringifyException(e));
if (getConfig().shouldExitOnFirstError()) {
break;
}
}
}
// write out any accumulated mapper stats
{
long timeTaken = Timer.elapsed(startTime);
OperationOutput opCount = new OperationOutput(OutputType.LONG, OP_TYPE,
ReportWriter.OP_COUNT, opAm);
output.collect(opCount.getKey(), opCount.getOutputValue());
OperationOutput overallTime = new OperationOutput(OutputType.LONG,
OP_TYPE, ReportWriter.OK_TIME_TAKEN, timeTaken);
output.collect(overallTime.getKey(), overallTime.getOutputValue());
logAndSetStatus(reporter, "Finished " + opAm + " operations in "
+ timeTaken + " milliseconds");
}
}
}
| 6,915
| 33.929293
| 80
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/conf/TestNoDefaultsJobConf.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.conf;
import org.junit.Assert;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import java.io.*;
/**
 * This test case verifies that a JobConf without default values submits jobs
 * properly and that the JobTracker applies its own default values to it to
 * make the job run properly.
*/
public class TestNoDefaultsJobConf extends HadoopTestCase {
public TestNoDefaultsJobConf() throws IOException {
super(HadoopTestCase.CLUSTER_MR, HadoopTestCase.DFS_FS, 1, 1);
}
public void testNoDefaults() throws Exception {
JobConf configuration = new JobConf();
assertTrue(configuration.get("hadoop.tmp.dir", null) != null);
configuration = new JobConf(false);
assertTrue(configuration.get("hadoop.tmp.dir", null) == null);
Path inDir = new Path("testing/jobconf/input");
Path outDir = new Path("testing/jobconf/output");
OutputStream os = getFileSystem().create(new Path(inDir, "text.txt"));
Writer wr = new OutputStreamWriter(os);
wr.write("hello\n");
wr.write("hello\n");
wr.close();
JobConf conf = new JobConf(false);
conf.set("fs.defaultFS", createJobConf().get("fs.defaultFS"));
conf.setJobName("mr");
conf.setInputFormat(TextInputFormat.class);
conf.setMapOutputKeyClass(LongWritable.class);
conf.setMapOutputValueClass(Text.class);
conf.setOutputFormat(TextOutputFormat.class);
conf.setOutputKeyClass(LongWritable.class);
conf.setOutputValueClass(Text.class);
conf.setMapperClass(org.apache.hadoop.mapred.lib.IdentityMapper.class);
conf.setReducerClass(org.apache.hadoop.mapred.lib.IdentityReducer.class);
FileInputFormat.setInputPaths(conf, inDir);
FileOutputFormat.setOutputPath(conf, outDir);
JobClient.runJob(conf);
Path[] outputFiles = FileUtil.stat2Paths(
getFileSystem().listStatus(outDir,
new Utils.OutputFileUtils.OutputFilesFilter()));
if (outputFiles.length > 0) {
InputStream is = getFileSystem().open(outputFiles[0]);
BufferedReader reader = new BufferedReader(new InputStreamReader(is));
String line = reader.readLine();
int counter = 0;
while (line != null) {
counter++;
assertTrue(line.contains("hello"));
line = reader.readLine();
}
reader.close();
assertEquals(2, counter);
}
}
}
| 3,429
| 31.980769
| 77
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/ipc/TestMRCJCSocketFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.SocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobStatus;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster;
import org.apache.hadoop.net.StandardSocketFactory;
import org.junit.Assert;
import org.junit.Test;
/**
* This class checks that RPCs can use specialized socket factories.
*/
public class TestMRCJCSocketFactory {
/**
* Check that we can reach a NameNode or Resource Manager using a specific
* socket factory
*/
@Test
public void testSocketFactory() throws IOException {
// Create a standard mini-cluster
Configuration sconf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(sconf).numDataNodes(1)
.build();
final int nameNodePort = cluster.getNameNodePort();
// Get a reference to its DFS directly
FileSystem fs = cluster.getFileSystem();
Assert.assertTrue(fs instanceof DistributedFileSystem);
DistributedFileSystem directDfs = (DistributedFileSystem) fs;
Configuration cconf = getCustomSocketConfigs(nameNodePort);
fs = FileSystem.get(cconf);
Assert.assertTrue(fs instanceof DistributedFileSystem);
DistributedFileSystem dfs = (DistributedFileSystem) fs;
JobClient client = null;
MiniMRYarnCluster miniMRYarnCluster = null;
try {
// This will test RPC to the NameNode only.
// could we test Client-DataNode connections?
Path filePath = new Path("/dir");
Assert.assertFalse(directDfs.exists(filePath));
Assert.assertFalse(dfs.exists(filePath));
directDfs.mkdirs(filePath);
Assert.assertTrue(directDfs.exists(filePath));
Assert.assertTrue(dfs.exists(filePath));
// This will test RPC to a Resource Manager
fs = FileSystem.get(sconf);
JobConf jobConf = new JobConf();
FileSystem.setDefaultUri(jobConf, fs.getUri().toString());
miniMRYarnCluster = initAndStartMiniMRYarnCluster(jobConf);
JobConf jconf = new JobConf(miniMRYarnCluster.getConfig());
jconf.set("hadoop.rpc.socket.factory.class.default",
"org.apache.hadoop.ipc.DummySocketFactory");
jconf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
String rmAddress = jconf.get("yarn.resourcemanager.address");
String[] split = rmAddress.split(":");
jconf.set("yarn.resourcemanager.address", split[0] + ':'
+ (Integer.parseInt(split[1]) + 10));
client = new JobClient(jconf);
JobStatus[] jobs = client.jobsToComplete();
Assert.assertTrue(jobs.length == 0);
} finally {
closeClient(client);
closeDfs(dfs);
closeDfs(directDfs);
stopMiniMRYarnCluster(miniMRYarnCluster);
shutdownDFSCluster(cluster);
}
}
private MiniMRYarnCluster initAndStartMiniMRYarnCluster(JobConf jobConf) {
MiniMRYarnCluster miniMRYarnCluster;
miniMRYarnCluster = new MiniMRYarnCluster(this.getClass().getName(), 1);
miniMRYarnCluster.init(jobConf);
miniMRYarnCluster.start();
return miniMRYarnCluster;
}
private Configuration getCustomSocketConfigs(final int nameNodePort) {
// Get another reference via network using a specific socket factory
Configuration cconf = new Configuration();
FileSystem.setDefaultUri(cconf, String.format("hdfs://localhost:%s/",
nameNodePort + 10));
cconf.set("hadoop.rpc.socket.factory.class.default",
"org.apache.hadoop.ipc.DummySocketFactory");
cconf.set("hadoop.rpc.socket.factory.class.ClientProtocol",
"org.apache.hadoop.ipc.DummySocketFactory");
cconf.set("hadoop.rpc.socket.factory.class.JobSubmissionProtocol",
"org.apache.hadoop.ipc.DummySocketFactory");
return cconf;
}
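  // Note (added for clarity): the +10 port offset configured above and in
  // testSocketFactory is undone by DummySocketFactory, which subtracts 10 when
  // connecting, so the client still reaches the real NameNode/RM and the test
  // proves the custom factory was actually used.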
private void shutdownDFSCluster(MiniDFSCluster cluster) {
try {
if (cluster != null)
cluster.shutdown();
} catch (Exception ignored) {
// nothing we can do
ignored.printStackTrace();
}
}
private void stopMiniMRYarnCluster(MiniMRYarnCluster miniMRYarnCluster) {
try {
if (miniMRYarnCluster != null)
miniMRYarnCluster.stop();
} catch (Exception ignored) {
// nothing we can do
ignored.printStackTrace();
}
}
private void closeDfs(DistributedFileSystem dfs) {
try {
if (dfs != null)
dfs.close();
} catch (Exception ignored) {
// nothing we can do
ignored.printStackTrace();
}
}
private void closeClient(JobClient client) {
try {
if (client != null)
client.close();
} catch (Exception ignored) {
// nothing we can do
ignored.printStackTrace();
}
}
}
/**
 * Dummy socket factory which shifts TCP ports by subtracting 10 when
* establishing a connection
*/
class DummySocketFactory extends StandardSocketFactory {
/**
* Default empty constructor (for use with the reflection API).
*/
public DummySocketFactory() {
}
@Override
public Socket createSocket() throws IOException {
return new Socket() {
@Override
public void connect(SocketAddress addr, int timeout) throws IOException {
assert (addr instanceof InetSocketAddress);
InetSocketAddress iaddr = (InetSocketAddress) addr;
SocketAddress newAddr = null;
if (iaddr.isUnresolved())
newAddr = new InetSocketAddress(iaddr.getHostName(),
iaddr.getPort() - 10);
else
newAddr = new InetSocketAddress(iaddr.getAddress(),
iaddr.getPort() - 10);
System.out.printf("Test socket: rerouting %s to %s\n", iaddr, newAddr);
super.connect(newAddr, timeout);
}
};
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (!(obj instanceof DummySocketFactory))
return false;
return true;
}
}
| 7,098
| 31.714286
| 79
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fi/FiConfig.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fi;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
/**
 * This class wraps the logic around the fault injection configuration file.
 * The default file is expected to be found in src/test/fi-site.xml.
 * This default file should be copied by JUnit Ant's tasks to the
 * build/test/extraconf folder before tests are run.
* An alternative location can be set through
* -Dfi.config=<file_name>
*/
public class FiConfig {
private static final String CONFIG_PARAMETER = ProbabilityModel.FPROB_NAME + "config";
private static final String DEFAULT_CONFIG = "fi-site.xml";
private static Configuration conf;
static {
init();
}
protected static void init () {
if (conf == null) {
conf = new HdfsConfiguration(false);
String configName = System.getProperty(CONFIG_PARAMETER, DEFAULT_CONFIG);
conf.addResource(configName);
}
}
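  // Illustrative example (file name is hypothetical): running tests with
  //   -Dfi.config=my-fi-site.xml
  // makes init() load that resource instead of the default fi-site.xml.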
/**
* Method provides access to local Configuration
*
* @return Configuration initialized with fault injection's parameters
*/
public static Configuration getConfig() {
return conf;
}
}
| 1,948
| 33.803571
| 88
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fi/ProbabilityModel.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fi;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
/**
* This class is responsible for the decision of when a fault
* has to be triggered within a class of Hadoop
*
* Default probability of injection is set to 0%. To change it
* one can set the sys. prop. -Dfi.*=<new probability level>
* Another way to do so is to set this level through FI config file,
* located under src/test/fi-site.conf
*
 * To change the level one has to specify the following sys. prop.:
 * -Dfi.<name of fault location>=<probability level> at runtime
* Probability level is specified by a float between 0.0 and 1.0
*
* <name of fault location> might be represented by a short classname
 * or otherwise. This decision is left up to the discretion of the aspects
 * developer, but has to be consistent throughout the code
*/
public class ProbabilityModel {
private static Random generator = new Random();
private static final Log LOG = LogFactory.getLog(ProbabilityModel.class);
static final String FPROB_NAME = "fi.";
private static final String ALL_PROBABILITIES = FPROB_NAME + "*";
private static final float DEFAULT_PROB = 0.00f; //Default probability is 0%
private static final float MAX_PROB = 1.00f; // Max probability is 100%
private static Configuration conf = FiConfig.getConfig();
static {
// Set new default probability if specified through a system.property
// If neither is specified set default probability to DEFAULT_PROB
conf.set(ALL_PROBABILITIES,
System.getProperty(ALL_PROBABILITIES,
conf.get(ALL_PROBABILITIES, Float.toString(DEFAULT_PROB))));
LOG.info(ALL_PROBABILITIES + "=" + conf.get(ALL_PROBABILITIES));
}
/**
* Simplistic method to check if we have reached the point of injection
* @param klassName is the name of the probability level to check.
* If a configuration has been set for "fi.myClass" then you can check if the
 * injection criteria have been reached by calling this method with the
 * "myClass" string as its parameter
* @return true if the probability threshold has been reached; false otherwise
*/
public static boolean injectCriteria(String klassName) {
boolean trigger = false;
// TODO fix this: make it more sophisticated!!!
if (generator.nextFloat() < getProbability(klassName)) {
trigger = true;
}
return trigger;
}
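  // Illustrative sketch (not part of the original class): a fault-injection
  // site guarded by this model might look like
  //   if (ProbabilityModel.injectCriteria("MyClass")) {
  //     throw new IOException("injected fault");
  //   }
  // and would fire roughly 10% of the time when run with -Dfi.MyClass=0.1
  // ("MyClass" is a hypothetical fault location name).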
/**
 * This primitive checks for an arbitrarily set desired probability. If the
 * level hasn't been set, the method will return the default setting.
 * The probability is expected to be set as a float between 0.0 and 1.0
 * @param klass is the name of the resource
 * @return float representation of the configured probability level of
 * the requested resource, or the default value if it hasn't been set
*/
protected static float getProbability(final String klass) {
String newProbName = FPROB_NAME + klass;
String newValue = System.getProperty(newProbName, conf.get(ALL_PROBABILITIES));
if (newValue != null && !newValue.equals(conf.get(newProbName)))
conf.set(newProbName, newValue);
float ret = conf.getFloat(newProbName,
conf.getFloat(ALL_PROBABILITIES, DEFAULT_PROB));
LOG.debug("Request for " + newProbName + " returns=" + ret);
// Make sure that probability level is valid.
if (ret < DEFAULT_PROB || ret > MAX_PROB)
ret = conf.getFloat(ALL_PROBABILITIES, DEFAULT_PROB);
return ret;
}
}
| 4,370
| 40.235849
| 83
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBenchWithoutMR.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.io.IOException;
import java.util.Date;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.mapred.JobConf;
/**
* This program executes a specified operation that applies load to
* the NameNode. Possible operations include create/writing files,
* opening/reading files, renaming files, and deleting files.
*
* When run simultaneously on multiple nodes, this program functions
* as a stress-test and benchmark for namenode, especially when
* the number of bytes written to each file is small.
*
* This version does not use the map reduce framework
*
*/
public class NNBenchWithoutMR {
private static final Log LOG = LogFactory.getLog(
"org.apache.hadoop.hdfs.NNBench");
  // variables initialized from command line arguments
private static long startTime = 0;
private static int numFiles = 0;
private static long bytesPerBlock = 1;
private static long blocksPerFile = 0;
private static long bytesPerFile = 1;
private static short replicationFactorPerFile = 1; // default is 1
private static Path baseDir = null;
// variables initialized in main()
private static FileSystem fileSys = null;
private static Path taskDir = null;
private static byte[] buffer;
private static long maxExceptionsPerFile = 200;
/**
* Returns when the current number of seconds from the epoch equals
* the command line argument given by <code>-startTime</code>.
* This allows multiple instances of this program, running on clock
* synchronized nodes, to start at roughly the same time.
*/
static void barrier() {
long sleepTime;
while ((sleepTime = startTime - System.currentTimeMillis()) > 0) {
try {
Thread.sleep(sleepTime);
} catch (InterruptedException ex) {
        // This is left empty on purpose
}
}
}
static private void handleException(String operation, Throwable e,
int singleFileExceptions) {
LOG.warn("Exception while " + operation + ": " +
StringUtils.stringifyException(e));
if (singleFileExceptions >= maxExceptionsPerFile) {
throw new RuntimeException(singleFileExceptions +
" exceptions for a single file exceeds threshold. Aborting");
}
}
/**
* Create and write to a given number of files. Repeat each remote
   * operation until it succeeds (does not throw an exception).
*
* @return the number of exceptions caught
*/
static int createWrite() {
int totalExceptions = 0;
FSDataOutputStream out = null;
boolean success;
for (int index = 0; index < numFiles; index++) {
int singleFileExceptions = 0;
      do { // create file until it succeeds or max exceptions reached
try {
out = fileSys.create(
new Path(taskDir, "" + index), false, 512,
(short)replicationFactorPerFile, bytesPerBlock);
success = true;
} catch (IOException ioe) {
success=false;
totalExceptions++;
handleException("creating file #" + index, ioe,
++singleFileExceptions);
}
} while (!success);
long toBeWritten = bytesPerFile;
while (toBeWritten > 0) {
int nbytes = (int) Math.min(buffer.length, toBeWritten);
toBeWritten -= nbytes;
try { // only try once
out.write(buffer, 0, nbytes);
} catch (IOException ioe) {
totalExceptions++;
handleException("writing to file #" + index, ioe,
++singleFileExceptions);
}
}
      do { // close file until it succeeds
try {
out.close();
success = true;
} catch (IOException ioe) {
success=false;
totalExceptions++;
handleException("closing file #" + index, ioe,
++singleFileExceptions);
}
} while (!success);
}
return totalExceptions;
}
/**
* Open and read a given number of files.
*
* @return the number of exceptions caught
*/
static int openRead() {
int totalExceptions = 0;
FSDataInputStream in;
for (int index = 0; index < numFiles; index++) {
int singleFileExceptions = 0;
try {
in = fileSys.open(new Path(taskDir, "" + index), 512);
long toBeRead = bytesPerFile;
while (toBeRead > 0) {
int nbytes = (int) Math.min(buffer.length, toBeRead);
toBeRead -= nbytes;
          try { // only try once; we don't care about the number of bytes read
in.read(buffer, 0, nbytes);
} catch (IOException ioe) {
totalExceptions++;
handleException("reading from file #" + index, ioe,
++singleFileExceptions);
}
}
in.close();
} catch (IOException ioe) {
totalExceptions++;
handleException("opening file #" + index, ioe, ++singleFileExceptions);
}
}
return totalExceptions;
}
/**
* Rename a given number of files. Repeat each remote
   * operation until it succeeds (does not throw an exception).
*
* @return the number of exceptions caught
*/
static int rename() {
int totalExceptions = 0;
boolean success;
for (int index = 0; index < numFiles; index++) {
int singleFileExceptions = 0;
      do { // rename file until it succeeds
try {
          // The boolean result of this operation is of no interest to us;
          // reaching this point simply means the namesystem processed the
          // rename request without throwing an exception
fileSys.rename(new Path(taskDir, "" + index),
new Path(taskDir, "A" + index));
success = true;
} catch (IOException ioe) {
success = false;
totalExceptions++;
handleException("creating file #" + index, ioe, ++singleFileExceptions);
}
} while (!success);
}
return totalExceptions;
}
/**
* Delete a given number of files. Repeat each remote
   * operation until it succeeds (does not throw an exception).
*
* @return the number of exceptions caught
*/
static int delete() {
int totalExceptions = 0;
boolean success;
for (int index = 0; index < numFiles; index++) {
int singleFileExceptions = 0;
      do { // delete file until it succeeds
try {
          // The boolean result of this operation is of no interest to us;
          // reaching this point simply means the namesystem processed the
          // delete request without throwing an exception
fileSys.delete(new Path(taskDir, "A" + index), true);
success = true;
} catch (IOException ioe) {
success=false;
totalExceptions++;
handleException("creating file #" + index, ioe, ++singleFileExceptions);
}
} while (!success);
}
return totalExceptions;
}
/**
* This launches a given namenode operation (<code>-operation</code>),
* starting at a given time (<code>-startTime</code>). The files used
* by the openRead, rename, and delete operations are the same files
* created by the createWrite operation. Typically, the program
* would be run four times, once for each operation in this order:
* createWrite, openRead, rename, delete.
*
* <pre>
* Usage: nnbench
* -operation <one of createWrite, openRead, rename, or delete>
* -baseDir <base output/input DFS path>
* -startTime <time to start, given in seconds from the epoch>
* -numFiles <number of files to create, read, rename, or delete>
* -blocksPerFile <number of blocks to create per file>
* [-bytesPerBlock <number of bytes to write to each block, default is 1>]
* [-bytesPerChecksum <value for io.bytes.per.checksum>]
* </pre>
*
* @param args is an array of the program command line arguments
* @throws IOException indicates a problem with test startup
*/
public static void main(String[] args) throws IOException {
String version = "NameNodeBenchmark.0.3";
System.out.println(version);
int bytesPerChecksum = -1;
String usage =
"Usage: nnbench " +
" -operation <one of createWrite, openRead, rename, or delete>\n " +
" -baseDir <base output/input DFS path>\n " +
" -startTime <time to start, given in seconds from the epoch>\n" +
" -numFiles <number of files to create>\n " +
" -replicationFactorPerFile <Replication factor for the files, default is 1>\n" +
" -blocksPerFile <number of blocks to create per file>\n" +
" [-bytesPerBlock <number of bytes to write to each block, default is 1>]\n" +
" [-bytesPerChecksum <value for io.bytes.per.checksum>]\n" +
"Note: bytesPerBlock MUST be a multiple of bytesPerChecksum\n";
String operation = null;
for (int i = 0; i < args.length; i++) { // parse command line
if (args[i].equals("-baseDir")) {
baseDir = new Path(args[++i]);
} else if (args[i].equals("-numFiles")) {
numFiles = Integer.parseInt(args[++i]);
} else if (args[i].equals("-blocksPerFile")) {
blocksPerFile = Integer.parseInt(args[++i]);
} else if (args[i].equals("-bytesPerBlock")) {
bytesPerBlock = Long.parseLong(args[++i]);
} else if (args[i].equals("-bytesPerChecksum")) {
bytesPerChecksum = Integer.parseInt(args[++i]);
} else if (args[i].equals("-replicationFactorPerFile")) {
replicationFactorPerFile = Short.parseShort(args[++i]);
} else if (args[i].equals("-startTime")) {
startTime = Long.parseLong(args[++i]) * 1000;
} else if (args[i].equals("-operation")) {
operation = args[++i];
} else {
System.out.println(usage);
System.exit(-1);
}
}
bytesPerFile = bytesPerBlock * blocksPerFile;
JobConf jobConf = new JobConf(new Configuration(), NNBench.class);
if ( bytesPerChecksum < 0 ) { // if it is not set in cmdline
bytesPerChecksum = jobConf.getInt("io.bytes.per.checksum", 512);
}
jobConf.set("io.bytes.per.checksum", Integer.toString(bytesPerChecksum));
System.out.println("Inputs: ");
System.out.println(" operation: " + operation);
System.out.println(" baseDir: " + baseDir);
System.out.println(" startTime: " + startTime);
System.out.println(" numFiles: " + numFiles);
System.out.println(" replicationFactorPerFile: " + replicationFactorPerFile);
System.out.println(" blocksPerFile: " + blocksPerFile);
System.out.println(" bytesPerBlock: " + bytesPerBlock);
System.out.println(" bytesPerChecksum: " + bytesPerChecksum);
if (operation == null || // verify args
baseDir == null ||
numFiles < 1 ||
blocksPerFile < 1 ||
bytesPerBlock < 0 ||
bytesPerBlock % bytesPerChecksum != 0)
{
System.err.println(usage);
System.exit(-1);
}
fileSys = FileSystem.get(jobConf);
String uniqueId = java.net.InetAddress.getLocalHost().getHostName();
taskDir = new Path(baseDir, uniqueId);
// initialize buffer used for writing/reading file
buffer = new byte[(int) Math.min(bytesPerFile, 32768L)];
Date execTime;
Date endTime;
long duration;
int exceptions = 0;
barrier(); // wait for coordinated start time
execTime = new Date();
System.out.println("Job started: " + startTime);
if (operation.equals("createWrite")) {
if (!fileSys.mkdirs(taskDir)) {
throw new IOException("Mkdirs failed to create " + taskDir.toString());
}
exceptions = createWrite();
} else if (operation.equals("openRead")) {
exceptions = openRead();
} else if (operation.equals("rename")) {
exceptions = rename();
} else if (operation.equals("delete")) {
exceptions = delete();
} else {
System.err.println(usage);
System.exit(-1);
}
endTime = new Date();
System.out.println("Job ended: " + endTime);
duration = (endTime.getTime() - execTime.getTime()) /1000;
System.out.println("The " + operation + " job took " + duration + " seconds.");
System.out.println("The job recorded " + exceptions + " exceptions.");
}
}
| 13,603
| 36.271233
| 88
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBench.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.io.IOException;
import java.util.Date;
import java.io.DataInputStream;
import java.io.FileOutputStream;
import java.io.InputStreamReader;
import java.io.PrintStream;
import java.io.File;
import java.io.BufferedReader;
import java.util.StringTokenizer;
import java.net.InetAddress;
import java.text.SimpleDateFormat;
import java.util.Iterator;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.Log;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.SequenceFileInputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TaskAttemptID;
/**
* This program executes a specified operation that applies load to
* the NameNode.
*
* When run simultaneously on multiple nodes, this program functions
* as a stress-test and benchmark for namenode, especially when
* the number of bytes written to each file is small.
*
* Valid operations are:
* create_write
* open_read
* rename
* delete
*
* NOTE: The open_read, rename and delete operations assume that the files
* they operate on are already available. The create_write operation
* must be run before running the other operations.
*/
public class NNBench {
private static final Log LOG = LogFactory.getLog(
"org.apache.hadoop.hdfs.NNBench");
protected static String CONTROL_DIR_NAME = "control";
protected static String OUTPUT_DIR_NAME = "output";
protected static String DATA_DIR_NAME = "data";
protected static final String DEFAULT_RES_FILE_NAME = "NNBench_results.log";
protected static final String NNBENCH_VERSION = "NameNode Benchmark 0.4";
public static String operation = "none";
public static long numberOfMaps = 1l; // default is 1
public static long numberOfReduces = 1l; // default is 1
public static long startTime =
System.currentTimeMillis() + (120 * 1000); // default is 'now' + 2min
public static long blockSize = 1l; // default is 1
public static int bytesToWrite = 0; // default is 0
public static long bytesPerChecksum = 1l; // default is 1
public static long numberOfFiles = 1l; // default is 1
public static short replicationFactorPerFile = 1; // default is 1
public static String baseDir = "/benchmarks/NNBench"; // default
public static boolean readFileAfterOpen = false; // default is to not read
// Supported operations
private static final String OP_CREATE_WRITE = "create_write";
private static final String OP_OPEN_READ = "open_read";
private static final String OP_RENAME = "rename";
private static final String OP_DELETE = "delete";
// To display in the format that matches the NN and DN log format
// Example: 2007-10-26 00:01:19,853
static SimpleDateFormat sdf =
new SimpleDateFormat("yyyy-MM-dd' 'HH:mm:ss','S");
private static Configuration config = new Configuration();
/**
* Clean up the files before a test run
*
* @throws IOException on error
*/
private static void cleanupBeforeTestrun() throws IOException {
FileSystem tempFS = FileSystem.get(config);
// Delete the data directory only if it is the create/write operation
if (operation.equals(OP_CREATE_WRITE)) {
LOG.info("Deleting data directory");
tempFS.delete(new Path(baseDir, DATA_DIR_NAME), true);
}
tempFS.delete(new Path(baseDir, CONTROL_DIR_NAME), true);
tempFS.delete(new Path(baseDir, OUTPUT_DIR_NAME), true);
}
/**
* Create control files before a test run.
* Number of files created is equal to the number of maps specified
*
* @throws IOException on error
*/
private static void createControlFiles() throws IOException {
FileSystem tempFS = FileSystem.get(config);
LOG.info("Creating " + numberOfMaps + " control files");
for (int i = 0; i < numberOfMaps; i++) {
String strFileName = "NNBench_Controlfile_" + i;
Path filePath = new Path(new Path(baseDir, CONTROL_DIR_NAME),
strFileName);
SequenceFile.Writer writer = null;
try {
writer = SequenceFile.createWriter(tempFS, config, filePath, Text.class,
LongWritable.class, CompressionType.NONE);
writer.append(new Text(strFileName), new LongWritable(0l));
} finally {
if (writer != null) {
writer.close();
}
}
}
}
/**
* Display version
*/
private static void displayVersion() {
System.out.println(NNBENCH_VERSION);
}
/**
* Display usage
*/
private static void displayUsage() {
String usage =
"Usage: nnbench <options>\n" +
"Options:\n" +
"\t-operation <Available operations are " + OP_CREATE_WRITE + " " +
OP_OPEN_READ + " " + OP_RENAME + " " + OP_DELETE + ". " +
"This option is mandatory>\n" +
"\t * NOTE: The open_read, rename and delete operations assume " +
"that the files they operate on, are already available. " +
"The create_write operation must be run before running the " +
"other operations.\n" +
"\t-maps <number of maps. default is 1. This is not mandatory>\n" +
"\t-reduces <number of reduces. default is 1. This is not mandatory>\n" +
"\t-startTime <time to start, given in seconds from the epoch. " +
"Make sure this is far enough into the future, so all maps " +
"(operations) will start at the same time. " +
"default is launch time + 2 mins. This is not mandatory>\n" +
"\t-blockSize <Block size in bytes. default is 1. " +
"This is not mandatory>\n" +
"\t-bytesToWrite <Bytes to write. default is 0. " +
"This is not mandatory>\n" +
"\t-bytesPerChecksum <Bytes per checksum for the files. default is 1. " +
"This is not mandatory>\n" +
"\t-numberOfFiles <number of files to create. default is 1. " +
"This is not mandatory>\n" +
"\t-replicationFactorPerFile <Replication factor for the files." +
" default is 1. This is not mandatory>\n" +
"\t-baseDir <base DFS path. default is /benchmarks/NNBench. " +
"This is not mandatory>\n" +
"\t-readFileAfterOpen <true or false. if true, it reads the file and " +
"reports the average time to read. This is valid with the open_read " +
"operation. default is false. This is not mandatory>\n" +
"\t-help: Display the help statement\n";
System.out.println(usage);
}
/**
* check for arguments and fail if the values are not specified
* @param index positional number of an argument in the list of command
* line's arguments
* @param length total number of arguments
*/
public static void checkArgs(final int index, final int length) {
if (index == length) {
displayUsage();
System.exit(-1);
}
}
/**
* Parse input arguments
*
* @param args array of command line's parameters to be parsed
*/
public static void parseInputs(final String[] args) {
// If there are no command line arguments, exit
if (args.length == 0) {
displayUsage();
System.exit(-1);
}
// Parse command line args
for (int i = 0; i < args.length; i++) {
if (args[i].equals("-operation")) {
operation = args[++i];
} else if (args[i].equals("-maps")) {
checkArgs(i + 1, args.length);
numberOfMaps = Long.parseLong(args[++i]);
} else if (args[i].equals("-reduces")) {
checkArgs(i + 1, args.length);
numberOfReduces = Long.parseLong(args[++i]);
} else if (args[i].equals("-startTime")) {
checkArgs(i + 1, args.length);
startTime = Long.parseLong(args[++i]) * 1000;
} else if (args[i].equals("-blockSize")) {
checkArgs(i + 1, args.length);
blockSize = Long.parseLong(args[++i]);
} else if (args[i].equals("-bytesToWrite")) {
checkArgs(i + 1, args.length);
bytesToWrite = Integer.parseInt(args[++i]);
} else if (args[i].equals("-bytesPerChecksum")) {
checkArgs(i + 1, args.length);
bytesPerChecksum = Long.parseLong(args[++i]);
} else if (args[i].equals("-numberOfFiles")) {
checkArgs(i + 1, args.length);
numberOfFiles = Long.parseLong(args[++i]);
} else if (args[i].equals("-replicationFactorPerFile")) {
checkArgs(i + 1, args.length);
replicationFactorPerFile = Short.parseShort(args[++i]);
} else if (args[i].equals("-baseDir")) {
checkArgs(i + 1, args.length);
baseDir = args[++i];
} else if (args[i].equals("-readFileAfterOpen")) {
checkArgs(i + 1, args.length);
readFileAfterOpen = Boolean.parseBoolean(args[++i]);
} else if (args[i].equals("-help")) {
displayUsage();
System.exit(-1);
}
}
LOG.info("Test Inputs: ");
LOG.info(" Test Operation: " + operation);
LOG.info(" Start time: " + sdf.format(new Date(startTime)));
LOG.info(" Number of maps: " + numberOfMaps);
LOG.info(" Number of reduces: " + numberOfReduces);
LOG.info(" Block Size: " + blockSize);
LOG.info(" Bytes to write: " + bytesToWrite);
LOG.info(" Bytes per checksum: " + bytesPerChecksum);
LOG.info(" Number of files: " + numberOfFiles);
LOG.info(" Replication factor: " + replicationFactorPerFile);
LOG.info(" Base dir: " + baseDir);
LOG.info(" Read file after open: " + readFileAfterOpen);
// Set user-defined parameters, so the map method can access the values
config.set("test.nnbench.operation", operation);
config.setLong("test.nnbench.maps", numberOfMaps);
config.setLong("test.nnbench.reduces", numberOfReduces);
config.setLong("test.nnbench.starttime", startTime);
config.setLong("test.nnbench.blocksize", blockSize);
config.setInt("test.nnbench.bytestowrite", bytesToWrite);
config.setLong("test.nnbench.bytesperchecksum", bytesPerChecksum);
config.setLong("test.nnbench.numberoffiles", numberOfFiles);
config.setInt("test.nnbench.replicationfactor",
(int) replicationFactorPerFile);
config.set("test.nnbench.basedir", baseDir);
config.setBoolean("test.nnbench.readFileAfterOpen", readFileAfterOpen);
config.set("test.nnbench.datadir.name", DATA_DIR_NAME);
config.set("test.nnbench.outputdir.name", OUTPUT_DIR_NAME);
config.set("test.nnbench.controldir.name", CONTROL_DIR_NAME);
}
/**
* Analyze the results
*
* @throws IOException on error
*/
private static void analyzeResults() throws IOException {
final FileSystem fs = FileSystem.get(config);
Path reduceFile = new Path(new Path(baseDir, OUTPUT_DIR_NAME),
"part-00000");
DataInputStream in;
in = new DataInputStream(fs.open(reduceFile));
BufferedReader lines;
lines = new BufferedReader(new InputStreamReader(in));
long totalTimeAL1 = 0l;
long totalTimeAL2 = 0l;
long totalTimeTPmS = 0l;
long lateMaps = 0l;
long numOfExceptions = 0l;
long successfulFileOps = 0l;
long mapStartTimeTPmS = 0l;
long mapEndTimeTPmS = 0l;
String resultTPSLine1 = null;
String resultTPSLine2 = null;
String resultALLine1 = null;
String resultALLine2 = null;
String line;
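    // Each line of the reducer output is a key/value pair; the suffix of the
    // key identifies which aggregated metric the value belongs to.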
while((line = lines.readLine()) != null) {
StringTokenizer tokens = new StringTokenizer(line, " \t\n\r\f%;");
String attr = tokens.nextToken();
if (attr.endsWith(":totalTimeAL1")) {
totalTimeAL1 = Long.parseLong(tokens.nextToken());
} else if (attr.endsWith(":totalTimeAL2")) {
totalTimeAL2 = Long.parseLong(tokens.nextToken());
} else if (attr.endsWith(":totalTimeTPmS")) {
totalTimeTPmS = Long.parseLong(tokens.nextToken());
} else if (attr.endsWith(":latemaps")) {
lateMaps = Long.parseLong(tokens.nextToken());
} else if (attr.endsWith(":numOfExceptions")) {
numOfExceptions = Long.parseLong(tokens.nextToken());
} else if (attr.endsWith(":successfulFileOps")) {
successfulFileOps = Long.parseLong(tokens.nextToken());
} else if (attr.endsWith(":mapStartTimeTPmS")) {
mapStartTimeTPmS = Long.parseLong(tokens.nextToken());
} else if (attr.endsWith(":mapEndTimeTPmS")) {
mapEndTimeTPmS = Long.parseLong(tokens.nextToken());
}
}
// Average latency is the average time to perform 'n' number of
// operations, n being the number of files
double avgLatency1 = (double) totalTimeAL1 / successfulFileOps;
double avgLatency2 = (double) totalTimeAL2 / successfulFileOps;
// The time it takes for the longest running map is measured. Using that,
// cluster transactions per second is calculated. It includes time to
// retry any of the failed operations
double longestMapTimeTPmS = (double) (mapEndTimeTPmS - mapStartTimeTPmS);
double totalTimeTPS = (longestMapTimeTPmS == 0) ?
(1000 * successfulFileOps) :
(double) (1000 * successfulFileOps) / longestMapTimeTPmS;
// The time it takes to perform 'n' operations is calculated (in ms),
// n being the number of files. Using that time, the average execution
// time is calculated. It includes time to retry any of the
// failed operations
    double averageExecutionTime = (totalTimeTPmS == 0) ?
(double) successfulFileOps :
(double) totalTimeTPmS / successfulFileOps;
if (operation.equals(OP_CREATE_WRITE)) {
// For create/write/close, it is treated as two transactions,
// since a file create from a client perspective involves create and close
resultTPSLine1 = " TPS: Create/Write/Close: " +
(int) (totalTimeTPS * 2);
resultTPSLine2 = "Avg exec time (ms): Create/Write/Close: " +
          averageExecutionTime;
resultALLine1 = " Avg Lat (ms): Create/Write: " + avgLatency1;
resultALLine2 = " Avg Lat (ms): Close: " + avgLatency2;
} else if (operation.equals(OP_OPEN_READ)) {
resultTPSLine1 = " TPS: Open/Read: " +
(int) totalTimeTPS;
resultTPSLine2 = " Avg Exec time (ms): Open/Read: " +
        averageExecutionTime;
resultALLine1 = " Avg Lat (ms): Open: " + avgLatency1;
if (readFileAfterOpen) {
resultALLine2 = " Avg Lat (ms): Read: " + avgLatency2;
}
} else if (operation.equals(OP_RENAME)) {
resultTPSLine1 = " TPS: Rename: " +
(int) totalTimeTPS;
resultTPSLine2 = " Avg Exec time (ms): Rename: " +
        averageExecutionTime;
resultALLine1 = " Avg Lat (ms): Rename: " + avgLatency1;
} else if (operation.equals(OP_DELETE)) {
resultTPSLine1 = " TPS: Delete: " +
(int) totalTimeTPS;
resultTPSLine2 = " Avg Exec time (ms): Delete: " +
        averageExecutionTime;
resultALLine1 = " Avg Lat (ms): Delete: " + avgLatency1;
}
String resultLines[] = {
"-------------- NNBench -------------- : ",
" Version: " + NNBENCH_VERSION,
" Date & time: " + sdf.format(new Date(
System.currentTimeMillis())),
"",
" Test Operation: " + operation,
" Start time: " +
sdf.format(new Date(startTime)),
" Maps to run: " + numberOfMaps,
" Reduces to run: " + numberOfReduces,
" Block Size (bytes): " + blockSize,
" Bytes to write: " + bytesToWrite,
" Bytes per checksum: " + bytesPerChecksum,
" Number of files: " + numberOfFiles,
" Replication factor: " + replicationFactorPerFile,
" Successful file operations: " + successfulFileOps,
"",
" # maps that missed the barrier: " + lateMaps,
" # exceptions: " + numOfExceptions,
"",
resultTPSLine1,
resultTPSLine2,
resultALLine1,
resultALLine2,
"",
" RAW DATA: AL Total #1: " + totalTimeAL1,
" RAW DATA: AL Total #2: " + totalTimeAL2,
" RAW DATA: TPS Total (ms): " + totalTimeTPmS,
" RAW DATA: Longest Map Time (ms): " + longestMapTimeTPmS,
" RAW DATA: Late maps: " + lateMaps,
" RAW DATA: # of exceptions: " + numOfExceptions,
"" };
PrintStream res = new PrintStream(new FileOutputStream(
new File(DEFAULT_RES_FILE_NAME), true));
// Write to a file and also dump to log
for(int i = 0; i < resultLines.length; i++) {
LOG.info(resultLines[i]);
res.println(resultLines[i]);
}
}
/**
* Run the test
*
* @throws IOException on error
*/
public static void runTests() throws IOException {
config.setLong("io.bytes.per.checksum", bytesPerChecksum);
JobConf job = new JobConf(config, NNBench.class);
job.setJobName("NNBench-" + operation);
FileInputFormat.setInputPaths(job, new Path(baseDir, CONTROL_DIR_NAME));
job.setInputFormat(SequenceFileInputFormat.class);
// Explicitly set number of max map attempts to 1.
job.setMaxMapAttempts(1);
// Explicitly turn off speculative execution
job.setSpeculativeExecution(false);
job.setMapperClass(NNBenchMapper.class);
job.setReducerClass(NNBenchReducer.class);
FileOutputFormat.setOutputPath(job, new Path(baseDir, OUTPUT_DIR_NAME));
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
job.setNumReduceTasks((int) numberOfReduces);
JobClient.runJob(job);
}
/**
* Validate the inputs
*/
public static void validateInputs() {
// If it is not one of the four operations, then fail
if (!operation.equals(OP_CREATE_WRITE) &&
!operation.equals(OP_OPEN_READ) &&
!operation.equals(OP_RENAME) &&
!operation.equals(OP_DELETE)) {
System.err.println("Error: Unknown operation: " + operation);
displayUsage();
System.exit(-1);
}
// If number of maps is a negative number, then fail
// Hadoop allows the number of maps to be 0
if (numberOfMaps < 0) {
System.err.println("Error: Number of maps must be a positive number");
displayUsage();
System.exit(-1);
}
// If number of reduces is a negative number or 0, then fail
if (numberOfReduces <= 0) {
System.err.println("Error: Number of reduces must be a positive number");
displayUsage();
System.exit(-1);
}
// If blocksize is a negative number or 0, then fail
if (blockSize <= 0) {
System.err.println("Error: Block size must be a positive number");
displayUsage();
System.exit(-1);
}
// If bytes to write is a negative number, then fail
if (bytesToWrite < 0) {
System.err.println("Error: Bytes to write must be a positive number");
displayUsage();
System.exit(-1);
}
// If bytes per checksum is a negative number, then fail
if (bytesPerChecksum < 0) {
System.err.println("Error: Bytes per checksum must be a positive number");
displayUsage();
System.exit(-1);
}
// If number of files is a negative number, then fail
if (numberOfFiles < 0) {
System.err.println("Error: Number of files must be a positive number");
displayUsage();
System.exit(-1);
}
// If replication factor is a negative number, then fail
if (replicationFactorPerFile < 0) {
System.err.println("Error: Replication factor must be a positive number");
displayUsage();
System.exit(-1);
}
// If block size is not a multiple of bytesperchecksum, fail
if (blockSize % bytesPerChecksum != 0) {
System.err.println("Error: Block Size in bytes must be a multiple of " +
"bytes per checksum: ");
displayUsage();
System.exit(-1);
}
}
/**
* Main method for running the NNBench benchmarks
*
* @param args array of command line arguments
* @throws IOException indicates a problem with test startup
*/
public static void main(String[] args) throws IOException {
// Display the application version string
displayVersion();
// Parse the inputs
parseInputs(args);
// Validate inputs
validateInputs();
// Clean up files before the test run
cleanupBeforeTestrun();
// Create control files before test run
createControlFiles();
// Run the tests as a map reduce job
runTests();
// Analyze results
analyzeResults();
}
/**
* Mapper class
*/
static class NNBenchMapper extends Configured
implements Mapper<Text, LongWritable, Text, Text> {
FileSystem filesystem = null;
private String hostName = null;
long numberOfFiles = 1l;
long blkSize = 1l;
short replFactor = 1;
int bytesToWrite = 0;
String baseDir = null;
String dataDirName = null;
String op = null;
boolean readFile = false;
final int MAX_OPERATION_EXCEPTIONS = 1000;
// Data to collect from the operation
int numOfExceptions = 0;
long startTimeAL = 0l;
long totalTimeAL1 = 0l;
long totalTimeAL2 = 0l;
long successfulFileOps = 0l;
/**
* Constructor
*/
public NNBenchMapper() {
}
/**
* Mapper base implementation
*/
public void configure(JobConf conf) {
setConf(conf);
try {
filesystem = FileSystem.get(conf);
} catch(Exception e) {
throw new RuntimeException("Cannot get file system.", e);
}
try {
hostName = InetAddress.getLocalHost().getHostName();
} catch(Exception e) {
throw new RuntimeException("Error getting hostname", e);
}
}
/**
* Mapper base implementation
*/
public void close() throws IOException {
}
/**
* Returns when the current number of seconds from the epoch equals
* the command line argument given by <code>-startTime</code>.
* This allows multiple instances of this program, running on clock
* synchronized nodes, to start at roughly the same time.
     * @return true if the method was able to sleep until <code>-startTime</code>
* without interruption; false otherwise
*/
private boolean barrier() {
long startTime = getConf().getLong("test.nnbench.starttime", 0l);
long currentTime = System.currentTimeMillis();
long sleepTime = startTime - currentTime;
boolean retVal = false;
// If the sleep time is greater than 0, then sleep and return
if (sleepTime > 0) {
LOG.info("Waiting in barrier for: " + sleepTime + " ms");
try {
Thread.sleep(sleepTime);
retVal = true;
} catch (Exception e) {
retVal = false;
}
}
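      // A false return value marks this map as late; the caller then skips the
      // timed operation and reports it under the "l:latemaps" counter.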
return retVal;
}
/**
* Map method
*/
public void map(Text key,
LongWritable value,
OutputCollector<Text, Text> output,
Reporter reporter) throws IOException {
Configuration conf = filesystem.getConf();
numberOfFiles = conf.getLong("test.nnbench.numberoffiles", 1l);
blkSize = conf.getLong("test.nnbench.blocksize", 1l);
replFactor = (short) (conf.getInt("test.nnbench.replicationfactor", 1));
bytesToWrite = conf.getInt("test.nnbench.bytestowrite", 0);
baseDir = conf.get("test.nnbench.basedir");
dataDirName = conf.get("test.nnbench.datadir.name");
op = conf.get("test.nnbench.operation");
readFile = conf.getBoolean("test.nnbench.readFileAfterOpen", false);
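      // The numeric task id is used below to build a per-task file name
      // prefix so concurrent map tasks do not operate on the same files.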
int taskId =
TaskAttemptID.forName(conf.get(MRJobConfig.TASK_ATTEMPT_ID))
.getTaskID().getId();
long totalTimeTPmS = 0l;
long startTimeTPmS = 0l;
long endTimeTPms = 0l;
numOfExceptions = 0;
startTimeAL = 0l;
totalTimeAL1 = 0l;
totalTimeAL2 = 0l;
successfulFileOps = 0l;
if (barrier()) {
String filePrefix = "file_" + taskId + "_";
if (op.equals(OP_CREATE_WRITE)) {
startTimeTPmS = System.currentTimeMillis();
doCreateWriteOp(filePrefix, reporter);
} else if (op.equals(OP_OPEN_READ)) {
startTimeTPmS = System.currentTimeMillis();
doOpenReadOp(filePrefix, reporter);
} else if (op.equals(OP_RENAME)) {
startTimeTPmS = System.currentTimeMillis();
doRenameOp(filePrefix, reporter);
} else if (op.equals(OP_DELETE)) {
startTimeTPmS = System.currentTimeMillis();
doDeleteOp(filePrefix, reporter);
}
endTimeTPms = System.currentTimeMillis();
totalTimeTPmS = endTimeTPms - startTimeTPmS;
} else {
output.collect(new Text("l:latemaps"), new Text("1"));
}
// collect after the map end time is measured
output.collect(new Text("l:totalTimeAL1"),
new Text(String.valueOf(totalTimeAL1)));
output.collect(new Text("l:totalTimeAL2"),
new Text(String.valueOf(totalTimeAL2)));
output.collect(new Text("l:numOfExceptions"),
new Text(String.valueOf(numOfExceptions)));
output.collect(new Text("l:successfulFileOps"),
new Text(String.valueOf(successfulFileOps)));
output.collect(new Text("l:totalTimeTPmS"),
new Text(String.valueOf(totalTimeTPmS)));
output.collect(new Text("min:mapStartTimeTPmS"),
new Text(String.valueOf(startTimeTPmS)));
output.collect(new Text("max:mapEndTimeTPmS"),
new Text(String.valueOf(endTimeTPms)));
}
/**
* Create and Write operation.
     * @param name prefix of the output files to be created
     * @param reporter an instance of {@link Reporter} to be used for
     * status updates
*/
private void doCreateWriteOp(String name,
Reporter reporter) {
FSDataOutputStream out;
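      // The write buffer is never filled with data; only the number of bytes
      // written per file matters for this benchmark.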
byte[] buffer = new byte[bytesToWrite];
for (long l = 0l; l < numberOfFiles; l++) {
Path filePath = new Path(new Path(baseDir, dataDirName),
name + "_" + l);
boolean successfulOp = false;
while (! successfulOp && numOfExceptions < MAX_OPERATION_EXCEPTIONS) {
try {
// Set up timer for measuring AL (transaction #1)
startTimeAL = System.currentTimeMillis();
// Create the file
// Use a buffer size of 512
out = filesystem.create(filePath,
true,
512,
replFactor,
blkSize);
out.write(buffer);
totalTimeAL1 += (System.currentTimeMillis() - startTimeAL);
// Close the file / file output stream
// Set up timers for measuring AL (transaction #2)
startTimeAL = System.currentTimeMillis();
out.close();
totalTimeAL2 += (System.currentTimeMillis() - startTimeAL);
successfulOp = true;
successfulFileOps ++;
reporter.setStatus("Finish "+ l + " files");
} catch (IOException e) {
LOG.info("Exception recorded in op: " +
"Create/Write/Close");
numOfExceptions++;
}
}
}
}
/**
* Open operation
     * @param name prefix of the output files to be read
     * @param reporter an instance of {@link Reporter} to be used for
     * status updates
*/
private void doOpenReadOp(String name,
Reporter reporter) {
FSDataInputStream input;
byte[] buffer = new byte[bytesToWrite];
for (long l = 0l; l < numberOfFiles; l++) {
Path filePath = new Path(new Path(baseDir, dataDirName),
name + "_" + l);
boolean successfulOp = false;
while (! successfulOp && numOfExceptions < MAX_OPERATION_EXCEPTIONS) {
try {
// Set up timer for measuring AL
startTimeAL = System.currentTimeMillis();
input = filesystem.open(filePath);
totalTimeAL1 += (System.currentTimeMillis() - startTimeAL);
// If the file needs to be read (specified at command line)
if (readFile) {
startTimeAL = System.currentTimeMillis();
input.readFully(buffer);
totalTimeAL2 += (System.currentTimeMillis() - startTimeAL);
}
input.close();
successfulOp = true;
successfulFileOps ++;
reporter.setStatus("Finish "+ l + " files");
} catch (IOException e) {
LOG.info("Exception recorded in op: OpenRead " + e);
numOfExceptions++;
}
}
}
}
/**
* Rename operation
     * @param name prefix of the files to be renamed
     * @param reporter an instance of {@link Reporter} to be used for
     * status updates
*/
private void doRenameOp(String name,
Reporter reporter) {
for (long l = 0l; l < numberOfFiles; l++) {
Path filePath = new Path(new Path(baseDir, dataDirName),
name + "_" + l);
Path filePathR = new Path(new Path(baseDir, dataDirName),
name + "_r_" + l);
boolean successfulOp = false;
while (! successfulOp && numOfExceptions < MAX_OPERATION_EXCEPTIONS) {
try {
// Set up timer for measuring AL
startTimeAL = System.currentTimeMillis();
filesystem.rename(filePath, filePathR);
totalTimeAL1 += (System.currentTimeMillis() - startTimeAL);
successfulOp = true;
successfulFileOps ++;
reporter.setStatus("Finish "+ l + " files");
} catch (IOException e) {
LOG.info("Exception recorded in op: Rename");
numOfExceptions++;
}
}
}
}
/**
* Delete operation
     * @param name prefix of the files to be deleted
     * @param reporter an instance of {@link Reporter} to be used for
     * status updates
*/
private void doDeleteOp(String name,
Reporter reporter) {
for (long l = 0l; l < numberOfFiles; l++) {
Path filePath = new Path(new Path(baseDir, dataDirName),
name + "_" + l);
boolean successfulOp = false;
while (! successfulOp && numOfExceptions < MAX_OPERATION_EXCEPTIONS) {
try {
// Set up timer for measuring AL
startTimeAL = System.currentTimeMillis();
filesystem.delete(filePath, true);
totalTimeAL1 += (System.currentTimeMillis() - startTimeAL);
successfulOp = true;
successfulFileOps ++;
reporter.setStatus("Finish "+ l + " files");
} catch (IOException e) {
LOG.info("Exception in recorded op: Delete");
numOfExceptions++;
}
}
}
}
}
/**
* Reducer class
*/
static class NNBenchReducer extends MapReduceBase
implements Reducer<Text, Text, Text, Text> {
protected String hostName;
public NNBenchReducer () {
LOG.info("Starting NNBenchReducer !!!");
try {
hostName = java.net.InetAddress.getLocalHost().getHostName();
} catch(Exception e) {
hostName = "localhost";
}
LOG.info("Starting NNBenchReducer on " + hostName);
}
/**
* Reduce method
*/
public void reduce(Text key,
Iterator<Text> values,
OutputCollector<Text, Text> output,
Reporter reporter
) throws IOException {
String field = key.toString();
reporter.setStatus("starting " + field + " ::host = " + hostName);
// sum long values
if (field.startsWith("l:")) {
long lSum = 0;
while (values.hasNext()) {
lSum += Long.parseLong(values.next().toString());
}
output.collect(key, new Text(String.valueOf(lSum)));
}
if (field.startsWith("min:")) {
long minVal = -1;
while (values.hasNext()) {
long value = Long.parseLong(values.next().toString());
if (minVal == -1) {
minVal = value;
} else {
if (value != 0 && value < minVal) {
minVal = value;
}
}
}
output.collect(key, new Text(String.valueOf(minVal)));
}
if (field.startsWith("max:")) {
long maxVal = -1;
while (values.hasNext()) {
long value = Long.parseLong(values.next().toString());
if (maxVal == -1) {
maxVal = value;
} else {
if (value > maxVal) {
maxVal = value;
}
}
}
output.collect(key, new Text(String.valueOf(maxVal)));
}
reporter.setStatus("finished " + field + " ::host = " + hostName);
}
}
}
| 35,335
| 34.983707
| 81
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/testshell/ExternalMapReduce.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package testshell;
import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
 * This class is packaged in an external jar and used for
 * testing in TestJobShell.java.
*/
public class ExternalMapReduce extends Configured implements Tool {
public void configure(JobConf job) {
// do nothing
}
public void close()
throws IOException {
}
public static class MapClass extends MapReduceBase
implements Mapper<WritableComparable, Writable,
WritableComparable, IntWritable> {
public void map(WritableComparable key, Writable value,
OutputCollector<WritableComparable, IntWritable> output,
Reporter reporter)
throws IOException {
//check for classpath
String classpath = System.getProperty("java.class.path");
if (classpath.indexOf("testjob.jar") == -1) {
throw new IOException("failed to find in the library " + classpath);
}
if (classpath.indexOf("test.jar") == -1) {
throw new IOException("failed to find the library test.jar in"
+ classpath);
}
//fork off ls to see if the file exists.
// java file.exists() will not work on
// Windows since it is a symlink
String[] argv = new String[7];
argv[0] = "ls";
argv[1] = "files_tmp";
argv[2] = "localfilelink";
argv[3] = "dfsfilelink";
argv[4] = "tarlink";
argv[5] = "ziplink";
argv[6] = "test.tgz";
Process p = Runtime.getRuntime().exec(argv);
int ret = -1;
try {
ret = p.waitFor();
} catch(InterruptedException ie) {
//do nothing here.
}
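      // A non-zero exit status from ls means at least one of the expected
      // files or symlinks is missing from the working directory.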
if (ret != 0) {
throw new IOException("files_tmp does not exist");
}
}
}
public static class Reduce extends MapReduceBase
implements Reducer<WritableComparable, Writable,
WritableComparable, IntWritable> {
public void reduce(WritableComparable key, Iterator<Writable> values,
OutputCollector<WritableComparable, IntWritable> output,
Reporter reporter)
throws IOException {
//do nothing
}
}
public int run(String[] argv) throws IOException {
if (argv.length < 2) {
System.out.println("ExternalMapReduce <input> <output>");
return -1;
}
Path outDir = new Path(argv[1]);
Path input = new Path(argv[0]);
JobConf testConf = new JobConf(getConf(), ExternalMapReduce.class);
//try to load a class from libjar
try {
testConf.getClassByName("testjar.ClassWordCount");
} catch (ClassNotFoundException e) {
System.out.println("Could not find class from libjar");
return -1;
}
testConf.setJobName("external job");
FileInputFormat.setInputPaths(testConf, input);
FileOutputFormat.setOutputPath(testConf, outDir);
testConf.setMapperClass(MapClass.class);
testConf.setReducerClass(Reduce.class);
testConf.setNumReduceTasks(1);
JobClient.runJob(testConf);
return 0;
}
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new Configuration(),
new ExternalMapReduce(), args);
System.exit(res);
}
}
| 4,770
| 32.598592
| 79
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.ClusterMetrics;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.JobStatus;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.QueueAclsInfo;
import org.apache.hadoop.mapreduce.QueueInfo;
import org.apache.hadoop.mapreduce.TaskTrackerInfo;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteResponse;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionResponse;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateRequest;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateResponse;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerReport;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
import org.apache.hadoop.yarn.client.ClientRMProxy;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.client.api.YarnClientApplication;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
import org.apache.hadoop.yarn.util.ConverterUtils;
import com.google.common.annotations.VisibleForTesting;
public class ResourceMgrDelegate extends YarnClient {
private static final Log LOG = LogFactory.getLog(ResourceMgrDelegate.class);
private YarnConfiguration conf;
private ApplicationSubmissionContext application;
private ApplicationId applicationId;
@Private
@VisibleForTesting
protected YarnClient client;
private Text rmDTService;
/**
* Delegate responsible for communicating with the Resource Manager's
* {@link ApplicationClientProtocol}.
* @param conf the configuration object.
*/
public ResourceMgrDelegate(YarnConfiguration conf) {
super(ResourceMgrDelegate.class.getName());
this.conf = conf;
this.client = YarnClient.createYarnClient();
init(conf);
start();
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
client.init(conf);
super.serviceInit(conf);
}
@Override
protected void serviceStart() throws Exception {
client.start();
super.serviceStart();
}
@Override
protected void serviceStop() throws Exception {
client.stop();
super.serviceStop();
}
public TaskTrackerInfo[] getActiveTrackers() throws IOException,
InterruptedException {
try {
return TypeConverter.fromYarnNodes(
client.getNodeReports(NodeState.RUNNING));
} catch (YarnException e) {
throw new IOException(e);
}
}
public JobStatus[] getAllJobs() throws IOException, InterruptedException {
try {
Set<String> appTypes = new HashSet<String>(1);
appTypes.add(MRJobConfig.MR_APPLICATION_TYPE);
EnumSet<YarnApplicationState> appStates =
EnumSet.noneOf(YarnApplicationState.class);
return TypeConverter.fromYarnApps(
client.getApplications(appTypes, appStates), this.conf);
} catch (YarnException e) {
throw new IOException(e);
}
}
public TaskTrackerInfo[] getBlacklistedTrackers() throws IOException,
InterruptedException {
// TODO: Implement getBlacklistedTrackers
LOG.warn("getBlacklistedTrackers - Not implemented yet");
return new TaskTrackerInfo[0];
}
public ClusterMetrics getClusterMetrics() throws IOException,
InterruptedException {
try {
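      // Several of the old ClusterMetrics fields have no direct YARN
      // equivalent, so the values below are rough placeholders derived from
      // the number of node managers.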
YarnClusterMetrics metrics = client.getYarnClusterMetrics();
ClusterMetrics oldMetrics =
new ClusterMetrics(1, 1, 1, 1, 1, 1,
metrics.getNumNodeManagers() * 10,
metrics.getNumNodeManagers() * 2, 1,
metrics.getNumNodeManagers(), 0, 0);
return oldMetrics;
} catch (YarnException e) {
throw new IOException(e);
}
}
public Text getRMDelegationTokenService() {
if (rmDTService == null) {
rmDTService = ClientRMProxy.getRMDelegationTokenService(conf);
}
return rmDTService;
}
@SuppressWarnings("rawtypes")
public Token getDelegationToken(Text renewer) throws IOException,
InterruptedException {
try {
return ConverterUtils.convertFromYarn(
client.getRMDelegationToken(renewer), getRMDelegationTokenService());
} catch (YarnException e) {
throw new IOException(e);
}
}
public String getFilesystemName() throws IOException, InterruptedException {
return FileSystem.get(conf).getUri().toString();
}
public JobID getNewJobID() throws IOException, InterruptedException {
try {
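      // A new YARN application is created up front; its application id is
      // converted into the MapReduce JobID returned to the caller.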
this.application = client.createApplication().getApplicationSubmissionContext();
this.applicationId = this.application.getApplicationId();
return TypeConverter.fromYarn(applicationId);
} catch (YarnException e) {
throw new IOException(e);
}
}
public QueueInfo getQueue(String queueName) throws IOException,
InterruptedException {
try {
org.apache.hadoop.yarn.api.records.QueueInfo queueInfo =
client.getQueueInfo(queueName);
return (queueInfo == null) ? null : TypeConverter.fromYarn(queueInfo,
conf);
} catch (YarnException e) {
throw new IOException(e);
}
}
public QueueAclsInfo[] getQueueAclsForCurrentUser() throws IOException,
InterruptedException {
try {
return TypeConverter.fromYarnQueueUserAclsInfo(client
.getQueueAclsInfo());
} catch (YarnException e) {
throw new IOException(e);
}
}
public QueueInfo[] getQueues() throws IOException, InterruptedException {
try {
return TypeConverter.fromYarnQueueInfo(client.getAllQueues(), this.conf);
} catch (YarnException e) {
throw new IOException(e);
}
}
public QueueInfo[] getRootQueues() throws IOException, InterruptedException {
try {
return TypeConverter.fromYarnQueueInfo(client.getRootQueueInfos(),
this.conf);
} catch (YarnException e) {
throw new IOException(e);
}
}
public QueueInfo[] getChildQueues(String parent) throws IOException,
InterruptedException {
try {
return TypeConverter.fromYarnQueueInfo(client.getChildQueueInfos(parent),
this.conf);
} catch (YarnException e) {
throw new IOException(e);
}
}
public String getStagingAreaDir() throws IOException, InterruptedException {
// Path path = new Path(MRJobConstants.JOB_SUBMIT_DIR);
String user =
UserGroupInformation.getCurrentUser().getShortUserName();
Path path = MRApps.getStagingAreaDir(conf, user);
LOG.debug("getStagingAreaDir: dir=" + path);
return path.toString();
}
public String getSystemDir() throws IOException, InterruptedException {
Path sysDir = new Path(MRJobConfig.JOB_SUBMIT_DIR);
//FileContext.getFileContext(conf).delete(sysDir, true);
return sysDir.toString();
}
public long getTaskTrackerExpiryInterval() throws IOException,
InterruptedException {
return 0;
}
public void setJobPriority(JobID arg0, String arg1) throws IOException,
InterruptedException {
return;
}
public long getProtocolVersion(String arg0, long arg1) throws IOException {
return 0;
}
public ApplicationId getApplicationId() {
return applicationId;
}
@Override
public YarnClientApplication createApplication() throws
YarnException, IOException {
return client.createApplication();
}
@Override
public ApplicationId
submitApplication(ApplicationSubmissionContext appContext)
throws YarnException, IOException {
return client.submitApplication(appContext);
}
@Override
public void killApplication(ApplicationId applicationId)
throws YarnException, IOException {
client.killApplication(applicationId);
}
@Override
public ApplicationReport getApplicationReport(ApplicationId appId)
throws YarnException, IOException {
return client.getApplicationReport(appId);
}
@Override
public Token<AMRMTokenIdentifier> getAMRMToken(ApplicationId appId)
throws YarnException, IOException {
throw new UnsupportedOperationException();
}
@Override
public List<ApplicationReport> getApplications() throws YarnException,
IOException {
return client.getApplications();
}
@Override
public List<ApplicationReport> getApplications(Set<String> applicationTypes)
throws YarnException,
IOException {
return client.getApplications(applicationTypes);
}
@Override
public List<ApplicationReport> getApplications(
EnumSet<YarnApplicationState> applicationStates) throws YarnException,
IOException {
return client.getApplications(applicationStates);
}
@Override
public List<ApplicationReport> getApplications(
Set<String> applicationTypes,
EnumSet<YarnApplicationState> applicationStates)
throws YarnException, IOException {
return client.getApplications(applicationTypes, applicationStates);
}
@Override
public List<ApplicationReport> getApplications(Set<String> queues,
Set<String> users, Set<String> applicationTypes,
EnumSet<YarnApplicationState> applicationStates) throws YarnException,
IOException {
return client.getApplications(queues, users, applicationTypes,
applicationStates);
}
@Override
public YarnClusterMetrics getYarnClusterMetrics() throws YarnException,
IOException {
return client.getYarnClusterMetrics();
}
@Override
public List<NodeReport> getNodeReports(NodeState... states)
throws YarnException, IOException {
return client.getNodeReports(states);
}
@Override
public org.apache.hadoop.yarn.api.records.Token getRMDelegationToken(
Text renewer) throws YarnException, IOException {
return client.getRMDelegationToken(renewer);
}
@Override
public org.apache.hadoop.yarn.api.records.QueueInfo getQueueInfo(
String queueName) throws YarnException, IOException {
return client.getQueueInfo(queueName);
}
@Override
public List<org.apache.hadoop.yarn.api.records.QueueInfo> getAllQueues()
throws YarnException, IOException {
return client.getAllQueues();
}
@Override
public List<org.apache.hadoop.yarn.api.records.QueueInfo> getRootQueueInfos()
throws YarnException, IOException {
return client.getRootQueueInfos();
}
@Override
public List<org.apache.hadoop.yarn.api.records.QueueInfo> getChildQueueInfos(
String parent) throws YarnException, IOException {
return client.getChildQueueInfos(parent);
}
@Override
public List<QueueUserACLInfo> getQueueAclsInfo() throws YarnException,
IOException {
return client.getQueueAclsInfo();
}
@Override
public ApplicationAttemptReport getApplicationAttemptReport(
ApplicationAttemptId appAttemptId) throws YarnException, IOException {
return client.getApplicationAttemptReport(appAttemptId);
}
@Override
public List<ApplicationAttemptReport> getApplicationAttempts(
ApplicationId appId) throws YarnException, IOException {
return client.getApplicationAttempts(appId);
}
@Override
public ContainerReport getContainerReport(ContainerId containerId)
throws YarnException, IOException {
return client.getContainerReport(containerId);
}
@Override
public List<ContainerReport> getContainers(
ApplicationAttemptId applicationAttemptId) throws YarnException,
IOException {
return client.getContainers(applicationAttemptId);
}
@Override
public void moveApplicationAcrossQueues(ApplicationId appId, String queue)
throws YarnException, IOException {
client.moveApplicationAcrossQueues(appId, queue);
}
@Override
public ReservationSubmissionResponse submitReservation(
ReservationSubmissionRequest request) throws YarnException, IOException {
return client.submitReservation(request);
}
@Override
public ReservationUpdateResponse updateReservation(
ReservationUpdateRequest request) throws YarnException, IOException {
return client.updateReservation(request);
}
@Override
public ReservationDeleteResponse deleteReservation(
ReservationDeleteRequest request) throws YarnException, IOException {
return client.deleteReservation(request);
}
@Override
public Map<NodeId, Set<NodeLabel>> getNodeToLabels() throws YarnException,
IOException {
return client.getNodeToLabels();
}
@Override
public Map<NodeLabel, Set<NodeId>> getLabelsToNodes() throws YarnException,
IOException {
return client.getLabelsToNodes();
}
@Override
public Map<NodeLabel, Set<NodeId>> getLabelsToNodes(Set<String> labels)
throws YarnException, IOException {
return client.getLabelsToNodes(labels);
}
@Override
public List<NodeLabel> getClusterNodeLabels()
throws YarnException, IOException {
return client.getClusterNodeLabels();
}
}
| 15,303
| 31.561702
| 86
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.net.InetSocketAddress;
import java.security.PrivilegedExceptionAction;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.JobStatus;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.LogParams;
import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptRequest;
import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
import org.apache.hadoop.mapreduce.v2.api.records.Counters;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.security.client.ClientToAMTokenIdentifier;
import org.apache.hadoop.yarn.util.ConverterUtils;
import com.google.common.annotations.VisibleForTesting;
public class ClientServiceDelegate {
private static final Log LOG = LogFactory.getLog(ClientServiceDelegate.class);
private static final String UNAVAILABLE = "N/A";
// Caches for per-user NotRunningJobs
private HashMap<JobState, HashMap<String, NotRunningJob>> notRunningJobs;
private final Configuration conf;
private final JobID jobId;
private final ApplicationId appId;
private final ResourceMgrDelegate rm;
private final MRClientProtocol historyServerProxy;
private MRClientProtocol realProxy = null;
private RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
private static String UNKNOWN_USER = "Unknown User";
private String trackingUrl;
private AtomicBoolean usingAMProxy = new AtomicBoolean(false);
private int maxClientRetry;
private boolean amAclDisabledStatusLogged = false;
public ClientServiceDelegate(Configuration conf, ResourceMgrDelegate rm,
JobID jobId, MRClientProtocol historyServerProxy) {
this.conf = new Configuration(conf); // Cloning for modifying.
// For faster redirects from AM to HS.
this.conf.setInt(
CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
this.conf.getInt(MRJobConfig.MR_CLIENT_TO_AM_IPC_MAX_RETRIES,
MRJobConfig.DEFAULT_MR_CLIENT_TO_AM_IPC_MAX_RETRIES));
this.conf.setInt(
CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
this.conf.getInt(MRJobConfig.MR_CLIENT_TO_AM_IPC_MAX_RETRIES_ON_TIMEOUTS,
MRJobConfig.DEFAULT_MR_CLIENT_TO_AM_IPC_MAX_RETRIES_ON_TIMEOUTS));
this.rm = rm;
this.jobId = jobId;
this.historyServerProxy = historyServerProxy;
this.appId = TypeConverter.toYarn(jobId).getAppId();
notRunningJobs = new HashMap<JobState, HashMap<String, NotRunningJob>>();
}
// Get the instance of the NotRunningJob corresponding to the specified
// user and state
private NotRunningJob getNotRunningJob(ApplicationReport applicationReport,
JobState state) {
synchronized (notRunningJobs) {
HashMap<String, NotRunningJob> map = notRunningJobs.get(state);
if (map == null) {
map = new HashMap<String, NotRunningJob>();
notRunningJobs.put(state, map);
}
String user =
(applicationReport == null) ?
UNKNOWN_USER : applicationReport.getUser();
NotRunningJob notRunningJob = map.get(user);
if (notRunningJob == null) {
notRunningJob = new NotRunningJob(applicationReport, state);
map.put(user, notRunningJob);
}
return notRunningJob;
}
}
private MRClientProtocol getProxy() throws IOException {
if (realProxy != null) {
return realProxy;
}
// Possibly allow nulls through the PB tunnel, otherwise deal with an exception
// and redirect to the history server.
ApplicationReport application = null;
try {
application = rm.getApplicationReport(appId);
} catch (ApplicationNotFoundException e) {
application = null;
} catch (YarnException e2) {
throw new IOException(e2);
}
if (application != null) {
trackingUrl = application.getTrackingUrl();
}
InetSocketAddress serviceAddr = null;
while (application == null
|| YarnApplicationState.RUNNING == application
.getYarnApplicationState()) {
if (application == null) {
LOG.info("Could not get Job info from RM for job " + jobId
+ ". Redirecting to job history server.");
return checkAndGetHSProxy(null, JobState.NEW);
}
try {
if (application.getHost() == null || "".equals(application.getHost())) {
LOG.debug("AM not assigned to Job. Waiting to get the AM ...");
Thread.sleep(2000);
LOG.debug("Application state is " + application.getYarnApplicationState());
application = rm.getApplicationReport(appId);
continue;
} else if (UNAVAILABLE.equals(application.getHost())) {
if (!amAclDisabledStatusLogged) {
LOG.info("Job " + jobId + " is running, but the host is unknown."
+ " Verify user has VIEW_JOB access.");
amAclDisabledStatusLogged = true;
}
return getNotRunningJob(application, JobState.RUNNING);
}
if(!conf.getBoolean(MRJobConfig.JOB_AM_ACCESS_DISABLED, false)) {
UserGroupInformation newUgi = UserGroupInformation.createRemoteUser(
UserGroupInformation.getCurrentUser().getUserName());
serviceAddr = NetUtils.createSocketAddrForHost(
application.getHost(), application.getRpcPort());
if (UserGroupInformation.isSecurityEnabled()) {
org.apache.hadoop.yarn.api.records.Token clientToAMToken =
application.getClientToAMToken();
Token<ClientToAMTokenIdentifier> token =
ConverterUtils.convertFromYarn(clientToAMToken, serviceAddr);
newUgi.addToken(token);
}
LOG.debug("Connecting to " + serviceAddr);
final InetSocketAddress finalServiceAddr = serviceAddr;
realProxy = newUgi.doAs(new PrivilegedExceptionAction<MRClientProtocol>() {
@Override
public MRClientProtocol run() throws IOException {
return instantiateAMProxy(finalServiceAddr);
}
});
} else {
if (!amAclDisabledStatusLogged) {
LOG.info("Network ACL closed to AM for job " + jobId
+ ". Not going to try to reach the AM.");
amAclDisabledStatusLogged = true;
}
return getNotRunningJob(null, JobState.RUNNING);
}
return realProxy;
} catch (IOException e) {
//possibly the AM has crashed
//there may be some time before AM is restarted
//keep retrying by getting the address from RM
LOG.info("Could not connect to " + serviceAddr +
". Waiting for getting the latest AM address...");
try {
Thread.sleep(2000);
} catch (InterruptedException e1) {
LOG.warn("getProxy() call interruped", e1);
throw new YarnRuntimeException(e1);
}
try {
application = rm.getApplicationReport(appId);
} catch (YarnException e1) {
throw new IOException(e1);
}
if (application == null) {
LOG.info("Could not get Job info from RM for job " + jobId
+ ". Redirecting to job history server.");
return checkAndGetHSProxy(null, JobState.RUNNING);
}
} catch (InterruptedException e) {
LOG.warn("getProxy() call interruped", e);
throw new YarnRuntimeException(e);
} catch (YarnException e) {
throw new IOException(e);
}
}
    /** We just want to return if it's allocating, so that we don't
* block on it. This is to be able to return job status
* on an allocating Application.
*/
String user = application.getUser();
if (user == null) {
throw new IOException("User is not set in the application report");
}
if (application.getYarnApplicationState() == YarnApplicationState.NEW
|| application.getYarnApplicationState() ==
YarnApplicationState.NEW_SAVING
|| application.getYarnApplicationState() == YarnApplicationState.SUBMITTED
|| application.getYarnApplicationState() == YarnApplicationState.ACCEPTED) {
realProxy = null;
return getNotRunningJob(application, JobState.NEW);
}
if (application.getYarnApplicationState() == YarnApplicationState.FAILED) {
realProxy = null;
return getNotRunningJob(application, JobState.FAILED);
}
if (application.getYarnApplicationState() == YarnApplicationState.KILLED) {
realProxy = null;
return getNotRunningJob(application, JobState.KILLED);
}
//History server can serve a job only if application
//succeeded.
if (application.getYarnApplicationState() == YarnApplicationState.FINISHED) {
LOG.info("Application state is completed. FinalApplicationStatus="
+ application.getFinalApplicationStatus().toString()
+ ". Redirecting to job history server");
realProxy = checkAndGetHSProxy(application, JobState.SUCCEEDED);
}
return realProxy;
}
private MRClientProtocol checkAndGetHSProxy(
ApplicationReport applicationReport, JobState state) {
if (null == historyServerProxy) {
LOG.warn("Job History Server is not configured.");
return getNotRunningJob(applicationReport, state);
}
return historyServerProxy;
}
MRClientProtocol instantiateAMProxy(final InetSocketAddress serviceAddr)
throws IOException {
LOG.trace("Connecting to ApplicationMaster at: " + serviceAddr);
YarnRPC rpc = YarnRPC.create(conf);
MRClientProtocol proxy =
(MRClientProtocol) rpc.getProxy(MRClientProtocol.class,
serviceAddr, conf);
usingAMProxy.set(true);
LOG.trace("Connected to ApplicationMaster at: " + serviceAddr);
return proxy;
}
private synchronized Object invoke(String method, Class argClass,
Object args) throws IOException {
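    // Look up the requested MRClientProtocol method reflectively; it is
    // invoked below against whichever proxy (AM or history server) is
    // currently reachable, retrying on failure.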
Method methodOb = null;
try {
methodOb = MRClientProtocol.class.getMethod(method, argClass);
} catch (SecurityException e) {
throw new YarnRuntimeException(e);
} catch (NoSuchMethodException e) {
throw new YarnRuntimeException("Method name mismatch", e);
}
maxClientRetry = this.conf.getInt(
MRJobConfig.MR_CLIENT_MAX_RETRIES,
MRJobConfig.DEFAULT_MR_CLIENT_MAX_RETRIES);
IOException lastException = null;
while (maxClientRetry > 0) {
MRClientProtocol MRClientProxy = null;
try {
MRClientProxy = getProxy();
return methodOb.invoke(MRClientProxy, args);
} catch (InvocationTargetException e) {
// Will not throw out YarnException anymore
LOG.debug("Failed to contact AM/History for job " + jobId +
" retrying..", e.getTargetException());
// Force reconnection by setting the proxy to null.
realProxy = null;
// HS/AMS shut down
if (e.getCause() instanceof AuthorizationException) {
throw new IOException(e.getTargetException());
}
          // If the AM was shut down, do not decrement maxClientRetry, as we
          // wait for the AM to be restarted.
if (!usingAMProxy.get()) {
maxClientRetry--;
}
usingAMProxy.set(false);
lastException = new IOException(e.getTargetException());
try {
Thread.sleep(100);
} catch (InterruptedException ie) {
LOG.warn("ClientServiceDelegate invoke call interrupted", ie);
throw new YarnRuntimeException(ie);
}
} catch (Exception e) {
LOG.debug("Failed to contact AM/History for job " + jobId
+ " Will retry..", e);
// Force reconnection by setting the proxy to null.
realProxy = null;
// RM shutdown
maxClientRetry--;
lastException = new IOException(e.getMessage());
try {
Thread.sleep(100);
} catch (InterruptedException ie) {
LOG.warn("ClientServiceDelegate invoke call interrupted", ie);
throw new YarnRuntimeException(ie);
}
}
}
throw lastException;
}
// Only for testing
@VisibleForTesting
public int getMaxClientRetry() {
return this.maxClientRetry;
}
public org.apache.hadoop.mapreduce.Counters getJobCounters(JobID arg0) throws IOException,
InterruptedException {
org.apache.hadoop.mapreduce.v2.api.records.JobId jobID = TypeConverter.toYarn(arg0);
GetCountersRequest request = recordFactory.newRecordInstance(GetCountersRequest.class);
request.setJobId(jobID);
Counters cnt = ((GetCountersResponse)
invoke("getCounters", GetCountersRequest.class, request)).getCounters();
return TypeConverter.fromYarn(cnt);
}
public TaskCompletionEvent[] getTaskCompletionEvents(JobID arg0, int arg1, int arg2)
throws IOException, InterruptedException {
org.apache.hadoop.mapreduce.v2.api.records.JobId jobID = TypeConverter
.toYarn(arg0);
GetTaskAttemptCompletionEventsRequest request = recordFactory
.newRecordInstance(GetTaskAttemptCompletionEventsRequest.class);
request.setJobId(jobID);
request.setFromEventId(arg1);
request.setMaxEvents(arg2);
List<org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent> list =
((GetTaskAttemptCompletionEventsResponse) invoke(
"getTaskAttemptCompletionEvents", GetTaskAttemptCompletionEventsRequest.class, request)).
getCompletionEventList();
return TypeConverter
.fromYarn(list
.toArray(new org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent[0]));
}
public String[] getTaskDiagnostics(org.apache.hadoop.mapreduce.TaskAttemptID arg0)
throws IOException, InterruptedException {
org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID = TypeConverter
.toYarn(arg0);
GetDiagnosticsRequest request = recordFactory
.newRecordInstance(GetDiagnosticsRequest.class);
request.setTaskAttemptId(attemptID);
List<String> list = ((GetDiagnosticsResponse) invoke("getDiagnostics",
GetDiagnosticsRequest.class, request)).getDiagnosticsList();
String[] result = new String[list.size()];
int i = 0;
for (String c : list) {
result[i++] = c.toString();
}
return result;
}
public JobStatus getJobStatus(JobID oldJobID) throws IOException {
org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
TypeConverter.toYarn(oldJobID);
GetJobReportRequest request =
recordFactory.newRecordInstance(GetJobReportRequest.class);
request.setJobId(jobId);
JobReport report = ((GetJobReportResponse) invoke("getJobReport",
GetJobReportRequest.class, request)).getJobReport();
JobStatus jobStatus = null;
if (report != null) {
if (StringUtils.isEmpty(report.getJobFile())) {
String jobFile = MRApps.getJobFile(conf, report.getUser(), oldJobID);
report.setJobFile(jobFile);
}
String historyTrackingUrl = report.getTrackingUrl();
String url = StringUtils.isNotEmpty(historyTrackingUrl)
? historyTrackingUrl : trackingUrl;
jobStatus = TypeConverter.fromYarn(report, url);
}
return jobStatus;
}
public org.apache.hadoop.mapreduce.TaskReport[] getTaskReports(JobID oldJobID, TaskType taskType)
throws IOException{
org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
TypeConverter.toYarn(oldJobID);
GetTaskReportsRequest request =
recordFactory.newRecordInstance(GetTaskReportsRequest.class);
request.setJobId(jobId);
request.setTaskType(TypeConverter.toYarn(taskType));
List<org.apache.hadoop.mapreduce.v2.api.records.TaskReport> taskReports =
((GetTaskReportsResponse) invoke("getTaskReports", GetTaskReportsRequest.class,
request)).getTaskReportList();
return TypeConverter.fromYarn
(taskReports).toArray(new org.apache.hadoop.mapreduce.TaskReport[0]);
}
public boolean killTask(TaskAttemptID taskAttemptID, boolean fail)
throws IOException {
org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID
= TypeConverter.toYarn(taskAttemptID);
if (fail) {
FailTaskAttemptRequest failRequest = recordFactory.newRecordInstance(FailTaskAttemptRequest.class);
failRequest.setTaskAttemptId(attemptID);
invoke("failTaskAttempt", FailTaskAttemptRequest.class, failRequest);
} else {
KillTaskAttemptRequest killRequest = recordFactory.newRecordInstance(KillTaskAttemptRequest.class);
killRequest.setTaskAttemptId(attemptID);
invoke("killTaskAttempt", KillTaskAttemptRequest.class, killRequest);
}
return true;
}
public boolean killJob(JobID oldJobID)
throws IOException {
org.apache.hadoop.mapreduce.v2.api.records.JobId jobId
= TypeConverter.toYarn(oldJobID);
KillJobRequest killRequest = recordFactory.newRecordInstance(KillJobRequest.class);
killRequest.setJobId(jobId);
invoke("killJob", KillJobRequest.class, killRequest);
return true;
}
public LogParams getLogFilePath(JobID oldJobID, TaskAttemptID oldTaskAttemptID)
throws IOException {
org.apache.hadoop.mapreduce.v2.api.records.JobId jobId =
TypeConverter.toYarn(oldJobID);
GetJobReportRequest request =
recordFactory.newRecordInstance(GetJobReportRequest.class);
request.setJobId(jobId);
JobReport report =
((GetJobReportResponse) invoke("getJobReport",
GetJobReportRequest.class, request)).getJobReport();
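    // Log locations can only be resolved once the job has reached a terminal
    // state; otherwise an IOException is thrown below.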
if (EnumSet.of(JobState.SUCCEEDED, JobState.FAILED, JobState.KILLED,
JobState.ERROR).contains(report.getJobState())) {
if (oldTaskAttemptID != null) {
GetTaskAttemptReportRequest taRequest =
recordFactory.newRecordInstance(GetTaskAttemptReportRequest.class);
taRequest.setTaskAttemptId(TypeConverter.toYarn(oldTaskAttemptID));
TaskAttemptReport taReport =
((GetTaskAttemptReportResponse) invoke("getTaskAttemptReport",
GetTaskAttemptReportRequest.class, taRequest))
.getTaskAttemptReport();
if (taReport.getContainerId() == null
|| taReport.getNodeManagerHost() == null) {
throw new IOException("Unable to get log information for task: "
+ oldTaskAttemptID);
}
return new LogParams(
taReport.getContainerId().toString(),
taReport.getContainerId().getApplicationAttemptId()
.getApplicationId().toString(),
NodeId.newInstance(taReport.getNodeManagerHost(),
taReport.getNodeManagerPort()).toString(), report.getUser());
} else {
if (report.getAMInfos() == null || report.getAMInfos().size() == 0) {
throw new IOException("Unable to get log information for job: "
+ oldJobID);
}
AMInfo amInfo = report.getAMInfos().get(report.getAMInfos().size() - 1);
return new LogParams(
amInfo.getContainerId().toString(),
amInfo.getAppAttemptId().getApplicationId().toString(),
NodeId.newInstance(amInfo.getNodeManagerHost(),
amInfo.getNodeManagerPort()).toString(), report.getUser());
}
} else {
throw new IOException("Cannot get log path for a in-progress job");
}
}
}
| 23,081
| 42.063433
| 105
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.HashMap;
import org.apache.commons.lang.NotImplementedException;
import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.RenewDelegationTokenRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.RenewDelegationTokenResponse;
import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;
import org.apache.hadoop.mapreduce.v2.api.records.Counters;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
public class NotRunningJob implements MRClientProtocol {
private RecordFactory recordFactory =
RecordFactoryProvider.getRecordFactory(null);
private final JobState jobState;
private final ApplicationReport applicationReport;
private ApplicationReport getUnknownApplicationReport() {
ApplicationId unknownAppId = recordFactory
.newRecordInstance(ApplicationId.class);
ApplicationAttemptId unknownAttemptId = recordFactory
.newRecordInstance(ApplicationAttemptId.class);
// Setting AppState to NEW and finalStatus to UNDEFINED as they are never
    // used for a non-running job.
return ApplicationReport.newInstance(unknownAppId, unknownAttemptId,
"N/A", "N/A", "N/A", "N/A", 0, null, YarnApplicationState.NEW, "N/A",
"N/A", 0, 0, FinalApplicationStatus.UNDEFINED, null, "N/A", 0.0f,
YarnConfiguration.DEFAULT_APPLICATION_TYPE, null);
}
NotRunningJob(ApplicationReport applicationReport, JobState jobState) {
this.applicationReport =
(applicationReport == null) ?
getUnknownApplicationReport() : applicationReport;
this.jobState = jobState;
}
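  /*
   * Editor's note (illustrative only, not part of the original class): a
   * NotRunningJob is a stub MRClientProtocol that answers with empty or
   * NEW-state records on behalf of an application that is not running.
   * A minimal sketch, assuming a null report and an existing jobId:
   *
   *   MRClientProtocol stub = new NotRunningJob(null, JobState.NEW);
   *   GetJobReportRequest req = RecordFactoryProvider.getRecordFactory(null)
   *       .newRecordInstance(GetJobReportRequest.class);
   *   req.setJobId(jobId);
   *   JobReport report = stub.getJobReport(req).getJobReport();
   *   // report.getJobState() == JobState.NEW; the counters and task reports
   *   // returned by the other methods are empty stubs
   */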
@Override
public FailTaskAttemptResponse failTaskAttempt(
FailTaskAttemptRequest request) throws IOException {
FailTaskAttemptResponse resp =
recordFactory.newRecordInstance(FailTaskAttemptResponse.class);
return resp;
}
@Override
public GetCountersResponse getCounters(GetCountersRequest request)
throws IOException {
GetCountersResponse resp =
recordFactory.newRecordInstance(GetCountersResponse.class);
Counters counters = recordFactory.newRecordInstance(Counters.class);
counters.addAllCounterGroups(new HashMap<String, CounterGroup>());
resp.setCounters(counters);
return resp;
}
@Override
public GetDiagnosticsResponse getDiagnostics(GetDiagnosticsRequest request)
throws IOException {
GetDiagnosticsResponse resp =
recordFactory.newRecordInstance(GetDiagnosticsResponse.class);
resp.addDiagnostics("");
return resp;
}
@Override
public GetJobReportResponse getJobReport(GetJobReportRequest request)
throws IOException {
JobReport jobReport =
recordFactory.newRecordInstance(JobReport.class);
jobReport.setJobId(request.getJobId());
jobReport.setJobState(jobState);
jobReport.setUser(applicationReport.getUser());
jobReport.setStartTime(applicationReport.getStartTime());
jobReport.setDiagnostics(applicationReport.getDiagnostics());
jobReport.setJobName(applicationReport.getName());
jobReport.setTrackingUrl(applicationReport.getTrackingUrl());
jobReport.setFinishTime(applicationReport.getFinishTime());
GetJobReportResponse resp =
recordFactory.newRecordInstance(GetJobReportResponse.class);
resp.setJobReport(jobReport);
return resp;
}
@Override
public GetTaskAttemptCompletionEventsResponse getTaskAttemptCompletionEvents(
GetTaskAttemptCompletionEventsRequest request)
throws IOException {
GetTaskAttemptCompletionEventsResponse resp =
recordFactory.newRecordInstance(GetTaskAttemptCompletionEventsResponse.class);
resp.addAllCompletionEvents(new ArrayList<TaskAttemptCompletionEvent>());
return resp;
}
@Override
public GetTaskAttemptReportResponse getTaskAttemptReport(
GetTaskAttemptReportRequest request) throws IOException {
//not invoked by anybody
throw new NotImplementedException();
}
@Override
public GetTaskReportResponse getTaskReport(GetTaskReportRequest request)
throws IOException {
GetTaskReportResponse resp =
recordFactory.newRecordInstance(GetTaskReportResponse.class);
TaskReport report = recordFactory.newRecordInstance(TaskReport.class);
report.setTaskId(request.getTaskId());
report.setTaskState(TaskState.NEW);
Counters counters = recordFactory.newRecordInstance(Counters.class);
counters.addAllCounterGroups(new HashMap<String, CounterGroup>());
report.setCounters(counters);
    report.addAllRunningAttempts(new ArrayList<TaskAttemptId>());
    // Attach the stub report to the response so callers actually receive it.
    resp.setTaskReport(report);
    return resp;
}
@Override
public GetTaskReportsResponse getTaskReports(GetTaskReportsRequest request)
throws IOException {
GetTaskReportsResponse resp =
recordFactory.newRecordInstance(GetTaskReportsResponse.class);
resp.addAllTaskReports(new ArrayList<TaskReport>());
return resp;
}
@Override
public KillJobResponse killJob(KillJobRequest request)
throws IOException {
KillJobResponse resp =
recordFactory.newRecordInstance(KillJobResponse.class);
return resp;
}
@Override
public KillTaskResponse killTask(KillTaskRequest request)
throws IOException {
KillTaskResponse resp =
recordFactory.newRecordInstance(KillTaskResponse.class);
return resp;
}
@Override
public KillTaskAttemptResponse killTaskAttempt(
KillTaskAttemptRequest request) throws IOException {
KillTaskAttemptResponse resp =
recordFactory.newRecordInstance(KillTaskAttemptResponse.class);
return resp;
}
@Override
public GetDelegationTokenResponse getDelegationToken(
GetDelegationTokenRequest request) throws IOException {
/* Should not be invoked by anyone. */
throw new NotImplementedException();
}
@Override
public RenewDelegationTokenResponse renewDelegationToken(
RenewDelegationTokenRequest request) throws IOException {
/* Should not be invoked by anyone. */
throw new NotImplementedException();
}
@Override
public CancelDelegationTokenResponse cancelDelegationToken(
CancelDelegationTokenRequest request) throws IOException {
/* Should not be invoked by anyone. */
throw new NotImplementedException();
}
@Override
public InetSocketAddress getConnectAddress() {
/* Should not be invoked by anyone. Normally used to set token service */
throw new NotImplementedException();
}
}
| 10,337
| 41.54321
| 97
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YarnClientProtocolProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
import org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider;
public class YarnClientProtocolProvider extends ClientProtocolProvider {
@Override
public ClientProtocol create(Configuration conf) throws IOException {
if (MRConfig.YARN_FRAMEWORK_NAME.equals(conf.get(MRConfig.FRAMEWORK_NAME))) {
return new YARNRunner(conf);
}
return null;
}
@Override
public ClientProtocol create(InetSocketAddress addr, Configuration conf)
throws IOException {
return create(conf);
}
@Override
public void close(ClientProtocol clientProtocol) throws IOException {
// nothing to do
}
}
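/*
 * Editor's note (illustrative only, not part of the original file): this
 * provider is discovered through the ClientProtocolProvider service loader,
 * so a client only has to select the YARN framework in its configuration.
 * A minimal sketch, assuming a standard client-side Configuration:
 *
 *   Configuration conf = new Configuration();
 *   conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME); // "yarn"
 *   Cluster cluster = new Cluster(conf); // create(conf) above supplies a YARNRunner
 */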
| 1,685
| 32.058824
| 81
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Vector;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus;
import org.apache.hadoop.mapreduce.ClusterMetrics;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.JobStatus;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.QueueAclsInfo;
import org.apache.hadoop.mapreduce.QueueInfo;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskCompletionEvent;
import org.apache.hadoop.mapreduce.TaskReport;
import org.apache.hadoop.mapreduce.TaskTrackerInfo;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.mapreduce.v2.LogParams;
import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest;
import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.ReservationId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.api.records.URL;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.security.client.RMDelegationTokenSelector;
import org.apache.hadoop.yarn.util.ConverterUtils;
import com.google.common.annotations.VisibleForTesting;
/**
* This class enables the current JobClient (0.22 hadoop) to run on YARN.
*/
@SuppressWarnings("unchecked")
public class YARNRunner implements ClientProtocol {
private static final Log LOG = LogFactory.getLog(YARNRunner.class);
private final static RecordFactory recordFactory = RecordFactoryProvider
.getRecordFactory(null);
public final static Priority AM_CONTAINER_PRIORITY = recordFactory
.newRecordInstance(Priority.class);
static {
AM_CONTAINER_PRIORITY.setPriority(0);
}
private ResourceMgrDelegate resMgrDelegate;
private ClientCache clientCache;
private Configuration conf;
private final FileContext defaultFileContext;
/**
   * YARNRunner encapsulates the client interface of
   * YARN.
* @param conf the configuration object for the client
*/
public YARNRunner(Configuration conf) {
this(conf, new ResourceMgrDelegate(new YarnConfiguration(conf)));
}
/**
   * Similar to {@link #YARNRunner(Configuration)} but allows injecting a
   * {@link ResourceMgrDelegate}, which enables mocking and testing.
* @param conf the configuration object for the client
* @param resMgrDelegate the resourcemanager client handle.
*/
public YARNRunner(Configuration conf, ResourceMgrDelegate resMgrDelegate) {
this(conf, resMgrDelegate, new ClientCache(conf, resMgrDelegate));
}
/**
* Similar to {@link YARNRunner#YARNRunner(Configuration, ResourceMgrDelegate)}
   * but allows injecting a {@link ClientCache}, which enables mocking and testing.
* @param conf the configuration object
* @param resMgrDelegate the resource manager delegate
* @param clientCache the client cache object.
*/
public YARNRunner(Configuration conf, ResourceMgrDelegate resMgrDelegate,
ClientCache clientCache) {
this.conf = conf;
try {
this.resMgrDelegate = resMgrDelegate;
this.clientCache = clientCache;
this.defaultFileContext = FileContext.getFileContext(this.conf);
} catch (UnsupportedFileSystemException ufe) {
throw new RuntimeException("Error in instantiating YarnClient", ufe);
}
}
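  /*
   * Editor's note (illustrative only, not part of the original class): the
   * constructors above allow the ResourceMgrDelegate and ClientCache to be
   * injected, which is what makes mocking possible. A minimal sketch using
   * Mockito-style mocks (the mocks and values below are assumptions):
   *
   *   Configuration conf = new Configuration();
   *   ResourceMgrDelegate rmDelegate = mock(ResourceMgrDelegate.class);
   *   ClientCache cache = mock(ClientCache.class);
   *   YARNRunner runner = new YARNRunner(conf, rmDelegate, cache);
   *   // calls such as runner.getClusterMetrics() now hit the mocked delegate
   */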
  /**
   * Used mostly for testing.
   * @param resMgrDelegate the resource manager delegate to set to.
   */
  @Private
public void setResourceMgrDelegate(ResourceMgrDelegate resMgrDelegate) {
this.resMgrDelegate = resMgrDelegate;
}
@Override
public void cancelDelegationToken(Token<DelegationTokenIdentifier> arg0)
throws IOException, InterruptedException {
throw new UnsupportedOperationException("Use Token.renew instead");
}
@Override
public TaskTrackerInfo[] getActiveTrackers() throws IOException,
InterruptedException {
return resMgrDelegate.getActiveTrackers();
}
@Override
public JobStatus[] getAllJobs() throws IOException, InterruptedException {
return resMgrDelegate.getAllJobs();
}
@Override
public TaskTrackerInfo[] getBlacklistedTrackers() throws IOException,
InterruptedException {
return resMgrDelegate.getBlacklistedTrackers();
}
@Override
public ClusterMetrics getClusterMetrics() throws IOException,
InterruptedException {
return resMgrDelegate.getClusterMetrics();
}
@VisibleForTesting
void addHistoryToken(Credentials ts) throws IOException, InterruptedException {
/* check if we have a hsproxy, if not, no need */
MRClientProtocol hsProxy = clientCache.getInitializedHSProxy();
if (UserGroupInformation.isSecurityEnabled() && (hsProxy != null)) {
/*
       * Note that getDelegationToken was called. Again, this is a hack for Oozie
       * to make sure we add the history server delegation tokens to the credentials.
*/
RMDelegationTokenSelector tokenSelector = new RMDelegationTokenSelector();
Text service = resMgrDelegate.getRMDelegationTokenService();
if (tokenSelector.selectToken(service, ts.getAllTokens()) != null) {
Text hsService = SecurityUtil.buildTokenService(hsProxy
.getConnectAddress());
if (ts.getToken(hsService) == null) {
ts.addToken(hsService, getDelegationTokenFromHS(hsProxy));
}
}
}
}
@VisibleForTesting
Token<?> getDelegationTokenFromHS(MRClientProtocol hsProxy)
throws IOException, InterruptedException {
GetDelegationTokenRequest request = recordFactory
.newRecordInstance(GetDelegationTokenRequest.class);
request.setRenewer(Master.getMasterPrincipal(conf));
org.apache.hadoop.yarn.api.records.Token mrDelegationToken;
mrDelegationToken = hsProxy.getDelegationToken(request)
.getDelegationToken();
return ConverterUtils.convertFromYarn(mrDelegationToken,
hsProxy.getConnectAddress());
}
@Override
public Token<DelegationTokenIdentifier> getDelegationToken(Text renewer)
throws IOException, InterruptedException {
// The token is only used for serialization. So the type information
// mismatch should be fine.
return resMgrDelegate.getDelegationToken(renewer);
}
@Override
public String getFilesystemName() throws IOException, InterruptedException {
return resMgrDelegate.getFilesystemName();
}
@Override
public JobID getNewJobID() throws IOException, InterruptedException {
return resMgrDelegate.getNewJobID();
}
@Override
public QueueInfo getQueue(String queueName) throws IOException,
InterruptedException {
return resMgrDelegate.getQueue(queueName);
}
@Override
public QueueAclsInfo[] getQueueAclsForCurrentUser() throws IOException,
InterruptedException {
return resMgrDelegate.getQueueAclsForCurrentUser();
}
@Override
public QueueInfo[] getQueues() throws IOException, InterruptedException {
return resMgrDelegate.getQueues();
}
@Override
public QueueInfo[] getRootQueues() throws IOException, InterruptedException {
return resMgrDelegate.getRootQueues();
}
@Override
public QueueInfo[] getChildQueues(String parent) throws IOException,
InterruptedException {
return resMgrDelegate.getChildQueues(parent);
}
@Override
public String getStagingAreaDir() throws IOException, InterruptedException {
return resMgrDelegate.getStagingAreaDir();
}
@Override
public String getSystemDir() throws IOException, InterruptedException {
return resMgrDelegate.getSystemDir();
}
@Override
public long getTaskTrackerExpiryInterval() throws IOException,
InterruptedException {
return resMgrDelegate.getTaskTrackerExpiryInterval();
}
@Override
public JobStatus submitJob(JobID jobId, String jobSubmitDir, Credentials ts)
throws IOException, InterruptedException {
addHistoryToken(ts);
// Construct necessary information to start the MR AM
ApplicationSubmissionContext appContext =
createApplicationSubmissionContext(conf, jobSubmitDir, ts);
// Submit to ResourceManager
try {
ApplicationId applicationId =
resMgrDelegate.submitApplication(appContext);
ApplicationReport appMaster = resMgrDelegate
.getApplicationReport(applicationId);
String diagnostics =
(appMaster == null ?
"application report is null" : appMaster.getDiagnostics());
if (appMaster == null
|| appMaster.getYarnApplicationState() == YarnApplicationState.FAILED
|| appMaster.getYarnApplicationState() == YarnApplicationState.KILLED) {
throw new IOException("Failed to run job : " +
diagnostics);
}
return clientCache.getClient(jobId).getJobStatus(jobId);
} catch (YarnException e) {
throw new IOException(e);
}
}
private LocalResource createApplicationResource(FileContext fs, Path p, LocalResourceType type)
throws IOException {
LocalResource rsrc = recordFactory.newRecordInstance(LocalResource.class);
FileStatus rsrcStat = fs.getFileStatus(p);
rsrc.setResource(ConverterUtils.getYarnUrlFromPath(fs
.getDefaultFileSystem().resolvePath(rsrcStat.getPath())));
rsrc.setSize(rsrcStat.getLen());
rsrc.setTimestamp(rsrcStat.getModificationTime());
rsrc.setType(type);
rsrc.setVisibility(LocalResourceVisibility.APPLICATION);
return rsrc;
}
public ApplicationSubmissionContext createApplicationSubmissionContext(
Configuration jobConf,
String jobSubmitDir, Credentials ts) throws IOException {
ApplicationId applicationId = resMgrDelegate.getApplicationId();
// Setup resource requirements
Resource capability = recordFactory.newRecordInstance(Resource.class);
capability.setMemory(
conf.getInt(
MRJobConfig.MR_AM_VMEM_MB, MRJobConfig.DEFAULT_MR_AM_VMEM_MB
)
);
capability.setVirtualCores(
conf.getInt(
MRJobConfig.MR_AM_CPU_VCORES, MRJobConfig.DEFAULT_MR_AM_CPU_VCORES
)
);
LOG.debug("AppMaster capability = " + capability);
// Setup LocalResources
Map<String, LocalResource> localResources =
new HashMap<String, LocalResource>();
Path jobConfPath = new Path(jobSubmitDir, MRJobConfig.JOB_CONF_FILE);
URL yarnUrlForJobSubmitDir = ConverterUtils
.getYarnUrlFromPath(defaultFileContext.getDefaultFileSystem()
.resolvePath(
defaultFileContext.makeQualified(new Path(jobSubmitDir))));
LOG.debug("Creating setup context, jobSubmitDir url is "
+ yarnUrlForJobSubmitDir);
localResources.put(MRJobConfig.JOB_CONF_FILE,
createApplicationResource(defaultFileContext,
jobConfPath, LocalResourceType.FILE));
if (jobConf.get(MRJobConfig.JAR) != null) {
Path jobJarPath = new Path(jobConf.get(MRJobConfig.JAR));
LocalResource rc = createApplicationResource(
FileContext.getFileContext(jobJarPath.toUri(), jobConf),
jobJarPath,
LocalResourceType.PATTERN);
String pattern = conf.getPattern(JobContext.JAR_UNPACK_PATTERN,
JobConf.UNPACK_JAR_PATTERN_DEFAULT).pattern();
rc.setPattern(pattern);
localResources.put(MRJobConfig.JOB_JAR, rc);
} else {
      // Job jar may be null. For example, for pipes, the job jar is the hadoop
      // mapreduce jar itself, which is already on the classpath.
LOG.info("Job jar is not present. "
+ "Not adding any jar to the list of resources.");
}
// TODO gross hack
for (String s : new String[] {
MRJobConfig.JOB_SPLIT,
MRJobConfig.JOB_SPLIT_METAINFO }) {
localResources.put(
MRJobConfig.JOB_SUBMIT_DIR + "/" + s,
createApplicationResource(defaultFileContext,
new Path(jobSubmitDir, s), LocalResourceType.FILE));
}
// Setup security tokens
DataOutputBuffer dob = new DataOutputBuffer();
ts.writeTokenStorageToStream(dob);
ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
// Setup the command to run the AM
List<String> vargs = new ArrayList<String>(8);
vargs.add(MRApps.crossPlatformifyMREnv(jobConf, Environment.JAVA_HOME)
+ "/bin/java");
MRApps.addLog4jSystemProperties(null, vargs, conf);
// Check for Java Lib Path usage in MAP and REDUCE configs
warnForJavaLibPath(conf.get(MRJobConfig.MAP_JAVA_OPTS,""), "map",
MRJobConfig.MAP_JAVA_OPTS, MRJobConfig.MAP_ENV);
warnForJavaLibPath(conf.get(MRJobConfig.MAPRED_MAP_ADMIN_JAVA_OPTS,""), "map",
MRJobConfig.MAPRED_MAP_ADMIN_JAVA_OPTS, MRJobConfig.MAPRED_ADMIN_USER_ENV);
warnForJavaLibPath(conf.get(MRJobConfig.REDUCE_JAVA_OPTS,""), "reduce",
MRJobConfig.REDUCE_JAVA_OPTS, MRJobConfig.REDUCE_ENV);
warnForJavaLibPath(conf.get(MRJobConfig.MAPRED_REDUCE_ADMIN_JAVA_OPTS,""), "reduce",
MRJobConfig.MAPRED_REDUCE_ADMIN_JAVA_OPTS, MRJobConfig.MAPRED_ADMIN_USER_ENV);
// Add AM admin command opts before user command opts
    // so that they can be overridden by the user
String mrAppMasterAdminOptions = conf.get(MRJobConfig.MR_AM_ADMIN_COMMAND_OPTS,
MRJobConfig.DEFAULT_MR_AM_ADMIN_COMMAND_OPTS);
warnForJavaLibPath(mrAppMasterAdminOptions, "app master",
MRJobConfig.MR_AM_ADMIN_COMMAND_OPTS, MRJobConfig.MR_AM_ADMIN_USER_ENV);
vargs.add(mrAppMasterAdminOptions);
// Add AM user command opts
String mrAppMasterUserOptions = conf.get(MRJobConfig.MR_AM_COMMAND_OPTS,
MRJobConfig.DEFAULT_MR_AM_COMMAND_OPTS);
warnForJavaLibPath(mrAppMasterUserOptions, "app master",
MRJobConfig.MR_AM_COMMAND_OPTS, MRJobConfig.MR_AM_ENV);
vargs.add(mrAppMasterUserOptions);
if (jobConf.getBoolean(MRJobConfig.MR_AM_PROFILE,
MRJobConfig.DEFAULT_MR_AM_PROFILE)) {
final String profileParams = jobConf.get(MRJobConfig.MR_AM_PROFILE_PARAMS,
MRJobConfig.DEFAULT_TASK_PROFILE_PARAMS);
if (profileParams != null) {
vargs.add(String.format(profileParams,
ApplicationConstants.LOG_DIR_EXPANSION_VAR + Path.SEPARATOR
+ TaskLog.LogName.PROFILE));
}
}
vargs.add(MRJobConfig.APPLICATION_MASTER_CLASS);
vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR +
Path.SEPARATOR + ApplicationConstants.STDOUT);
vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR +
Path.SEPARATOR + ApplicationConstants.STDERR);
Vector<String> vargsFinal = new Vector<String>(8);
// Final command
StringBuilder mergedCommand = new StringBuilder();
for (CharSequence str : vargs) {
mergedCommand.append(str).append(" ");
}
vargsFinal.add(mergedCommand.toString());
LOG.debug("Command to launch container for ApplicationMaster is : "
+ mergedCommand);
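    // Editor's note (illustrative only): with default settings the merged
    // command assembled above expands to something roughly like
    //   <JAVA_HOME>/bin/java <log4j properties> <admin opts> -Xmx1024m
    //     org.apache.hadoop.mapreduce.v2.app.MRAppMaster
    //     1><LOG_DIR>/stdout 2><LOG_DIR>/stderr
    // The exact flags depend on the admin/user command-opts and log4j values
    // resolved above; the expansion shown here is an assumption.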
// Setup the CLASSPATH in environment
// i.e. add { Hadoop jars, job jar, CWD } to classpath.
Map<String, String> environment = new HashMap<String, String>();
MRApps.setClasspath(environment, conf);
// Shell
environment.put(Environment.SHELL.name(),
conf.get(MRJobConfig.MAPRED_ADMIN_USER_SHELL,
MRJobConfig.DEFAULT_SHELL));
// Add the container working directory at the front of LD_LIBRARY_PATH
MRApps.addToEnvironment(environment, Environment.LD_LIBRARY_PATH.name(),
MRApps.crossPlatformifyMREnv(conf, Environment.PWD), conf);
// Setup the environment variables for Admin first
MRApps.setEnvFromInputString(environment,
conf.get(MRJobConfig.MR_AM_ADMIN_USER_ENV), conf);
// Setup the environment variables (LD_LIBRARY_PATH, etc)
MRApps.setEnvFromInputString(environment,
conf.get(MRJobConfig.MR_AM_ENV), conf);
// Parse distributed cache
MRApps.setupDistributedCache(jobConf, localResources);
Map<ApplicationAccessType, String> acls
= new HashMap<ApplicationAccessType, String>(2);
acls.put(ApplicationAccessType.VIEW_APP, jobConf.get(
MRJobConfig.JOB_ACL_VIEW_JOB, MRJobConfig.DEFAULT_JOB_ACL_VIEW_JOB));
acls.put(ApplicationAccessType.MODIFY_APP, jobConf.get(
MRJobConfig.JOB_ACL_MODIFY_JOB,
MRJobConfig.DEFAULT_JOB_ACL_MODIFY_JOB));
// Setup ContainerLaunchContext for AM container
ContainerLaunchContext amContainer =
ContainerLaunchContext.newInstance(localResources, environment,
vargsFinal, null, securityTokens, acls);
Collection<String> tagsFromConf =
jobConf.getTrimmedStringCollection(MRJobConfig.JOB_TAGS);
// Set up the ApplicationSubmissionContext
ApplicationSubmissionContext appContext =
recordFactory.newRecordInstance(ApplicationSubmissionContext.class);
appContext.setApplicationId(applicationId); // ApplicationId
appContext.setQueue( // Queue name
jobConf.get(JobContext.QUEUE_NAME,
YarnConfiguration.DEFAULT_QUEUE_NAME));
// add reservationID if present
ReservationId reservationID = null;
try {
reservationID =
ReservationId.parseReservationId(jobConf
.get(JobContext.RESERVATION_ID));
} catch (NumberFormatException e) {
      // throw an exception as the reservationId is invalid
String errMsg =
"Invalid reservationId: " + jobConf.get(JobContext.RESERVATION_ID)
+ " specified for the app: " + applicationId;
LOG.warn(errMsg);
throw new IOException(errMsg);
}
if (reservationID != null) {
appContext.setReservationID(reservationID);
LOG.info("SUBMITTING ApplicationSubmissionContext app:" + applicationId
+ " to queue:" + appContext.getQueue() + " with reservationId:"
+ appContext.getReservationID());
}
appContext.setApplicationName( // Job name
jobConf.get(JobContext.JOB_NAME,
YarnConfiguration.DEFAULT_APPLICATION_NAME));
appContext.setCancelTokensWhenComplete(
conf.getBoolean(MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN, true));
appContext.setAMContainerSpec(amContainer); // AM Container
appContext.setMaxAppAttempts(
conf.getInt(MRJobConfig.MR_AM_MAX_ATTEMPTS,
MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS));
appContext.setResource(capability);
// set labels for the AM container request if present
String amNodelabelExpression = conf.get(MRJobConfig.AM_NODE_LABEL_EXP);
if (null != amNodelabelExpression
&& amNodelabelExpression.trim().length() != 0) {
ResourceRequest amResourceRequest =
recordFactory.newRecordInstance(ResourceRequest.class);
amResourceRequest.setPriority(AM_CONTAINER_PRIORITY);
amResourceRequest.setResourceName(ResourceRequest.ANY);
amResourceRequest.setCapability(capability);
amResourceRequest.setNumContainers(1);
amResourceRequest.setNodeLabelExpression(amNodelabelExpression.trim());
appContext.setAMContainerResourceRequest(amResourceRequest);
}
// set labels for the Job containers
appContext.setNodeLabelExpression(jobConf
.get(JobContext.JOB_NODE_LABEL_EXP));
appContext.setApplicationType(MRJobConfig.MR_APPLICATION_TYPE);
if (tagsFromConf != null && !tagsFromConf.isEmpty()) {
appContext.setApplicationTags(new HashSet<String>(tagsFromConf));
}
return appContext;
}
@Override
public void setJobPriority(JobID arg0, String arg1) throws IOException,
InterruptedException {
resMgrDelegate.setJobPriority(arg0, arg1);
}
@Override
public long getProtocolVersion(String arg0, long arg1) throws IOException {
return resMgrDelegate.getProtocolVersion(arg0, arg1);
}
@Override
public long renewDelegationToken(Token<DelegationTokenIdentifier> arg0)
throws IOException, InterruptedException {
throw new UnsupportedOperationException("Use Token.renew instead");
}
@Override
public Counters getJobCounters(JobID arg0) throws IOException,
InterruptedException {
return clientCache.getClient(arg0).getJobCounters(arg0);
}
@Override
public String getJobHistoryDir() throws IOException, InterruptedException {
return JobHistoryUtils.getConfiguredHistoryServerDoneDirPrefix(conf);
}
@Override
public JobStatus getJobStatus(JobID jobID) throws IOException,
InterruptedException {
JobStatus status = clientCache.getClient(jobID).getJobStatus(jobID);
return status;
}
@Override
public TaskCompletionEvent[] getTaskCompletionEvents(JobID arg0, int arg1,
int arg2) throws IOException, InterruptedException {
return clientCache.getClient(arg0).getTaskCompletionEvents(arg0, arg1, arg2);
}
@Override
public String[] getTaskDiagnostics(TaskAttemptID arg0) throws IOException,
InterruptedException {
return clientCache.getClient(arg0.getJobID()).getTaskDiagnostics(arg0);
}
@Override
public TaskReport[] getTaskReports(JobID jobID, TaskType taskType)
throws IOException, InterruptedException {
return clientCache.getClient(jobID)
.getTaskReports(jobID, taskType);
}
private void killUnFinishedApplication(ApplicationId appId)
throws IOException {
ApplicationReport application = null;
try {
application = resMgrDelegate.getApplicationReport(appId);
} catch (YarnException e) {
throw new IOException(e);
}
if (application.getYarnApplicationState() == YarnApplicationState.FINISHED
|| application.getYarnApplicationState() == YarnApplicationState.FAILED
|| application.getYarnApplicationState() == YarnApplicationState.KILLED) {
return;
}
killApplication(appId);
}
private void killApplication(ApplicationId appId) throws IOException {
try {
resMgrDelegate.killApplication(appId);
} catch (YarnException e) {
throw new IOException(e);
}
}
private boolean isJobInTerminalState(JobStatus status) {
return status.getState() == JobStatus.State.KILLED
|| status.getState() == JobStatus.State.FAILED
|| status.getState() == JobStatus.State.SUCCEEDED;
}
@Override
public void killJob(JobID arg0) throws IOException, InterruptedException {
    /* Check whether the job is still running; if not, send the kill straight to the RM. */
JobStatus status = clientCache.getClient(arg0).getJobStatus(arg0);
ApplicationId appId = TypeConverter.toYarn(arg0).getAppId();
    // Status is unavailable from the AM/history server; fall back to the RM,
    // kill the application there if it is still unfinished, and return.
if (status == null) {
killUnFinishedApplication(appId);
return;
}
if (status.getState() != JobStatus.State.RUNNING) {
killApplication(appId);
return;
}
try {
/* send a kill to the AM */
clientCache.getClient(arg0).killJob(arg0);
long currentTimeMillis = System.currentTimeMillis();
long timeKillIssued = currentTimeMillis;
long killTimeOut =
conf.getLong(MRJobConfig.MR_AM_HARD_KILL_TIMEOUT_MS,
MRJobConfig.DEFAULT_MR_AM_HARD_KILL_TIMEOUT_MS);
while ((currentTimeMillis < timeKillIssued + killTimeOut)
&& !isJobInTerminalState(status)) {
try {
Thread.sleep(1000L);
} catch (InterruptedException ie) {
        /* interrupted, just break */
break;
}
currentTimeMillis = System.currentTimeMillis();
status = clientCache.getClient(arg0).getJobStatus(arg0);
if (status == null) {
killUnFinishedApplication(appId);
return;
}
}
} catch(IOException io) {
LOG.debug("Error when checking for application status", io);
}
if (status != null && !isJobInTerminalState(status)) {
killApplication(appId);
}
}
@Override
public boolean killTask(TaskAttemptID arg0, boolean arg1) throws IOException,
InterruptedException {
return clientCache.getClient(arg0.getJobID()).killTask(arg0, arg1);
}
@Override
public AccessControlList getQueueAdmins(String arg0) throws IOException {
return new AccessControlList("*");
}
@Override
public JobTrackerStatus getJobTrackerStatus() throws IOException,
InterruptedException {
return JobTrackerStatus.RUNNING;
}
@Override
public ProtocolSignature getProtocolSignature(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
return ProtocolSignature.getProtocolSignature(this, protocol, clientVersion,
clientMethodsHash);
}
@Override
public LogParams getLogFileParams(JobID jobID, TaskAttemptID taskAttemptID)
throws IOException {
return clientCache.getClient(jobID).getLogFilePath(jobID, taskAttemptID);
}
private static void warnForJavaLibPath(String opts, String component,
String javaConf, String envConf) {
if (opts != null && opts.contains("-Djava.library.path")) {
LOG.warn("Usage of -Djava.library.path in " + javaConf + " can cause " +
"programs to no longer function if hadoop native libraries " +
"are used. These values should be set as part of the " +
"LD_LIBRARY_PATH in the " + component + " JVM env using " +
envConf + " config settings.");
}
}
}
| 28,404
| 37.48916
| 97
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.security.PrivilegedAction;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.v2.api.HSClientProtocol;
import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.ipc.YarnRPC;
public class ClientCache {
private final Configuration conf;
private final ResourceMgrDelegate rm;
private static final Log LOG = LogFactory.getLog(ClientCache.class);
private Map<JobID, ClientServiceDelegate> cache =
new HashMap<JobID, ClientServiceDelegate>();
private MRClientProtocol hsProxy;
public ClientCache(Configuration conf, ResourceMgrDelegate rm) {
this.conf = conf;
this.rm = rm;
}
//TODO: evict from the cache on some threshold
public synchronized ClientServiceDelegate getClient(JobID jobId) {
if (hsProxy == null) {
try {
hsProxy = instantiateHistoryProxy();
} catch (IOException e) {
LOG.warn("Could not connect to History server.", e);
throw new YarnRuntimeException("Could not connect to History server.", e);
}
}
ClientServiceDelegate client = cache.get(jobId);
if (client == null) {
client = new ClientServiceDelegate(conf, rm, jobId, hsProxy);
cache.put(jobId, client);
}
return client;
}
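  /*
   * Editor's note (illustrative only, not part of the original class): callers
   * such as YARNRunner go through this cache so that a single
   * ClientServiceDelegate (and one lazily created history-server proxy) is
   * reused per job. A minimal sketch, assuming conf, rmDelegate and jobId
   * already exist:
   *
   *   ClientCache cache = new ClientCache(conf, rmDelegate);
   *   JobStatus status = cache.getClient(jobId).getJobStatus(jobId);
   */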
protected synchronized MRClientProtocol getInitializedHSProxy()
throws IOException {
if (this.hsProxy == null) {
hsProxy = instantiateHistoryProxy();
}
return this.hsProxy;
}
protected MRClientProtocol instantiateHistoryProxy()
throws IOException {
final String serviceAddr = conf.get(JHAdminConfig.MR_HISTORY_ADDRESS);
if (StringUtils.isEmpty(serviceAddr)) {
return null;
}
LOG.debug("Connecting to HistoryServer at: " + serviceAddr);
final YarnRPC rpc = YarnRPC.create(conf);
LOG.debug("Connected to HistoryServer at: " + serviceAddr);
UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
return currentUser.doAs(new PrivilegedAction<MRClientProtocol>() {
@Override
public MRClientProtocol run() {
return (MRClientProtocol) rpc.getProxy(HSClientProtocol.class,
NetUtils.createSocketAddr(serviceAddr), conf);
}
});
}
}
| 3,555
| 34.207921
| 82
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/TestMapreduceConfigFields.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.util.HashSet;
import org.apache.hadoop.conf.TestConfigurationFieldsBase;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.ShuffleHandler;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.NLineInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
/**
* Unit test class to compare the following MR Configuration classes:
* <p></p>
* {@link org.apache.hadoop.mapreduce.MRJobConfig}
* {@link org.apache.hadoop.mapreduce.MRConfig}
* {@link org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig}
* {@link org.apache.hadoop.mapred.ShuffleHandler}
* {@link org.apache.hadoop.mapreduce.lib.output.FileOutputFormat}
* {@link org.apache.hadoop.mapreduce.lib.input.FileInputFormat}
* {@link org.apache.hadoop.mapreduce.Job}
* {@link org.apache.hadoop.mapreduce.lib.input.NLineInputFormat}
 * {@link org.apache.hadoop.mapred.JobConf}
 * {@link org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter}
* <p></p>
* against mapred-default.xml for missing properties. Currently only
* throws an error if the class is missing a property.
* <p></p>
* Refer to {@link org.apache.hadoop.conf.TestConfigurationFieldsBase}
* for how this class works.
*/
public class TestMapreduceConfigFields extends TestConfigurationFieldsBase {
@Override
public void initializeMemberVariables() {
    xmlFilename = "mapred-default.xml";
configurationClasses = new Class[] { MRJobConfig.class, MRConfig.class,
JHAdminConfig.class, ShuffleHandler.class, FileOutputFormat.class,
FileInputFormat.class, Job.class, NLineInputFormat.class,
JobConf.class, FileOutputCommitter.class };
// Initialize used variables
configurationPropsToSkipCompare = new HashSet<String>();
xmlPropsToSkipCompare = new HashSet<String>();
// Set error modes
errorIfMissingConfigProps = true;
errorIfMissingXmlProps = false;
// Ignore deprecated MR1 properties in JobConf
configurationPropsToSkipCompare
.add(JobConf.MAPRED_JOB_MAP_MEMORY_MB_PROPERTY);
configurationPropsToSkipCompare
.add(JobConf.MAPRED_JOB_REDUCE_MEMORY_MB_PROPERTY);
// Obsolete entries listed in MAPREDUCE-6057 were removed from trunk
// but not removed from branch-2.
xmlPropsToSkipCompare.add("map.sort.class");
xmlPropsToSkipCompare.add("mapreduce.reduce.skip.proc.count.autoincr");
xmlPropsToSkipCompare.add("mapreduce.map.skip.proc.count.autoincr");
xmlPropsToSkipCompare.add("mapreduce.local.clientfactory.class.name");
}
}
| 3,506
| 40.75
| 76
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.jobhistory;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.HashMap;
import org.junit.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapreduce.CounterGroup;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.JobStateInternal;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.junit.After;
import org.junit.AfterClass;
import static org.junit.Assert.assertFalse;
import org.junit.BeforeClass;
import org.apache.hadoop.yarn.server.MiniYARNCluster;
import org.apache.hadoop.yarn.server.timeline.TimelineStore;
import org.codehaus.jackson.JsonNode;
import org.codehaus.jackson.map.ObjectMapper;
import org.junit.Test;
import org.mockito.Mockito;
public class TestJobHistoryEventHandler {
private static final Log LOG = LogFactory
.getLog(TestJobHistoryEventHandler.class);
private static MiniDFSCluster dfsCluster = null;
private static String coreSitePath;
@BeforeClass
public static void setUpClass() throws Exception {
coreSitePath = "." + File.separator + "target" + File.separator +
"test-classes" + File.separator + "core-site.xml";
Configuration conf = new HdfsConfiguration();
dfsCluster = new MiniDFSCluster.Builder(conf).build();
}
@AfterClass
public static void cleanUpClass() throws Exception {
dfsCluster.shutdown();
}
@After
public void cleanTest() throws Exception {
new File(coreSitePath).delete();
}
@Test (timeout=50000)
public void testFirstFlushOnCompletionEvent() throws Exception {
TestParams t = new TestParams();
Configuration conf = new Configuration();
conf.set(MRJobConfig.MR_AM_STAGING_DIR, t.workDir);
conf.setLong(MRJobConfig.MR_AM_HISTORY_COMPLETE_EVENT_FLUSH_TIMEOUT_MS,
60 * 1000l);
conf.setInt(MRJobConfig.MR_AM_HISTORY_JOB_COMPLETE_UNFLUSHED_MULTIPLIER, 10);
conf.setInt(MRJobConfig.MR_AM_HISTORY_MAX_UNFLUSHED_COMPLETE_EVENTS, 10);
conf.setInt(
MRJobConfig.MR_AM_HISTORY_USE_BATCHED_FLUSH_QUEUE_SIZE_THRESHOLD, 200);
JHEvenHandlerForTest realJheh =
new JHEvenHandlerForTest(t.mockAppContext, 0);
JHEvenHandlerForTest jheh = spy(realJheh);
jheh.init(conf);
EventWriter mockWriter = null;
try {
jheh.start();
handleEvent(jheh, new JobHistoryEvent(t.jobId, new AMStartedEvent(
t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000, -1)));
mockWriter = jheh.getEventWriter();
verify(mockWriter).write(any(HistoryEvent.class));
for (int i = 0; i < 100; i++) {
queueEvent(jheh, new JobHistoryEvent(t.jobId, new TaskStartedEvent(
t.taskID, 0, TaskType.MAP, "")));
}
handleNextNEvents(jheh, 100);
verify(mockWriter, times(0)).flush();
// First completion event, but min-queue-size for batching flushes is 10
handleEvent(jheh, new JobHistoryEvent(t.jobId, new TaskFinishedEvent(
t.taskID, t.taskAttemptID, 0, TaskType.MAP, "", null)));
verify(mockWriter).flush();
} finally {
jheh.stop();
verify(mockWriter).close();
}
}
@Test (timeout=50000)
public void testMaxUnflushedCompletionEvents() throws Exception {
TestParams t = new TestParams();
Configuration conf = new Configuration();
conf.set(MRJobConfig.MR_AM_STAGING_DIR, t.workDir);
conf.setLong(MRJobConfig.MR_AM_HISTORY_COMPLETE_EVENT_FLUSH_TIMEOUT_MS,
60 * 1000l);
conf.setInt(MRJobConfig.MR_AM_HISTORY_JOB_COMPLETE_UNFLUSHED_MULTIPLIER, 10);
conf.setInt(MRJobConfig.MR_AM_HISTORY_MAX_UNFLUSHED_COMPLETE_EVENTS, 10);
conf.setInt(
MRJobConfig.MR_AM_HISTORY_USE_BATCHED_FLUSH_QUEUE_SIZE_THRESHOLD, 5);
JHEvenHandlerForTest realJheh =
new JHEvenHandlerForTest(t.mockAppContext, 0);
JHEvenHandlerForTest jheh = spy(realJheh);
jheh.init(conf);
EventWriter mockWriter = null;
try {
jheh.start();
handleEvent(jheh, new JobHistoryEvent(t.jobId, new AMStartedEvent(
t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000, -1)));
mockWriter = jheh.getEventWriter();
verify(mockWriter).write(any(HistoryEvent.class));
for (int i = 0 ; i < 100 ; i++) {
queueEvent(jheh, new JobHistoryEvent(t.jobId, new TaskFinishedEvent(
t.taskID, t.taskAttemptID, 0, TaskType.MAP, "", null)));
}
handleNextNEvents(jheh, 9);
verify(mockWriter, times(0)).flush();
handleNextNEvents(jheh, 1);
verify(mockWriter).flush();
handleNextNEvents(jheh, 50);
verify(mockWriter, times(6)).flush();
} finally {
jheh.stop();
verify(mockWriter).close();
}
}
@Test (timeout=50000)
public void testUnflushedTimer() throws Exception {
TestParams t = new TestParams();
Configuration conf = new Configuration();
conf.set(MRJobConfig.MR_AM_STAGING_DIR, t.workDir);
conf.setLong(MRJobConfig.MR_AM_HISTORY_COMPLETE_EVENT_FLUSH_TIMEOUT_MS,
2 * 1000l); //2 seconds.
conf.setInt(MRJobConfig.MR_AM_HISTORY_JOB_COMPLETE_UNFLUSHED_MULTIPLIER, 10);
conf.setInt(MRJobConfig.MR_AM_HISTORY_MAX_UNFLUSHED_COMPLETE_EVENTS, 100);
conf.setInt(
MRJobConfig.MR_AM_HISTORY_USE_BATCHED_FLUSH_QUEUE_SIZE_THRESHOLD, 5);
JHEvenHandlerForTest realJheh =
new JHEvenHandlerForTest(t.mockAppContext, 0);
JHEvenHandlerForTest jheh = spy(realJheh);
jheh.init(conf);
EventWriter mockWriter = null;
try {
jheh.start();
handleEvent(jheh, new JobHistoryEvent(t.jobId, new AMStartedEvent(
t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000, -1)));
mockWriter = jheh.getEventWriter();
verify(mockWriter).write(any(HistoryEvent.class));
for (int i = 0 ; i < 100 ; i++) {
queueEvent(jheh, new JobHistoryEvent(t.jobId, new TaskFinishedEvent(
t.taskID, t.taskAttemptID, 0, TaskType.MAP, "", null)));
}
handleNextNEvents(jheh, 9);
Assert.assertTrue(jheh.getFlushTimerStatus());
verify(mockWriter, times(0)).flush();
      Thread.sleep(2 * 4 * 1000l); // Sleep 8 seconds, well past the 2 second flush timeout. Just be safe.
verify(mockWriter).flush();
Assert.assertFalse(jheh.getFlushTimerStatus());
} finally {
jheh.stop();
verify(mockWriter).close();
}
}
@Test (timeout=50000)
public void testBatchedFlushJobEndMultiplier() throws Exception {
TestParams t = new TestParams();
Configuration conf = new Configuration();
conf.set(MRJobConfig.MR_AM_STAGING_DIR, t.workDir);
conf.setLong(MRJobConfig.MR_AM_HISTORY_COMPLETE_EVENT_FLUSH_TIMEOUT_MS,
        60 * 1000l); // 60 seconds.
conf.setInt(MRJobConfig.MR_AM_HISTORY_JOB_COMPLETE_UNFLUSHED_MULTIPLIER, 3);
conf.setInt(MRJobConfig.MR_AM_HISTORY_MAX_UNFLUSHED_COMPLETE_EVENTS, 10);
conf.setInt(
MRJobConfig.MR_AM_HISTORY_USE_BATCHED_FLUSH_QUEUE_SIZE_THRESHOLD, 0);
JHEvenHandlerForTest realJheh =
new JHEvenHandlerForTest(t.mockAppContext, 0);
JHEvenHandlerForTest jheh = spy(realJheh);
jheh.init(conf);
EventWriter mockWriter = null;
try {
jheh.start();
handleEvent(jheh, new JobHistoryEvent(t.jobId, new AMStartedEvent(
t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000, -1)));
mockWriter = jheh.getEventWriter();
verify(mockWriter).write(any(HistoryEvent.class));
for (int i = 0 ; i < 100 ; i++) {
queueEvent(jheh, new JobHistoryEvent(t.jobId, new TaskFinishedEvent(
t.taskID, t.taskAttemptID, 0, TaskType.MAP, "", null)));
}
queueEvent(jheh, new JobHistoryEvent(t.jobId, new JobFinishedEvent(
TypeConverter.fromYarn(t.jobId), 0, 10, 10, 0, 0, null, null, new Counters())));
handleNextNEvents(jheh, 29);
verify(mockWriter, times(0)).flush();
handleNextNEvents(jheh, 72);
verify(mockWriter, times(4)).flush(); //3 * 30 + 1 for JobFinished
} finally {
jheh.stop();
verify(mockWriter).close();
}
}
  // For all event types, process the done files if this is the last AM retry.
@Test (timeout=50000)
public void testProcessDoneFilesOnLastAMRetry() throws Exception {
TestParams t = new TestParams(true);
Configuration conf = new Configuration();
JHEvenHandlerForTest realJheh =
new JHEvenHandlerForTest(t.mockAppContext, 0);
JHEvenHandlerForTest jheh = spy(realJheh);
jheh.init(conf);
EventWriter mockWriter = null;
try {
jheh.start();
handleEvent(jheh, new JobHistoryEvent(t.jobId, new AMStartedEvent(
t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000, -1)));
verify(jheh, times(0)).processDoneFiles(any(JobId.class));
handleEvent(jheh, new JobHistoryEvent(t.jobId,
new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId), 0,
0, 0, JobStateInternal.ERROR.toString())));
verify(jheh, times(1)).processDoneFiles(any(JobId.class));
handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobFinishedEvent(
TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, new Counters(),
new Counters(), new Counters())));
verify(jheh, times(2)).processDoneFiles(any(JobId.class));
handleEvent(jheh, new JobHistoryEvent(t.jobId,
new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId), 0,
0, 0, JobStateInternal.FAILED.toString())));
verify(jheh, times(3)).processDoneFiles(any(JobId.class));
handleEvent(jheh, new JobHistoryEvent(t.jobId,
new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId), 0,
0, 0, JobStateInternal.KILLED.toString())));
verify(jheh, times(4)).processDoneFiles(any(JobId.class));
mockWriter = jheh.getEventWriter();
verify(mockWriter, times(5)).write(any(HistoryEvent.class));
} finally {
jheh.stop();
verify(mockWriter).close();
}
}
// Skip processing Done files in case of ERROR, if it's not last AM retry
@Test (timeout=50000)
public void testProcessDoneFilesNotLastAMRetry() throws Exception {
TestParams t = new TestParams(false);
Configuration conf = new Configuration();
JHEvenHandlerForTest realJheh =
new JHEvenHandlerForTest(t.mockAppContext, 0);
JHEvenHandlerForTest jheh = spy(realJheh);
jheh.init(conf);
EventWriter mockWriter = null;
try {
jheh.start();
handleEvent(jheh, new JobHistoryEvent(t.jobId, new AMStartedEvent(
t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000, -1)));
verify(jheh, times(0)).processDoneFiles(t.jobId);
// skip processing done files
handleEvent(jheh, new JobHistoryEvent(t.jobId,
new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId), 0,
0, 0, JobStateInternal.ERROR.toString())));
verify(jheh, times(0)).processDoneFiles(t.jobId);
handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobFinishedEvent(
TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, new Counters(),
new Counters(), new Counters())));
verify(jheh, times(1)).processDoneFiles(t.jobId);
handleEvent(jheh, new JobHistoryEvent(t.jobId,
new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId), 0,
0, 0, JobStateInternal.FAILED.toString())));
verify(jheh, times(2)).processDoneFiles(t.jobId);
handleEvent(jheh, new JobHistoryEvent(t.jobId,
new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId), 0,
0, 0, JobStateInternal.KILLED.toString())));
verify(jheh, times(3)).processDoneFiles(t.jobId);
mockWriter = jheh.getEventWriter();
verify(mockWriter, times(5)).write(any(HistoryEvent.class));
} finally {
jheh.stop();
verify(mockWriter).close();
}
}
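// Verify that job history is written to the default file system configured in
// core-site.xml (the minicluster HDFS), not to the local file system the AM
// sees in its own configuration.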
@Test (timeout=50000)
public void testDefaultFsIsUsedForHistory() throws Exception {
// Create default configuration pointing to the minicluster
Configuration conf = new Configuration();
conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
dfsCluster.getURI().toString());
FileOutputStream os = new FileOutputStream(coreSitePath);
conf.writeXml(os);
os.close();
// simulate execution under a non-default namenode
conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
"file:///");
TestParams t = new TestParams();
conf.set(MRJobConfig.MR_AM_STAGING_DIR, t.dfsWorkDir);
JHEvenHandlerForTest realJheh =
new JHEvenHandlerForTest(t.mockAppContext, 0, false);
JHEvenHandlerForTest jheh = spy(realJheh);
jheh.init(conf);
try {
jheh.start();
handleEvent(jheh, new JobHistoryEvent(t.jobId, new AMStartedEvent(
t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000, -1)));
handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobFinishedEvent(
TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, new Counters(),
new Counters(), new Counters())));
// If we got here then event handler worked but we don't know with which
// file system. Now we check that history stuff was written to minicluster
FileSystem dfsFileSystem = dfsCluster.getFileSystem();
assertTrue("Minicluster contains some history files",
dfsFileSystem.globStatus(new Path(t.dfsWorkDir + "/*")).length != 0);
FileSystem localFileSystem = LocalFileSystem.get(conf);
assertFalse("No history directory on non-default file system",
localFileSystem.exists(new Path(t.dfsWorkDir)));
} finally {
jheh.stop();
}
}
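// Check that the intermediate done dir for a user is resolved both for a
// relative path and for a path qualified against the default file system
// taken from core-site.xml.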
@Test
public void testGetHistoryIntermediateDoneDirForUser() throws IOException {
// Test relative path
Configuration conf = new Configuration();
conf.set(JHAdminConfig.MR_HISTORY_INTERMEDIATE_DONE_DIR,
"/mapred/history/done_intermediate");
conf.set(MRJobConfig.USER_NAME, System.getProperty("user.name"));
String pathStr = JobHistoryUtils.getHistoryIntermediateDoneDirForUser(conf);
Assert.assertEquals("/mapred/history/done_intermediate/" +
System.getProperty("user.name"), pathStr);
// Test fully qualified path
// Create default configuration pointing to the minicluster
conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
dfsCluster.getURI().toString());
FileOutputStream os = new FileOutputStream(coreSitePath);
conf.writeXml(os);
os.close();
// Simulate execution under a non-default namenode
conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
"file:///");
pathStr = JobHistoryUtils.getHistoryIntermediateDoneDirForUser(conf);
Assert.assertEquals(dfsCluster.getURI().toString() +
"/mapred/history/done_intermediate/" + System.getProperty("user.name"),
pathStr);
}
// test AMStartedEvent for submitTime and startTime
@Test (timeout=50000)
public void testAMStartedEvent() throws Exception {
TestParams t = new TestParams();
Configuration conf = new Configuration();
JHEvenHandlerForTest realJheh =
new JHEvenHandlerForTest(t.mockAppContext, 0);
JHEvenHandlerForTest jheh = spy(realJheh);
jheh.init(conf);
EventWriter mockWriter = null;
try {
jheh.start();
handleEvent(jheh, new JobHistoryEvent(t.jobId, new AMStartedEvent(
t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000, 100)));
JobHistoryEventHandler.MetaInfo mi =
JobHistoryEventHandler.fileMap.get(t.jobId);
Assert.assertEquals(mi.getJobIndexInfo().getSubmitTime(), 100);
Assert.assertEquals(mi.getJobIndexInfo().getJobStartTime(), 200);
Assert.assertEquals(mi.getJobSummary().getJobSubmitTime(), 100);
Assert.assertEquals(mi.getJobSummary().getJobLaunchTime(), 200);
handleEvent(jheh, new JobHistoryEvent(t.jobId,
new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId), 0,
0, 0, JobStateInternal.FAILED.toString())));
Assert.assertEquals(mi.getJobIndexInfo().getSubmitTime(), 100);
Assert.assertEquals(mi.getJobIndexInfo().getJobStartTime(), 200);
Assert.assertEquals(mi.getJobSummary().getJobSubmitTime(), 100);
Assert.assertEquals(mi.getJobSummary().getJobLaunchTime(), 200);
verify(jheh, times(1)).processDoneFiles(t.jobId);
mockWriter = jheh.getEventWriter();
verify(mockWriter, times(2)).write(any(HistoryEvent.class));
} finally {
jheh.stop();
}
}
// Have JobHistoryEventHandler handle some events and make sure they get
// stored to the Timeline store
@Test (timeout=50000)
public void testTimelineEventHandling() throws Exception {
TestParams t = new TestParams(false);
Configuration conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, true);
JHEvenHandlerForTest jheh = new JHEvenHandlerForTest(t.mockAppContext, 0);
jheh.init(conf);
MiniYARNCluster yarnCluster = null;
long currentTime = System.currentTimeMillis();
try {
yarnCluster = new MiniYARNCluster(
TestJobHistoryEventHandler.class.getSimpleName(), 1, 1, 1, 1);
yarnCluster.init(conf);
yarnCluster.start();
jheh.start();
TimelineStore ts = yarnCluster.getApplicationHistoryServer()
.getTimelineStore();
handleEvent(jheh, new JobHistoryEvent(t.jobId, new AMStartedEvent(
t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000, -1),
currentTime - 10));
TimelineEntities entities = ts.getEntities("MAPREDUCE_JOB", null, null,
null, null, null, null, null, null, null);
Assert.assertEquals(1, entities.getEntities().size());
TimelineEntity tEntity = entities.getEntities().get(0);
Assert.assertEquals(t.jobId.toString(), tEntity.getEntityId());
Assert.assertEquals(1, tEntity.getEvents().size());
Assert.assertEquals(EventType.AM_STARTED.toString(),
tEntity.getEvents().get(0).getEventType());
Assert.assertEquals(currentTime - 10,
tEntity.getEvents().get(0).getTimestamp());
handleEvent(jheh, new JobHistoryEvent(t.jobId,
new JobSubmittedEvent(TypeConverter.fromYarn(t.jobId), "name",
"user", 200, "/foo/job.xml",
new HashMap<JobACL, AccessControlList>(), "default"),
currentTime + 10));
entities = ts.getEntities("MAPREDUCE_JOB", null, null, null,
null, null, null, null, null, null);
Assert.assertEquals(1, entities.getEntities().size());
tEntity = entities.getEntities().get(0);
Assert.assertEquals(t.jobId.toString(), tEntity.getEntityId());
Assert.assertEquals(2, tEntity.getEvents().size());
Assert.assertEquals(EventType.JOB_SUBMITTED.toString(),
tEntity.getEvents().get(0).getEventType());
Assert.assertEquals(EventType.AM_STARTED.toString(),
tEntity.getEvents().get(1).getEventType());
Assert.assertEquals(currentTime + 10,
tEntity.getEvents().get(0).getTimestamp());
Assert.assertEquals(currentTime - 10,
tEntity.getEvents().get(1).getTimestamp());
handleEvent(jheh, new JobHistoryEvent(t.jobId,
new JobQueueChangeEvent(TypeConverter.fromYarn(t.jobId), "q2"),
currentTime - 20));
entities = ts.getEntities("MAPREDUCE_JOB", null, null, null,
null, null, null, null, null, null);
Assert.assertEquals(1, entities.getEntities().size());
tEntity = entities.getEntities().get(0);
Assert.assertEquals(t.jobId.toString(), tEntity.getEntityId());
Assert.assertEquals(3, tEntity.getEvents().size());
Assert.assertEquals(EventType.JOB_SUBMITTED.toString(),
tEntity.getEvents().get(0).getEventType());
Assert.assertEquals(EventType.AM_STARTED.toString(),
tEntity.getEvents().get(1).getEventType());
Assert.assertEquals(EventType.JOB_QUEUE_CHANGED.toString(),
tEntity.getEvents().get(2).getEventType());
Assert.assertEquals(currentTime + 10,
tEntity.getEvents().get(0).getTimestamp());
Assert.assertEquals(currentTime - 10,
tEntity.getEvents().get(1).getTimestamp());
Assert.assertEquals(currentTime - 20,
tEntity.getEvents().get(2).getTimestamp());
handleEvent(jheh, new JobHistoryEvent(t.jobId,
new JobFinishedEvent(TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0,
0, new Counters(), new Counters(), new Counters()), currentTime));
entities = ts.getEntities("MAPREDUCE_JOB", null, null, null,
null, null, null, null, null, null);
Assert.assertEquals(1, entities.getEntities().size());
tEntity = entities.getEntities().get(0);
Assert.assertEquals(t.jobId.toString(), tEntity.getEntityId());
Assert.assertEquals(4, tEntity.getEvents().size());
Assert.assertEquals(EventType.JOB_SUBMITTED.toString(),
tEntity.getEvents().get(0).getEventType());
Assert.assertEquals(EventType.JOB_FINISHED.toString(),
tEntity.getEvents().get(1).getEventType());
Assert.assertEquals(EventType.AM_STARTED.toString(),
tEntity.getEvents().get(2).getEventType());
Assert.assertEquals(EventType.JOB_QUEUE_CHANGED.toString(),
tEntity.getEvents().get(3).getEventType());
Assert.assertEquals(currentTime + 10,
tEntity.getEvents().get(0).getTimestamp());
Assert.assertEquals(currentTime,
tEntity.getEvents().get(1).getTimestamp());
Assert.assertEquals(currentTime - 10,
tEntity.getEvents().get(2).getTimestamp());
Assert.assertEquals(currentTime - 20,
tEntity.getEvents().get(3).getTimestamp());
handleEvent(jheh, new JobHistoryEvent(t.jobId,
new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId),
0, 0, 0, JobStateInternal.KILLED.toString()), currentTime + 20));
entities = ts.getEntities("MAPREDUCE_JOB", null, null, null,
null, null, null, null, null, null);
Assert.assertEquals(1, entities.getEntities().size());
tEntity = entities.getEntities().get(0);
Assert.assertEquals(t.jobId.toString(), tEntity.getEntityId());
Assert.assertEquals(5, tEntity.getEvents().size());
Assert.assertEquals(EventType.JOB_KILLED.toString(),
tEntity.getEvents().get(0).getEventType());
Assert.assertEquals(EventType.JOB_SUBMITTED.toString(),
tEntity.getEvents().get(1).getEventType());
Assert.assertEquals(EventType.JOB_FINISHED.toString(),
tEntity.getEvents().get(2).getEventType());
Assert.assertEquals(EventType.AM_STARTED.toString(),
tEntity.getEvents().get(3).getEventType());
Assert.assertEquals(EventType.JOB_QUEUE_CHANGED.toString(),
tEntity.getEvents().get(4).getEventType());
Assert.assertEquals(currentTime + 20,
tEntity.getEvents().get(0).getTimestamp());
Assert.assertEquals(currentTime + 10,
tEntity.getEvents().get(1).getTimestamp());
Assert.assertEquals(currentTime,
tEntity.getEvents().get(2).getTimestamp());
Assert.assertEquals(currentTime - 10,
tEntity.getEvents().get(3).getTimestamp());
Assert.assertEquals(currentTime - 20,
tEntity.getEvents().get(4).getTimestamp());
handleEvent(jheh, new JobHistoryEvent(t.jobId,
new TaskStartedEvent(t.taskID, 0, TaskType.MAP, "")));
entities = ts.getEntities("MAPREDUCE_TASK", null, null, null,
null, null, null, null, null, null);
Assert.assertEquals(1, entities.getEntities().size());
tEntity = entities.getEntities().get(0);
Assert.assertEquals(t.taskID.toString(), tEntity.getEntityId());
Assert.assertEquals(1, tEntity.getEvents().size());
Assert.assertEquals(EventType.TASK_STARTED.toString(),
tEntity.getEvents().get(0).getEventType());
Assert.assertEquals(TaskType.MAP.toString(),
tEntity.getEvents().get(0).getEventInfo().get("TASK_TYPE"));
handleEvent(jheh, new JobHistoryEvent(t.jobId,
new TaskStartedEvent(t.taskID, 0, TaskType.REDUCE, "")));
entities = ts.getEntities("MAPREDUCE_TASK", null, null, null,
null, null, null, null, null, null);
Assert.assertEquals(1, entities.getEntities().size());
tEntity = entities.getEntities().get(0);
Assert.assertEquals(t.taskID.toString(), tEntity.getEntityId());
Assert.assertEquals(2, tEntity.getEvents().size());
Assert.assertEquals(EventType.TASK_STARTED.toString(),
tEntity.getEvents().get(1).getEventType());
Assert.assertEquals(TaskType.REDUCE.toString(),
tEntity.getEvents().get(0).getEventInfo().get("TASK_TYPE"));
Assert.assertEquals(TaskType.MAP.toString(),
tEntity.getEvents().get(1).getEventInfo().get("TASK_TYPE"));
} finally {
if (yarnCluster != null) {
yarnCluster.stop();
}
}
}
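// Verify that countersToJSON serializes counter groups and their counters,
// sorted by name, into the expected JSON structure.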
@Test (timeout=50000)
public void testCountersToJSON() throws Exception {
JobHistoryEventHandler jheh = new JobHistoryEventHandler(null, 0);
Counters counters = new Counters();
CounterGroup group1 = counters.addGroup("DOCTORS",
"Incarnations of the Doctor");
group1.addCounter("PETER_CAPALDI", "Peter Capaldi", 12);
group1.addCounter("MATT_SMITH", "Matt Smith", 11);
group1.addCounter("DAVID_TENNANT", "David Tennant", 10);
CounterGroup group2 = counters.addGroup("COMPANIONS",
"Companions of the Doctor");
group2.addCounter("CLARA_OSWALD", "Clara Oswald", 6);
group2.addCounter("RORY_WILLIAMS", "Rory Williams", 5);
group2.addCounter("AMY_POND", "Amy Pond", 4);
group2.addCounter("MARTHA_JONES", "Martha Jones", 3);
group2.addCounter("DONNA_NOBLE", "Donna Noble", 2);
group2.addCounter("ROSE_TYLER", "Rose Tyler", 1);
JsonNode jsonNode = jheh.countersToJSON(counters);
String jsonStr = new ObjectMapper().writeValueAsString(jsonNode);
String expected = "[{\"NAME\":\"COMPANIONS\",\"DISPLAY_NAME\":\"Companions "
+ "of the Doctor\",\"COUNTERS\":[{\"NAME\":\"AMY_POND\",\"DISPLAY_NAME\""
+ ":\"Amy Pond\",\"VALUE\":4},{\"NAME\":\"CLARA_OSWALD\","
+ "\"DISPLAY_NAME\":\"Clara Oswald\",\"VALUE\":6},{\"NAME\":"
+ "\"DONNA_NOBLE\",\"DISPLAY_NAME\":\"Donna Noble\",\"VALUE\":2},"
+ "{\"NAME\":\"MARTHA_JONES\",\"DISPLAY_NAME\":\"Martha Jones\","
+ "\"VALUE\":3},{\"NAME\":\"RORY_WILLIAMS\",\"DISPLAY_NAME\":\"Rory "
+ "Williams\",\"VALUE\":5},{\"NAME\":\"ROSE_TYLER\",\"DISPLAY_NAME\":"
+ "\"Rose Tyler\",\"VALUE\":1}]},{\"NAME\":\"DOCTORS\",\"DISPLAY_NAME\""
+ ":\"Incarnations of the Doctor\",\"COUNTERS\":[{\"NAME\":"
+ "\"DAVID_TENNANT\",\"DISPLAY_NAME\":\"David Tennant\",\"VALUE\":10},"
+ "{\"NAME\":\"MATT_SMITH\",\"DISPLAY_NAME\":\"Matt Smith\",\"VALUE\":"
+ "11},{\"NAME\":\"PETER_CAPALDI\",\"DISPLAY_NAME\":\"Peter Capaldi\","
+ "\"VALUE\":12}]}]";
Assert.assertEquals(expected, jsonStr);
}
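// countersToJSON should produce an empty JSON array for null or empty
// counters, and an empty COUNTERS list for a group with no counters.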
@Test (timeout=50000)
public void testCountersToJSONEmpty() throws Exception {
JobHistoryEventHandler jheh = new JobHistoryEventHandler(null, 0);
Counters counters = null;
JsonNode jsonNode = jheh.countersToJSON(counters);
String jsonStr = new ObjectMapper().writeValueAsString(jsonNode);
String expected = "[]";
Assert.assertEquals(expected, jsonStr);
counters = new Counters();
jsonNode = jheh.countersToJSON(counters);
jsonStr = new ObjectMapper().writeValueAsString(jsonNode);
expected = "[]";
Assert.assertEquals(expected, jsonStr);
counters.addGroup("DOCTORS", "Incarnations of the Doctor");
jsonNode = jheh.countersToJSON(counters);
jsonStr = new ObjectMapper().writeValueAsString(jsonNode);
expected = "[{\"NAME\":\"DOCTORS\",\"DISPLAY_NAME\":\"Incarnations of the "
+ "Doctor\",\"COUNTERS\":[]}]";
Assert.assertEquals(expected, jsonStr);
}
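// Enqueue an event without processing it.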
private void queueEvent(JHEvenHandlerForTest jheh, JobHistoryEvent event) {
jheh.handle(event);
}
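// Enqueue an event and synchronously process the next event in the queue.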
private void handleEvent(JHEvenHandlerForTest jheh, JobHistoryEvent event)
throws InterruptedException {
jheh.handle(event);
jheh.handleEvent(jheh.eventQueue.take());
}
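// Synchronously process the next numEvents events already in the queue.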
private void handleNextNEvents(JHEvenHandlerForTest jheh, int numEvents)
throws InterruptedException {
for (int i = 0; i < numEvents; i++) {
jheh.handleEvent(jheh.eventQueue.take());
}
}
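// Delete any previous test working directory under target/ and return its path.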
private String setupTestWorkDir() {
File testWorkDir = new File("target", this.getClass().getCanonicalName());
try {
FileContext.getLocalFSFileContext().delete(
new Path(testWorkDir.getAbsolutePath()), true);
return testWorkDir.getAbsolutePath();
} catch (Exception e) {
LOG.warn("Could not cleanup", e);
throw new YarnRuntimeException("could not cleanup test dir", e);
}
}
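// Build a mocked AppContext with a mocked Job and the given last-AM-retry flag.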
private AppContext mockAppContext(ApplicationId appId, boolean isLastAMRetry) {
JobId jobId = TypeConverter.toYarn(TypeConverter.fromYarn(appId));
AppContext mockContext = mock(AppContext.class);
Job mockJob = mock(Job.class);
when(mockJob.getAllCounters()).thenReturn(new Counters());
when(mockJob.getTotalMaps()).thenReturn(10);
when(mockJob.getTotalReduces()).thenReturn(10);
when(mockJob.getName()).thenReturn("mockjob");
when(mockContext.getJob(jobId)).thenReturn(mockJob);
when(mockContext.getApplicationID()).thenReturn(appId);
when(mockContext.isLastAMRetry()).thenReturn(isLastAMRetry);
return mockContext;
}
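// Common per-test fixtures: work directories, application/job/task ids and a
// mocked AppContext.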
private class TestParams {
boolean isLastAMRetry;
String workDir = setupTestWorkDir();
String dfsWorkDir = "/" + this.getClass().getCanonicalName();
ApplicationId appId = ApplicationId.newInstance(200, 1);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1);
TaskID taskID = TaskID.forName("task_200707121733_0003_m_000005");
TaskAttemptID taskAttemptID = new TaskAttemptID(taskID, 0);
JobId jobId = MRBuilderUtils.newJobId(appId, 1);
AppContext mockAppContext;
public TestParams() {
this(false);
}
public TestParams(boolean isLastAMRetry) {
this.isLastAMRetry = isLastAMRetry;
mockAppContext = mockAppContext(appId, this.isLastAMRetry);
}
}
private JobHistoryEvent getEventToEnqueue(JobId jobId) {
HistoryEvent toReturn = new JobStatusChangedEvent(new JobID(Integer.toString(jobId.getId()), jobId.getId()), "change status");
return new JobHistoryEvent(jobId, toReturn);
}
/**
* Tests that in case of SIGTERM, the JHEH stops without processing its event
* queue (because we must stop quickly lest we get SIGKILLed) and processes
* a JobUnsuccessfulCompletionEvent for jobs which were still running (so
* that they may show up in the JobHistoryServer)
*/
@Test
public void testSigTermedFunctionality() throws IOException {
AppContext mockedContext = Mockito.mock(AppContext.class);
JHEventHandlerForSigtermTest jheh =
new JHEventHandlerForSigtermTest(mockedContext, 0);
JobId jobId = Mockito.mock(JobId.class);
jheh.addToFileMap(jobId);
//Submit 4 events and check that they're handled in the absence of a signal
final int numEvents = 4;
JobHistoryEvent events[] = new JobHistoryEvent[numEvents];
for(int i=0; i < numEvents; ++i) {
events[i] = getEventToEnqueue(jobId);
jheh.handle(events[i]);
}
jheh.stop();
//Make sure events were handled
assertTrue("handleEvent should've been called only 4 times but was "
+ jheh.eventsHandled, jheh.eventsHandled == 4);
//Create a new jheh because the last stop closed the eventWriter etc.
jheh = new JHEventHandlerForSigtermTest(mockedContext, 0);
// Make constructor of JobUnsuccessfulCompletionEvent pass
Job job = Mockito.mock(Job.class);
Mockito.when(mockedContext.getJob(jobId)).thenReturn(job);
// Make TypeConverter(JobID) pass
ApplicationId mockAppId = Mockito.mock(ApplicationId.class);
Mockito.when(mockAppId.getClusterTimestamp()).thenReturn(1000L);
Mockito.when(jobId.getAppId()).thenReturn(mockAppId);
jheh.addToFileMap(jobId);
jheh.setForcejobCompletion(true);
for(int i=0; i < numEvents; ++i) {
events[i] = getEventToEnqueue(jobId);
jheh.handle(events[i]);
}
jheh.stop();
//Make sure events were handled, 4 + 1 finish event
assertTrue("handleEvent should've been called only 5 times but was "
+ jheh.eventsHandled, jheh.eventsHandled == 5);
assertTrue("Last event handled wasn't JobUnsuccessfulCompletionEvent",
jheh.lastEventHandled.getHistoryEvent()
instanceof JobUnsuccessfulCompletionEvent);
}
}
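/**
* JobHistoryEventHandler for tests that can either mock out the EventWriter
* and done-file processing, or use the real history-writing path.
*/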
class JHEvenHandlerForTest extends JobHistoryEventHandler {
private EventWriter eventWriter;
private boolean mockHistoryProcessing = true;
public JHEvenHandlerForTest(AppContext context, int startCount) {
super(context, startCount);
JobHistoryEventHandler.fileMap.clear();
}
public JHEvenHandlerForTest(AppContext context, int startCount, boolean mockHistoryProcessing) {
super(context, startCount);
this.mockHistoryProcessing = mockHistoryProcessing;
JobHistoryEventHandler.fileMap.clear();
}
@Override
protected void serviceStart() {
}
@Override
protected EventWriter createEventWriter(Path historyFilePath)
throws IOException {
if (mockHistoryProcessing) {
this.eventWriter = mock(EventWriter.class);
}
else {
this.eventWriter = super.createEventWriter(historyFilePath);
}
return this.eventWriter;
}
@Override
protected void closeEventWriter(JobId jobId) {
}
public EventWriter getEventWriter() {
return this.eventWriter;
}
@Override
protected void processDoneFiles(JobId jobId) throws IOException {
if (!mockHistoryProcessing) {
super.processDoneFiles(jobId);
}
else {
// do nothing
}
}
}
/**
* Class to help with testSigTermedFunctionality
*/
class JHEventHandlerForSigtermTest extends JobHistoryEventHandler {
public JHEventHandlerForSigtermTest(AppContext context, int startCount) {
super(context, startCount);
}
public void addToFileMap(JobId jobId) {
MetaInfo metaInfo = Mockito.mock(MetaInfo.class);
Mockito.when(metaInfo.isWriterActive()).thenReturn(true);
fileMap.put(jobId, metaInfo);
}
JobHistoryEvent lastEventHandled;
int eventsHandled = 0;
@Override
public void handleEvent(JobHistoryEvent event) {
this.lastEventHandled = event;
this.eventsHandled++;
}
}
| 37,648
| 40.463656
| 130
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestEvents.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.jobhistory;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.util.ArrayList;
import java.util.Arrays;
import static org.junit.Assert.*;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.mapred.JobPriority;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl;
import org.junit.Test;
public class TestEvents {
private static final String taskId = "task_1_2_r_3";
/**
* Test the getters of TaskAttemptFinishedEvent and TaskAttemptFinished.
*
* @throws Exception
*/
@Test(timeout = 10000)
public void testTaskAttemptFinishedEvent() throws Exception {
JobID jid = new JobID("001", 1);
TaskID tid = new TaskID(jid, TaskType.REDUCE, 2);
TaskAttemptID taskAttemptId = new TaskAttemptID(tid, 3);
Counters counters = new Counters();
TaskAttemptFinishedEvent test = new TaskAttemptFinishedEvent(taskAttemptId,
TaskType.REDUCE, "TEST", 123L, "RAKNAME", "HOSTNAME", "STATUS",
counters);
assertEquals(test.getAttemptId().toString(), taskAttemptId.toString());
assertEquals(test.getCounters(), counters);
assertEquals(test.getFinishTime(), 123L);
assertEquals(test.getHostname(), "HOSTNAME");
assertEquals(test.getRackName(), "RAKNAME");
assertEquals(test.getState(), "STATUS");
assertEquals(test.getTaskId(), tid);
assertEquals(test.getTaskStatus(), "TEST");
assertEquals(test.getTaskType(), TaskType.REDUCE);
}
/**
* Simple test of JobPriorityChangeEvent and JobPriorityChange.
*
* @throws Exception
*/
@Test(timeout = 10000)
public void testJobPriorityChange() throws Exception {
org.apache.hadoop.mapreduce.JobID jid = new JobID("001", 1);
JobPriorityChangeEvent test = new JobPriorityChangeEvent(jid,
JobPriority.LOW);
assertEquals(test.getJobId().toString(), jid.toString());
assertEquals(test.getPriority(), JobPriority.LOW);
}
@Test(timeout = 10000)
public void testJobQueueChange() throws Exception {
org.apache.hadoop.mapreduce.JobID jid = new JobID("001", 1);
JobQueueChangeEvent test = new JobQueueChangeEvent(jid,
"newqueue");
assertEquals(test.getJobId().toString(), jid.toString());
assertEquals(test.getJobQueueName(), "newqueue");
}
/**
* Simple test of TaskUpdatedEvent and TaskUpdated.
*
* @throws Exception
*/
@Test(timeout = 10000)
public void testTaskUpdated() throws Exception {
JobID jid = new JobID("001", 1);
TaskID tid = new TaskID(jid, TaskType.REDUCE, 2);
TaskUpdatedEvent test = new TaskUpdatedEvent(tid, 1234L);
assertEquals(test.getTaskId().toString(), tid.toString());
assertEquals(test.getFinishTime(), 1234L);
}
/*
* Test EventReader: it should read the list of events and return an
* instance of HistoryEvent for each one. Different HistoryEvent types
* should carry different datum types.
*/
@Test(timeout = 10000)
public void testEvents() throws Exception {
EventReader reader = new EventReader(new DataInputStream(
new ByteArrayInputStream(getEvents())));
HistoryEvent e = reader.getNextEvent();
assertTrue(e.getEventType().equals(EventType.JOB_PRIORITY_CHANGED));
assertEquals("ID", ((JobPriorityChange) e.getDatum()).getJobid().toString());
e = reader.getNextEvent();
assertTrue(e.getEventType().equals(EventType.JOB_STATUS_CHANGED));
assertEquals("ID", ((JobStatusChanged) e.getDatum()).getJobid().toString());
e = reader.getNextEvent();
assertTrue(e.getEventType().equals(EventType.TASK_UPDATED));
assertEquals("ID", ((TaskUpdated) e.getDatum()).getTaskid().toString());
e = reader.getNextEvent();
assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_KILLED));
assertEquals(taskId,
((TaskAttemptUnsuccessfulCompletion) e.getDatum()).getTaskid().toString());
e = reader.getNextEvent();
assertTrue(e.getEventType().equals(EventType.JOB_KILLED));
assertEquals("ID",
((JobUnsuccessfulCompletion) e.getDatum()).getJobid().toString());
e = reader.getNextEvent();
assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_STARTED));
assertEquals(taskId,
((TaskAttemptStarted) e.getDatum()).getTaskid().toString());
e = reader.getNextEvent();
assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_FINISHED));
assertEquals(taskId,
((TaskAttemptFinished) e.getDatum()).getTaskid().toString());
e = reader.getNextEvent();
assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_KILLED));
assertEquals(taskId,
((TaskAttemptUnsuccessfulCompletion) e.getDatum()).getTaskid().toString());
e = reader.getNextEvent();
assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_KILLED));
assertEquals(taskId,
((TaskAttemptUnsuccessfulCompletion) e.getDatum()).getTaskid().toString());
e = reader.getNextEvent();
assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_STARTED));
assertEquals(taskId,
((TaskAttemptStarted) e.getDatum()).getTaskid().toString());
e = reader.getNextEvent();
assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_FINISHED));
assertEquals(taskId,
((TaskAttemptFinished) e.getDatum()).getTaskid().toString());
e = reader.getNextEvent();
assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_KILLED));
assertEquals(taskId,
((TaskAttemptUnsuccessfulCompletion) e.getDatum()).getTaskid().toString());
e = reader.getNextEvent();
assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_KILLED));
assertEquals(taskId,
((TaskAttemptUnsuccessfulCompletion) e.getDatum()).getTaskid().toString());
reader.close();
}
/*
* Builds a byte array containing the serialized history events.
*/
private byte[] getEvents() throws Exception {
ByteArrayOutputStream output = new ByteArrayOutputStream();
FSDataOutputStream fsOutput = new FSDataOutputStream(output,
new FileSystem.Statistics("scheme"));
EventWriter writer = new EventWriter(fsOutput,
EventWriter.WriteMode.JSON);
writer.write(getJobPriorityChangedEvent());
writer.write(getJobStatusChangedEvent());
writer.write(getTaskUpdatedEvent());
writer.write(getReduceAttemptKilledEvent());
writer.write(getJobKilledEvent());
writer.write(getSetupAttemptStartedEvent());
writer.write(getTaskAttemptFinishedEvent());
writer.write(getSetupAttemptFailedEvent());
writer.write(getSetupAttemptKilledEvent());
writer.write(getCleanupAttemptStartedEvent());
writer.write(getCleanupAttemptFinishedEvent());
writer.write(getCleanupAttemptFailedEvent());
writer.write(getCleanupAttemptKilledEvent());
writer.flush();
writer.close();
return output.toByteArray();
}
private FakeEvent getCleanupAttemptKilledEvent() {
FakeEvent result = new FakeEvent(EventType.CLEANUP_ATTEMPT_KILLED);
result.setDatum(getTaskAttemptUnsuccessfulCompletion());
return result;
}
private FakeEvent getCleanupAttemptFailedEvent() {
FakeEvent result = new FakeEvent(EventType.CLEANUP_ATTEMPT_FAILED);
result.setDatum(getTaskAttemptUnsuccessfulCompletion());
return result;
}
private TaskAttemptUnsuccessfulCompletion getTaskAttemptUnsuccessfulCompletion() {
TaskAttemptUnsuccessfulCompletion datum = new TaskAttemptUnsuccessfulCompletion();
datum.setAttemptId("attempt_1_2_r3_4_5");
datum.setClockSplits(Arrays.asList(1, 2, 3));
datum.setCpuUsages(Arrays.asList(100, 200, 300));
datum.setError("Error");
datum.setFinishTime(2L);
datum.setHostname("hostname");
datum.setRackname("rackname");
datum.setPhysMemKbytes(Arrays.asList(1000, 2000, 3000));
datum.setTaskid(taskId);
datum.setPort(1000);
datum.setTaskType("REDUCE");
datum.setStatus("STATUS");
datum.setCounters(getCounters());
datum.setVMemKbytes(Arrays.asList(1000, 2000, 3000));
return datum;
}
private JhCounters getCounters() {
JhCounters counters = new JhCounters();
counters.setGroups(new ArrayList<JhCounterGroup>(0));
counters.setName("name");
return counters;
}
private FakeEvent getCleanupAttemptFinishedEvent() {
FakeEvent result = new FakeEvent(EventType.CLEANUP_ATTEMPT_FINISHED);
TaskAttemptFinished datum = new TaskAttemptFinished();
datum.setAttemptId("attempt_1_2_r3_4_5");
datum.setCounters(getCounters());
datum.setFinishTime(2L);
datum.setHostname("hostname");
datum.setRackname("rackName");
datum.setState("state");
datum.setTaskid(taskId);
datum.setTaskStatus("taskStatus");
datum.setTaskType("REDUCE");
result.setDatum(datum);
return result;
}
private FakeEvent getCleanupAttemptStartedEvent() {
FakeEvent result = new FakeEvent(EventType.CLEANUP_ATTEMPT_STARTED);
TaskAttemptStarted datum = new TaskAttemptStarted();
datum.setAttemptId("attempt_1_2_r3_4_5");
datum.setAvataar("avatar");
datum.setContainerId("containerId");
datum.setHttpPort(10000);
datum.setLocality("locality");
datum.setShufflePort(10001);
datum.setStartTime(1L);
datum.setTaskid(taskId);
datum.setTaskType("taskType");
datum.setTrackerName("trackerName");
result.setDatum(datum);
return result;
}
private FakeEvent getSetupAttemptKilledEvent() {
FakeEvent result = new FakeEvent(EventType.SETUP_ATTEMPT_KILLED);
result.setDatum(getTaskAttemptUnsuccessfulCompletion());
return result;
}
private FakeEvent getSetupAttemptFailedEvent() {
FakeEvent result = new FakeEvent(EventType.SETUP_ATTEMPT_FAILED);
result.setDatum(getTaskAttemptUnsuccessfulCompletion());
return result;
}
private FakeEvent getTaskAttemptFinishedEvent() {
FakeEvent result = new FakeEvent(EventType.SETUP_ATTEMPT_FINISHED);
TaskAttemptFinished datum = new TaskAttemptFinished();
datum.setAttemptId("attempt_1_2_r3_4_5");
datum.setCounters(getCounters());
datum.setFinishTime(2L);
datum.setHostname("hostname");
datum.setRackname("rackname");
datum.setState("state");
datum.setTaskid(taskId);
datum.setTaskStatus("taskStatus");
datum.setTaskType("REDUCE");
result.setDatum(datum);
return result;
}
private FakeEvent getSetupAttemptStartedEvent() {
FakeEvent result = new FakeEvent(EventType.SETUP_ATTEMPT_STARTED);
TaskAttemptStarted datum = new TaskAttemptStarted();
datum.setAttemptId("ID");
datum.setAvataar("avataar");
datum.setContainerId("containerId");
datum.setHttpPort(10000);
datum.setLocality("locality");
datum.setShufflePort(10001);
datum.setStartTime(1L);
datum.setTaskid(taskId);
datum.setTaskType("taskType");
datum.setTrackerName("trackerName");
result.setDatum(datum);
return result;
}
private FakeEvent getJobKilledEvent() {
FakeEvent result = new FakeEvent(EventType.JOB_KILLED);
JobUnsuccessfulCompletion datum = new JobUnsuccessfulCompletion();
datum.setFinishedMaps(1);
datum.setFinishedReduces(2);
datum.setFinishTime(3L);
datum.setJobid("ID");
datum.setJobStatus("STATUS");
datum.setDiagnostics(JobImpl.JOB_KILLED_DIAG);
result.setDatum(datum);
return result;
}
private FakeEvent getReduceAttemptKilledEvent() {
FakeEvent result = new FakeEvent(EventType.REDUCE_ATTEMPT_KILLED);
result.setDatum(getTaskAttemptUnsuccessfulCompletion());
return result;
}
private FakeEvent getJobPriorityChangedEvent() {
FakeEvent result = new FakeEvent(EventType.JOB_PRIORITY_CHANGED);
JobPriorityChange datum = new JobPriorityChange();
datum.setJobid("ID");
datum.setPriority("priority");
result.setDatum(datum);
return result;
}
private FakeEvent getJobStatusChangedEvent() {
FakeEvent result = new FakeEvent(EventType.JOB_STATUS_CHANGED);
JobStatusChanged datum = new JobStatusChanged();
datum.setJobid("ID");
datum.setJobStatus("newStatus");
result.setDatum(datum);
return result;
}
private FakeEvent getTaskUpdatedEvent() {
FakeEvent result = new FakeEvent(EventType.TASK_UPDATED);
TaskUpdated datum = new TaskUpdated();
datum.setFinishTime(2L);
datum.setTaskid("ID");
result.setDatum(datum);
return result;
}
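/**
* Minimal HistoryEvent implementation that carries an arbitrary Avro datum
* for the given event type.
*/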
private class FakeEvent implements HistoryEvent {
private EventType eventType;
private Object datum;
public FakeEvent(EventType eventType) {
this.eventType = eventType;
}
@Override
public EventType getEventType() {
return eventType;
}
@Override
public Object getDatum() {
return datum;
}
@Override
public void setDatum(Object datum) {
this.datum = datum;
}
}
}
| 13,893
| 32.805353
| 86
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobSummary.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.jobhistory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class TestJobSummary {
private static final Log LOG = LogFactory.getLog(TestJobSummary.class);
private JobSummary summary = new JobSummary();
@Before
public void before() {
JobId mockJobId = mock(JobId.class);
when(mockJobId.toString()).thenReturn("testJobId");
summary.setJobId(mockJobId);
summary.setJobSubmitTime(2L);
summary.setJobLaunchTime(3L);
summary.setFirstMapTaskLaunchTime(4L);
summary.setFirstReduceTaskLaunchTime(5L);
summary.setJobFinishTime(6L);
summary.setNumFinishedMaps(1);
summary.setNumFailedMaps(0);
summary.setNumFinishedReduces(1);
summary.setNumFailedReduces(0);
summary.setUser("testUser");
summary.setQueue("testQueue");
summary.setJobStatus("testJobStatus");
summary.setMapSlotSeconds(7);
summary.setReduceSlotSeconds(8);
summary.setJobName("testName");
}
@Test
public void testEscapeJobSummary() {
// verify newlines are escaped
summary.setJobName("aa\rbb\ncc\r\ndd");
String out = summary.getJobSummaryString();
LOG.info("summary: " + out);
Assert.assertFalse(out.contains("\r"));
Assert.assertFalse(out.contains("\n"));
Assert.assertTrue(out.contains("aa\\rbb\\ncc\\r\\ndd"));
}
}
| 2,392
| 33.681159
| 75
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/api/records/TestTaskAttemptReport.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.records;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.TaskAttemptReportPBImpl;
import org.apache.hadoop.mapreduce.v2.app.MockJobs;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos;
import org.apache.hadoop.yarn.util.Records;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertTrue;
public class TestTaskAttemptReport {
@Test
public void testSetRawCounters() {
TaskAttemptReport report = Records.newRecord(TaskAttemptReport.class);
org.apache.hadoop.mapreduce.Counters rCounters = MockJobs.newCounters();
report.setRawCounters(rCounters);
Counters counters = report.getCounters();
assertNotEquals(null, counters);
}
@Test
public void testBuildImplicitRawCounters() {
TaskAttemptReportPBImpl report = new TaskAttemptReportPBImpl();
org.apache.hadoop.mapreduce.Counters rCounters = MockJobs.newCounters();
report.setRawCounters(rCounters);
MRProtos.TaskAttemptReportProto protoVal = report.getProto();
Counters counters = report.getCounters();
assertTrue(protoVal.hasCounters());
}
@Test
public void testCountersOverRawCounters() {
TaskAttemptReport report = Records.newRecord(TaskAttemptReport.class);
org.apache.hadoop.mapreduce.Counters rCounters = MockJobs.newCounters();
Counters altCounters = TypeConverter.toYarn(rCounters);
report.setRawCounters(rCounters);
report.setCounters(altCounters);
Counters counters = report.getCounters();
assertNotEquals(null, counters);
assertNotEquals(rCounters, altCounters);
assertEquals(counters, altCounters);
}
@Test
public void testUninitializedCounters() {
// Create basic class
TaskAttemptReport report = Records.newRecord(TaskAttemptReport.class);
// Verify properties initialized to null
assertEquals(null, report.getCounters());
assertEquals(null, report.getRawCounters());
}
@Test
public void testSetRawCountersToNull() {
// Create basic class
TaskAttemptReport report = Records.newRecord(TaskAttemptReport.class);
// Set raw counters to null
report.setRawCounters(null);
// Verify properties still null
assertEquals(null, report.getCounters());
assertEquals(null, report.getRawCounters());
}
@Test
public void testSetCountersToNull() {
// Create basic class
TaskAttemptReport report = Records.newRecord(TaskAttemptReport.class);
// Set counters to null
report.setCounters(null);
// Verify properties still null
assertEquals(null, report.getCounters());
assertEquals(null, report.getRawCounters());
}
@Test
public void testSetNonNullCountersToNull() {
// Create basic class
TaskAttemptReport report = Records.newRecord(TaskAttemptReport.class);
// Set raw counters
org.apache.hadoop.mapreduce.Counters rCounters = MockJobs.newCounters();
report.setRawCounters(rCounters);
// Verify getCounters converts properly from raw to real
Counters counters = report.getCounters();
assertNotEquals(null, counters);
// Clear counters to null and then verify
report.setCounters(null);
assertEquals(null, report.getCounters());
assertEquals(null, report.getRawCounters());
}
@Test
public void testSetNonNullRawCountersToNull() {
// Create basic class
TaskAttemptReport report = Records.newRecord(TaskAttemptReport.class);
// Set raw counters
org.apache.hadoop.mapreduce.Counters rCounters = MockJobs.newCounters();
report.setRawCounters(rCounters);
// Verify getCounters converts properly from raw to real
Counters counters = report.getCounters();
assertNotEquals(null, counters);
// Clear counters to null and then verify
report.setRawCounters(null);
assertEquals(null, report.getCounters());
assertEquals(null, report.getRawCounters());
}
}
| 4,814
| 35.477273
| 82
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/api/records/TestTaskReport.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.records;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.TaskReportPBImpl;
import org.apache.hadoop.mapreduce.v2.app.MockJobs;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos;
import org.apache.hadoop.yarn.util.Records;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertTrue;
public class TestTaskReport {
@Test
public void testSetRawCounters() {
// Create basic class
TaskReport report = Records.newRecord(TaskReport.class);
org.apache.hadoop.mapreduce.Counters rCounters = MockJobs.newCounters();
// Set raw counters
report.setRawCounters(rCounters);
// Verify getCounters converts properly from raw to real
Counters counters = report.getCounters();
assertNotEquals(null, counters);
}
@Test
public void testBuildImplicitRawCounters() {
// Create basic class
TaskReportPBImpl report = new TaskReportPBImpl();
org.apache.hadoop.mapreduce.Counters rCounters = MockJobs.newCounters();
// Set raw counters
report.setRawCounters(rCounters);
// Verify getProto method implicitly converts/sets real counters
MRProtos.TaskReportProto protoVal = report.getProto();
assertTrue(protoVal.hasCounters());
}
@Test
public void testCountersOverRawCounters() {
// Create basic class
TaskReport report = Records.newRecord(TaskReport.class);
org.apache.hadoop.mapreduce.Counters rCounters = MockJobs.newCounters();
Counters altCounters = TypeConverter.toYarn(rCounters);
// Set raw counters
report.setRawCounters(rCounters);
// Set real counters
report.setCounters(altCounters);
// Verify real counters has priority over raw
Counters counters = report.getCounters();
assertNotEquals(null, counters);
assertNotEquals(rCounters, altCounters);
assertEquals(counters, altCounters);
}
@Test
public void testUninitializedCounters() {
// Create basic class
TaskReport report = Records.newRecord(TaskReport.class);
// Verify properties initialized to null
assertEquals(null, report.getCounters());
assertEquals(null, report.getRawCounters());
}
@Test
public void testSetRawCountersToNull() {
// Create basic class
TaskReport report = Records.newRecord(TaskReport.class);
// Set raw counters to null
report.setRawCounters(null);
// Verify properties still null
assertEquals(null, report.getCounters());
assertEquals(null, report.getRawCounters());
}
@Test
public void testSetCountersToNull() {
// Create basic class
TaskReport report = Records.newRecord(TaskReport.class);
// Set counters to null
report.setCounters(null);
// Verify properties still null
assertEquals(null, report.getCounters());
assertEquals(null, report.getRawCounters());
}
@Test
public void testSetNonNullCountersToNull() {
// Create basic class
TaskReport report = Records.newRecord(TaskReport.class);
// Set raw counters
org.apache.hadoop.mapreduce.Counters rCounters = MockJobs.newCounters();
report.setRawCounters(rCounters);
// Verify getCounters converts properly from raw to real
Counters counters = report.getCounters();
assertNotEquals(null, counters);
// Clear counters to null and then verify
report.setCounters(null);
assertEquals(null, report.getCounters());
assertEquals(null, report.getRawCounters());
}
@Test
public void testSetNonNullRawCountersToNull() {
// Create basic class
TaskReport report = Records.newRecord(TaskReport.class);
// Set raw counters
org.apache.hadoop.mapreduce.Counters rCounters = MockJobs.newCounters();
report.setRawCounters(rCounters);
// Verify getCounters converts properly from raw to real
Counters counters = report.getCounters();
assertNotEquals(null, counters);
// Clear counters to null and then verify
report.setRawCounters(null);
assertEquals(null, report.getCounters());
assertEquals(null, report.getRawCounters());
}
}
| 4,988
| 34.892086
| 76
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.PrintStream;
import java.net.Proxy;
import java.net.URI;
import java.net.URISyntaxException;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobContext;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
import org.apache.hadoop.mapreduce.v2.app.job.JobStateInternal;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;
import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl;
import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent;
import org.apache.hadoop.mapreduce.v2.app.rm.RMCommunicator;
import org.apache.hadoop.mapreduce.v2.app.rm.RMHeartbeatHandler;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.junit.Assert;
import org.junit.Test;
/**
* Tests job end notification
*
*/
@SuppressWarnings("unchecked")
public class TestJobEndNotifier extends JobEndNotifier {
//Test maximum retries is capped by MR_JOB_END_NOTIFICATION_MAX_ATTEMPTS
private void testNumRetries(Configuration conf) {
conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_ATTEMPTS, "0");
conf.set(MRJobConfig.MR_JOB_END_RETRY_ATTEMPTS, "10");
setConf(conf);
Assert.assertTrue("Expected numTries to be 0, but was " + numTries,
numTries == 0 );
conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_ATTEMPTS, "1");
setConf(conf);
Assert.assertTrue("Expected numTries to be 1, but was " + numTries,
numTries == 1 );
conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_ATTEMPTS, "20");
setConf(conf);
Assert.assertTrue("Expected numTries to be 11, but was " + numTries,
numTries == 11 ); //11 because number of _retries_ is 10
}
//Test maximum retry interval is capped by
//MR_JOB_END_NOTIFICATION_MAX_RETRY_INTERVAL
private void testWaitInterval(Configuration conf) {
conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_RETRY_INTERVAL, "5000");
conf.set(MRJobConfig.MR_JOB_END_RETRY_INTERVAL, "1000");
setConf(conf);
Assert.assertTrue("Expected waitInterval to be 1000, but was "
+ waitInterval, waitInterval == 1000);
conf.set(MRJobConfig.MR_JOB_END_RETRY_INTERVAL, "10000");
setConf(conf);
Assert.assertTrue("Expected waitInterval to be 5000, but was "
+ waitInterval, waitInterval == 5000);
//Test negative numbers are set to default
conf.set(MRJobConfig.MR_JOB_END_RETRY_INTERVAL, "-10");
setConf(conf);
Assert.assertTrue("Expected waitInterval to be 5000, but was "
+ waitInterval, waitInterval == 5000);
}
private void testTimeout(Configuration conf) {
conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_TIMEOUT, "1000");
setConf(conf);
Assert.assertTrue("Expected timeout to be 1000, but was "
+ timeout, timeout == 1000);
}
private void testProxyConfiguration(Configuration conf) {
conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_PROXY, "somehost");
setConf(conf);
Assert.assertTrue("Proxy shouldn't be set because port wasn't specified",
proxyToUse.type() == Proxy.Type.DIRECT);
conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_PROXY, "somehost:someport");
setConf(conf);
Assert.assertTrue("Proxy shouldn't be set because port wasn't numeric",
proxyToUse.type() == Proxy.Type.DIRECT);
conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_PROXY, "somehost:1000");
setConf(conf);
Assert.assertTrue("Proxy should have been set but wasn't ",
proxyToUse.toString().equals("HTTP @ somehost:1000"));
conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_PROXY, "socks@somehost:1000");
setConf(conf);
Assert.assertTrue("Proxy should have been socks but wasn't ",
proxyToUse.toString().equals("SOCKS @ somehost:1000"));
conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_PROXY, "SOCKS@somehost:1000");
setConf(conf);
Assert.assertTrue("Proxy should have been socks but wasn't ",
proxyToUse.toString().equals("SOCKS @ somehost:1000"));
conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_PROXY, "sfafn@somehost:1000");
setConf(conf);
Assert.assertTrue("Proxy should have been http but wasn't ",
proxyToUse.toString().equals("HTTP @ somehost:1000"));
}
/**
* Test that setting parameters has the desired effect
*/
@Test
public void checkConfiguration() {
Configuration conf = new Configuration();
testNumRetries(conf);
testWaitInterval(conf);
testTimeout(conf);
testProxyConfiguration(conf);
}
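//Count how many notification attempts notifyURLOnce makes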
protected int notificationCount = 0;
@Override
protected boolean notifyURLOnce() {
boolean success = super.notifyURLOnce();
notificationCount++;
return success;
}
//Check retries happen as intended
@Test
public void testNotifyRetries() throws InterruptedException {
JobConf conf = new JobConf();
conf.set(MRJobConfig.MR_JOB_END_RETRY_ATTEMPTS, "0");
conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_ATTEMPTS, "1");
conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_URL, "http://nonexistent");
conf.set(MRJobConfig.MR_JOB_END_RETRY_INTERVAL, "5000");
conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_RETRY_INTERVAL, "5000");
JobReport jobReport = mock(JobReport.class);
long startTime = System.currentTimeMillis();
this.notificationCount = 0;
this.setConf(conf);
this.notify(jobReport);
long endTime = System.currentTimeMillis();
Assert.assertEquals("Only 1 try was expected but was : "
+ this.notificationCount, 1, this.notificationCount);
Assert.assertTrue("Should have taken more than 5 seconds it took "
+ (endTime - startTime), endTime - startTime > 5000);
conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_ATTEMPTS, "3");
conf.set(MRJobConfig.MR_JOB_END_RETRY_ATTEMPTS, "3");
conf.set(MRJobConfig.MR_JOB_END_RETRY_INTERVAL, "3000");
conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_RETRY_INTERVAL, "3000");
startTime = System.currentTimeMillis();
this.notificationCount = 0;
this.setConf(conf);
this.notify(jobReport);
endTime = System.currentTimeMillis();
Assert.assertEquals("Only 3 retries were expected but was : "
+ this.notificationCount, 3, this.notificationCount);
Assert.assertTrue("Should have taken more than 9 seconds it took "
+ (endTime - startTime), endTime - startTime > 9000);
}
@Test
public void testNotificationOnLastRetryNormalShutdown() throws Exception {
HttpServer2 server = startHttpServer();
// Act like it is the second attempt. Default max attempts is 2
MRApp app = spy(new MRAppWithCustomContainerAllocator(
2, 2, true, this.getClass().getName(), true, 2, true));
doNothing().when(app).sysexit();
JobConf conf = new JobConf();
conf.set(JobContext.MR_JOB_END_NOTIFICATION_URL,
JobEndServlet.baseUrl + "jobend?jobid=$jobId&status=$jobStatus");
JobImpl job = (JobImpl)app.submit(conf);
app.waitForInternalState(job, JobStateInternal.SUCCEEDED);
// Unregistration succeeds: successfullyUnregistered is set
app.shutDownJob();
Assert.assertTrue(app.isLastAMRetry());
Assert.assertEquals(1, JobEndServlet.calledTimes);
Assert.assertEquals("jobid=" + job.getID() + "&status=SUCCEEDED",
JobEndServlet.requestUri.getQuery());
Assert.assertEquals(JobState.SUCCEEDED.toString(),
JobEndServlet.foundJobState);
server.stop();
}
@Test
public void testAbsentNotificationOnNotLastRetryUnregistrationFailure()
throws Exception {
HttpServer2 server = startHttpServer();
MRApp app = spy(new MRAppWithCustomContainerAllocator(2, 2, false,
this.getClass().getName(), true, 1, false));
doNothing().when(app).sysexit();
JobConf conf = new JobConf();
conf.set(JobContext.MR_JOB_END_NOTIFICATION_URL,
JobEndServlet.baseUrl + "jobend?jobid=$jobId&status=$jobStatus");
JobImpl job = (JobImpl)app.submit(conf);
app.waitForState(job, JobState.RUNNING);
app.getContext().getEventHandler()
.handle(new JobEvent(app.getJobId(), JobEventType.JOB_AM_REBOOT));
app.waitForInternalState(job, JobStateInternal.REBOOT);
// Now shutdown.
// Unregistration fails: isLastAMRetry is recalculated, and this is not the last retry
app.shutDownJob();
// Not the last AM attempt, so the user should see that the job is still running.
app.waitForState(job, JobState.RUNNING);
Assert.assertFalse(app.isLastAMRetry());
Assert.assertEquals(0, JobEndServlet.calledTimes);
Assert.assertNull(JobEndServlet.requestUri);
Assert.assertNull(JobEndServlet.foundJobState);
server.stop();
}
@Test
public void testNotificationOnLastRetryUnregistrationFailure()
throws Exception {
HttpServer2 server = startHttpServer();
MRApp app = spy(new MRAppWithCustomContainerAllocator(2, 2, false,
this.getClass().getName(), true, 2, false));
// Currently isLastRetry is always false at the beginning of MRAppMaster,
// unless the staging area already exists or a commit has already started.
// Manually set isLastRetry to true here; it should be reset to false when
// unregistration fails.
app.isLastAMRetry = true;
doNothing().when(app).sysexit();
JobConf conf = new JobConf();
conf.set(JobContext.MR_JOB_END_NOTIFICATION_URL,
JobEndServlet.baseUrl + "jobend?jobid=$jobId&status=$jobStatus");
JobImpl job = (JobImpl)app.submit(conf);
app.waitForState(job, JobState.RUNNING);
app.getContext().getEventHandler()
.handle(new JobEvent(app.getJobId(), JobEventType.JOB_AM_REBOOT));
app.waitForInternalState(job, JobStateInternal.REBOOT);
// Now shut down. The user should see the FAILED state.
// Unregistration fails, so isLastAMRetry is recalculated.
// The reboot stops the service internally, so we don't need to shut down twice.
app.waitForServiceToStop(10000);
Assert.assertFalse(app.isLastAMRetry());
    // Since it's not the last retry, JobEndServlet was not called.
Assert.assertEquals(0, JobEndServlet.calledTimes);
Assert.assertNull(JobEndServlet.requestUri);
Assert.assertNull(JobEndServlet.foundJobState);
server.stop();
}
private static HttpServer2 startHttpServer() throws Exception {
new File(System.getProperty(
"build.webapps", "build/webapps") + "/test").mkdirs();
HttpServer2 server = new HttpServer2.Builder().setName("test")
.addEndpoint(URI.create("http://localhost:0"))
.setFindPort(true).build();
server.addServlet("jobend", "/jobend", JobEndServlet.class);
server.start();
JobEndServlet.calledTimes = 0;
JobEndServlet.requestUri = null;
JobEndServlet.baseUrl = "http://localhost:"
+ server.getConnectorAddress(0).getPort() + "/";
JobEndServlet.foundJobState = null;
return server;
}
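  // Servlet that records each job-end notification request so the tests can
  // assert on the callback.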
@SuppressWarnings("serial")
public static class JobEndServlet extends HttpServlet {
public static volatile int calledTimes = 0;
public static URI requestUri;
public static String baseUrl;
public static String foundJobState;
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response)
throws ServletException, IOException {
InputStreamReader in = new InputStreamReader(request.getInputStream());
PrintStream out = new PrintStream(response.getOutputStream());
calledTimes++;
try {
requestUri = new URI(null, null,
request.getRequestURI(), request.getQueryString(), null);
foundJobState = request.getParameter("status");
} catch (URISyntaxException e) {
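        // Ignored: the URI is rebuilt from a live servlet request, so this
        // should not occur.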
}
in.close();
out.close();
}
}
private class MRAppWithCustomContainerAllocator extends MRApp {
private boolean crushUnregistration;
public MRAppWithCustomContainerAllocator(int maps, int reduces,
boolean autoComplete, String testName, boolean cleanOnStart,
int startCount, boolean crushUnregistration) {
super(maps, reduces, autoComplete, testName, cleanOnStart, startCount,
false);
this.crushUnregistration = crushUnregistration;
}
@Override
protected ContainerAllocator createContainerAllocator(
ClientService clientService, AppContext context) {
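      // Stub the context so the RMCommunicator-based allocator does not touch
      // the real event handler or application id.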
context = spy(context);
when(context.getEventHandler()).thenReturn(null);
when(context.getApplicationID()).thenReturn(null);
return new CustomContainerAllocator(this, context);
}
private class CustomContainerAllocator
extends RMCommunicator
implements ContainerAllocator, RMHeartbeatHandler {
private MRAppWithCustomContainerAllocator app;
private MRAppContainerAllocator allocator =
new MRAppContainerAllocator();
public CustomContainerAllocator(
MRAppWithCustomContainerAllocator app, AppContext context) {
super(null, context);
this.app = app;
}
@Override
public void serviceInit(Configuration conf) {
}
@Override
public void serviceStart() {
}
@Override
public void serviceStop() {
unregister();
}
@Override
protected void doUnregistration()
throws YarnException, IOException, InterruptedException {
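        // Either fake a successful unregistration or force a failure,
        // depending on the test flag.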
if (crushUnregistration) {
app.successfullyUnregistered.set(true);
} else {
throw new YarnException("test exception");
}
}
@Override
public void handle(ContainerAllocatorEvent event) {
allocator.handle(event);
}
@Override
public long getLastHeartbeatTime() {
return allocator.getLastHeartbeatTime();
}
@Override
public void runOnNextHeartbeat(Runnable callback) {
allocator.runOnNextHeartbeat(callback);
}
@Override
protected void heartbeat() throws Exception {
}
}
}
}
| 15,426
| 36.997537
| 79
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFetchFailure.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app;
import static org.junit.Assert.assertEquals;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.TaskCompletionEvent;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.Phase;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEventStatus;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskAttemptFetchFailureEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent;
import org.apache.hadoop.yarn.event.EventHandler;
import org.junit.Assert;
import org.junit.Test;
public class TestFetchFailure {
@Test
public void testFetchFailure() throws Exception {
MRApp app = new MRApp(1, 1, false, this.getClass().getName(), true);
Configuration conf = new Configuration();
// map -> reduce -> fetch-failure -> map retry is incompatible with
// sequential, single-task-attempt approach in uber-AM, so disable:
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
Job job = app.submit(conf);
app.waitForState(job, JobState.RUNNING);
//all maps would be running
Assert.assertEquals("Num tasks not correct",
2, job.getTasks().size());
Iterator<Task> it = job.getTasks().values().iterator();
Task mapTask = it.next();
Task reduceTask = it.next();
//wait for Task state move to RUNNING
app.waitForState(mapTask, TaskState.RUNNING);
TaskAttempt mapAttempt1 = mapTask.getAttempts().values().iterator().next();
app.waitForState(mapAttempt1, TaskAttemptState.RUNNING);
//send the done signal to the map attempt
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(mapAttempt1.getID(),
TaskAttemptEventType.TA_DONE));
// wait for map success
app.waitForState(mapTask, TaskState.SUCCEEDED);
TaskAttemptCompletionEvent[] events =
job.getTaskAttemptCompletionEvents(0, 100);
Assert.assertEquals("Num completion events not correct",
1, events.length);
Assert.assertEquals("Event status not correct",
TaskAttemptCompletionEventStatus.SUCCEEDED, events[0].getStatus());
// wait for reduce to start running
app.waitForState(reduceTask, TaskState.RUNNING);
TaskAttempt reduceAttempt =
reduceTask.getAttempts().values().iterator().next();
app.waitForState(reduceAttempt, TaskAttemptState.RUNNING);
    //send 3 fetch failures from reduce to trigger map re-execution
sendFetchFailure(app, reduceAttempt, mapAttempt1, "host");
sendFetchFailure(app, reduceAttempt, mapAttempt1, "host");
sendFetchFailure(app, reduceAttempt, mapAttempt1, "host");
//wait for map Task state move back to RUNNING
app.waitForState(mapTask, TaskState.RUNNING);
//map attempt must have become FAILED
Assert.assertEquals("Map TaskAttempt state not correct",
TaskAttemptState.FAILED, mapAttempt1.getState());
Assert.assertEquals("Num attempts in Map Task not correct",
2, mapTask.getAttempts().size());
Iterator<TaskAttempt> atIt = mapTask.getAttempts().values().iterator();
atIt.next();
TaskAttempt mapAttempt2 = atIt.next();
app.waitForState(mapAttempt2, TaskAttemptState.RUNNING);
//send the done signal to the second map attempt
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(mapAttempt2.getID(),
TaskAttemptEventType.TA_DONE));
// wait for map success
app.waitForState(mapTask, TaskState.SUCCEEDED);
//send done to reduce
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(reduceAttempt.getID(),
TaskAttemptEventType.TA_DONE));
app.waitForState(job, JobState.SUCCEEDED);
//previous completion event now becomes obsolete
Assert.assertEquals("Event status not correct",
TaskAttemptCompletionEventStatus.OBSOLETE, events[0].getStatus());
events = job.getTaskAttemptCompletionEvents(0, 100);
Assert.assertEquals("Num completion events not correct",
4, events.length);
Assert.assertEquals("Event map attempt id not correct",
mapAttempt1.getID(), events[0].getAttemptId());
Assert.assertEquals("Event map attempt id not correct",
mapAttempt1.getID(), events[1].getAttemptId());
Assert.assertEquals("Event map attempt id not correct",
mapAttempt2.getID(), events[2].getAttemptId());
Assert.assertEquals("Event redude attempt id not correct",
reduceAttempt.getID(), events[3].getAttemptId());
Assert.assertEquals("Event status not correct for map attempt1",
TaskAttemptCompletionEventStatus.OBSOLETE, events[0].getStatus());
Assert.assertEquals("Event status not correct for map attempt1",
TaskAttemptCompletionEventStatus.FAILED, events[1].getStatus());
Assert.assertEquals("Event status not correct for map attempt2",
TaskAttemptCompletionEventStatus.SUCCEEDED, events[2].getStatus());
Assert.assertEquals("Event status not correct for reduce attempt1",
TaskAttemptCompletionEventStatus.SUCCEEDED, events[3].getStatus());
    TaskCompletionEvent[] mapEvents =
      job.getMapAttemptCompletionEvents(0, 2);
    TaskCompletionEvent[] convertedEvents = TypeConverter.fromYarn(events);
Assert.assertEquals("Incorrect number of map events", 2, mapEvents.length);
Assert.assertArrayEquals("Unexpected map events",
Arrays.copyOfRange(convertedEvents, 0, 2), mapEvents);
mapEvents = job.getMapAttemptCompletionEvents(2, 200);
Assert.assertEquals("Incorrect number of map events", 1, mapEvents.length);
Assert.assertEquals("Unexpected map event", convertedEvents[2],
mapEvents[0]);
}
/**
 * This tests that if a map attempt failed (say due to fetch failures),
* then it gets re-run. When the next map attempt is running, if the AM dies,
* then, on AM re-run, the AM does not incorrectly remember the first failed
* attempt. Currently recovery does not recover running tasks. Effectively,
* the AM re-runs the maps from scratch.
*/
@Test
public void testFetchFailureWithRecovery() throws Exception {
int runCount = 0;
MRApp app = new MRAppWithHistory(1, 1, false, this.getClass().getName(), true, ++runCount);
Configuration conf = new Configuration();
// map -> reduce -> fetch-failure -> map retry is incompatible with
// sequential, single-task-attempt approach in uber-AM, so disable:
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
Job job = app.submit(conf);
app.waitForState(job, JobState.RUNNING);
//all maps would be running
Assert.assertEquals("Num tasks not correct",
2, job.getTasks().size());
Iterator<Task> it = job.getTasks().values().iterator();
Task mapTask = it.next();
Task reduceTask = it.next();
//wait for Task state move to RUNNING
app.waitForState(mapTask, TaskState.RUNNING);
TaskAttempt mapAttempt1 = mapTask.getAttempts().values().iterator().next();
app.waitForState(mapAttempt1, TaskAttemptState.RUNNING);
//send the done signal to the map attempt
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(mapAttempt1.getID(),
TaskAttemptEventType.TA_DONE));
// wait for map success
app.waitForState(mapTask, TaskState.SUCCEEDED);
TaskAttemptCompletionEvent[] events =
job.getTaskAttemptCompletionEvents(0, 100);
Assert.assertEquals("Num completion events not correct",
1, events.length);
Assert.assertEquals("Event status not correct",
TaskAttemptCompletionEventStatus.SUCCEEDED, events[0].getStatus());
// wait for reduce to start running
app.waitForState(reduceTask, TaskState.RUNNING);
TaskAttempt reduceAttempt =
reduceTask.getAttempts().values().iterator().next();
app.waitForState(reduceAttempt, TaskAttemptState.RUNNING);
    //send 3 fetch failures from reduce to trigger map re-execution
sendFetchFailure(app, reduceAttempt, mapAttempt1, "host");
sendFetchFailure(app, reduceAttempt, mapAttempt1, "host");
sendFetchFailure(app, reduceAttempt, mapAttempt1, "host");
//wait for map Task state move back to RUNNING
app.waitForState(mapTask, TaskState.RUNNING);
    // Crash the app.
app.stop();
//rerun
app =
new MRAppWithHistory(1, 1, false, this.getClass().getName(), false,
++runCount);
conf = new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
job = app.submit(conf);
app.waitForState(job, JobState.RUNNING);
//all maps would be running
Assert.assertEquals("Num tasks not correct",
2, job.getTasks().size());
it = job.getTasks().values().iterator();
mapTask = it.next();
reduceTask = it.next();
// the map is not in a SUCCEEDED state after restart of AM
app.waitForState(mapTask, TaskState.RUNNING);
mapAttempt1 = mapTask.getAttempts().values().iterator().next();
app.waitForState(mapAttempt1, TaskAttemptState.RUNNING);
//send the done signal to the map attempt
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(mapAttempt1.getID(),
TaskAttemptEventType.TA_DONE));
// wait for map success
app.waitForState(mapTask, TaskState.SUCCEEDED);
reduceAttempt = reduceTask.getAttempts().values().iterator().next();
//send done to reduce
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(reduceAttempt.getID(),
TaskAttemptEventType.TA_DONE));
app.waitForState(job, JobState.SUCCEEDED);
events = job.getTaskAttemptCompletionEvents(0, 100);
Assert.assertEquals("Num completion events not correct", 2, events.length);
}
@Test
public void testFetchFailureMultipleReduces() throws Exception {
MRApp app = new MRApp(1, 3, false, this.getClass().getName(), true);
Configuration conf = new Configuration();
// map -> reduce -> fetch-failure -> map retry is incompatible with
// sequential, single-task-attempt approach in uber-AM, so disable:
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
Job job = app.submit(conf);
app.waitForState(job, JobState.RUNNING);
//all maps would be running
Assert.assertEquals("Num tasks not correct",
4, job.getTasks().size());
Iterator<Task> it = job.getTasks().values().iterator();
Task mapTask = it.next();
Task reduceTask = it.next();
Task reduceTask2 = it.next();
Task reduceTask3 = it.next();
//wait for Task state move to RUNNING
app.waitForState(mapTask, TaskState.RUNNING);
TaskAttempt mapAttempt1 = mapTask.getAttempts().values().iterator().next();
app.waitForState(mapAttempt1, TaskAttemptState.RUNNING);
//send the done signal to the map attempt
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(mapAttempt1.getID(),
TaskAttemptEventType.TA_DONE));
// wait for map success
app.waitForState(mapTask, TaskState.SUCCEEDED);
TaskAttemptCompletionEvent[] events =
job.getTaskAttemptCompletionEvents(0, 100);
Assert.assertEquals("Num completion events not correct",
1, events.length);
Assert.assertEquals("Event status not correct",
TaskAttemptCompletionEventStatus.SUCCEEDED, events[0].getStatus());
// wait for reduce to start running
app.waitForState(reduceTask, TaskState.RUNNING);
app.waitForState(reduceTask2, TaskState.RUNNING);
app.waitForState(reduceTask3, TaskState.RUNNING);
TaskAttempt reduceAttempt =
reduceTask.getAttempts().values().iterator().next();
app.waitForState(reduceAttempt, TaskAttemptState.RUNNING);
updateStatus(app, reduceAttempt, Phase.SHUFFLE);
TaskAttempt reduceAttempt2 =
reduceTask2.getAttempts().values().iterator().next();
app.waitForState(reduceAttempt2, TaskAttemptState.RUNNING);
updateStatus(app, reduceAttempt2, Phase.SHUFFLE);
TaskAttempt reduceAttempt3 =
reduceTask3.getAttempts().values().iterator().next();
app.waitForState(reduceAttempt3, TaskAttemptState.RUNNING);
updateStatus(app, reduceAttempt3, Phase.SHUFFLE);
    //send 2 fetch failures from reduce to prepare for map re-execution
sendFetchFailure(app, reduceAttempt, mapAttempt1, "host1");
sendFetchFailure(app, reduceAttempt2, mapAttempt1, "host2");
//We should not re-launch the map task yet
assertEquals(TaskState.SUCCEEDED, mapTask.getState());
updateStatus(app, reduceAttempt2, Phase.REDUCE);
updateStatus(app, reduceAttempt3, Phase.REDUCE);
    //send a 3rd fetch failure from reduce to trigger map re-execution
sendFetchFailure(app, reduceAttempt3, mapAttempt1, "host3");
//wait for map Task state move back to RUNNING
app.waitForState(mapTask, TaskState.RUNNING);
//map attempt must have become FAILED
Assert.assertEquals("Map TaskAttempt state not correct",
TaskAttemptState.FAILED, mapAttempt1.getState());
    Assert.assertEquals("Too many fetch failures. Failing the attempt. "
        + "Last failure reported by "
        + reduceAttempt3.getID().toString() + " from host host3",
        mapAttempt1.getDiagnostics().get(0));
Assert.assertEquals("Num attempts in Map Task not correct",
2, mapTask.getAttempts().size());
Iterator<TaskAttempt> atIt = mapTask.getAttempts().values().iterator();
atIt.next();
TaskAttempt mapAttempt2 = atIt.next();
app.waitForState(mapAttempt2, TaskAttemptState.RUNNING);
//send the done signal to the second map attempt
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(mapAttempt2.getID(),
TaskAttemptEventType.TA_DONE));
// wait for map success
app.waitForState(mapTask, TaskState.SUCCEEDED);
//send done to reduce
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(reduceAttempt.getID(),
TaskAttemptEventType.TA_DONE));
//send done to reduce
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(reduceAttempt2.getID(),
TaskAttemptEventType.TA_DONE));
//send done to reduce
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(reduceAttempt3.getID(),
TaskAttemptEventType.TA_DONE));
app.waitForState(job, JobState.SUCCEEDED);
//previous completion event now becomes obsolete
Assert.assertEquals("Event status not correct",
TaskAttemptCompletionEventStatus.OBSOLETE, events[0].getStatus());
events = job.getTaskAttemptCompletionEvents(0, 100);
Assert.assertEquals("Num completion events not correct",
6, events.length);
Assert.assertEquals("Event map attempt id not correct",
mapAttempt1.getID(), events[0].getAttemptId());
Assert.assertEquals("Event map attempt id not correct",
mapAttempt1.getID(), events[1].getAttemptId());
Assert.assertEquals("Event map attempt id not correct",
mapAttempt2.getID(), events[2].getAttemptId());
Assert.assertEquals("Event reduce attempt id not correct",
reduceAttempt.getID(), events[3].getAttemptId());
Assert.assertEquals("Event status not correct for map attempt1",
TaskAttemptCompletionEventStatus.OBSOLETE, events[0].getStatus());
Assert.assertEquals("Event status not correct for map attempt1",
TaskAttemptCompletionEventStatus.FAILED, events[1].getStatus());
Assert.assertEquals("Event status not correct for map attempt2",
TaskAttemptCompletionEventStatus.SUCCEEDED, events[2].getStatus());
Assert.assertEquals("Event status not correct for reduce attempt1",
TaskAttemptCompletionEventStatus.SUCCEEDED, events[3].getStatus());
    TaskCompletionEvent[] mapEvents =
      job.getMapAttemptCompletionEvents(0, 2);
    TaskCompletionEvent[] convertedEvents = TypeConverter.fromYarn(events);
Assert.assertEquals("Incorrect number of map events", 2, mapEvents.length);
Assert.assertArrayEquals("Unexpected map events",
Arrays.copyOfRange(convertedEvents, 0, 2), mapEvents);
mapEvents = job.getMapAttemptCompletionEvents(2, 200);
Assert.assertEquals("Incorrect number of map events", 1, mapEvents.length);
Assert.assertEquals("Unexpected map event", convertedEvents[2],
mapEvents[0]);
}
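  // Push a synthetic status update so the attempt reports the given phase;
  // the tests use this to control which reducers count as still shuffling.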
private void updateStatus(MRApp app, TaskAttempt attempt, Phase phase) {
TaskAttemptStatusUpdateEvent.TaskAttemptStatus status = new TaskAttemptStatusUpdateEvent.TaskAttemptStatus();
status.counters = new Counters();
status.fetchFailedMaps = new ArrayList<TaskAttemptId>();
status.id = attempt.getID();
status.mapFinishTime = 0;
status.phase = phase;
status.progress = 0.5f;
status.shuffleFinishTime = 0;
status.sortFinishTime = 0;
status.stateString = "OK";
status.taskState = attempt.getState();
TaskAttemptStatusUpdateEvent event = new TaskAttemptStatusUpdateEvent(attempt.getID(),
status);
app.getContext().getEventHandler().handle(event);
}
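  // Report to the job that reduceAttempt failed to fetch mapAttempt's output
  // from the given host.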
private void sendFetchFailure(MRApp app, TaskAttempt reduceAttempt,
TaskAttempt mapAttempt, String hostname) {
app.getContext().getEventHandler().handle(
new JobTaskAttemptFetchFailureEvent(
reduceAttempt.getID(),
Arrays.asList(new TaskAttemptId[] {mapAttempt.getID()}),
hostname));
}
static class MRAppWithHistory extends MRApp {
public MRAppWithHistory(int maps, int reduces, boolean autoComplete,
String testName, boolean cleanOnStart, int startCount) {
super(maps, reduces, autoComplete, testName, cleanOnStart, startCount);
}
@Override
protected EventHandler<JobHistoryEvent> createJobHistoryHandler(
AppContext context) {
JobHistoryEventHandler eventHandler = new JobHistoryEventHandler(context,
getStartCount());
return eventHandler;
}
}
}
| 19,873
| 42.110629
| 113
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppComponentDependencies.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app;
import java.io.IOException;
import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
import org.apache.hadoop.mapreduce.v2.app.client.MRClientService;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.JobStateInternal;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobFinishEvent;
import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.junit.Test;
public class TestMRAppComponentDependencies {
@Test(timeout = 20000)
public void testComponentStopOrder() throws Exception {
@SuppressWarnings("resource")
TestMRApp app = new TestMRApp(1, 1, true, this.getClass().getName(), true);
JobImpl job = (JobImpl) app.submit(new Configuration());
app.waitForState(job, JobState.SUCCEEDED);
app.verifyCompleted();
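    // Poll for up to 20 seconds until both components have stopped.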
int waitTime = 20 * 1000;
while (waitTime > 0 && app.numStops < 2) {
Thread.sleep(100);
waitTime -= 100;
}
// assert JobHistoryEventHandlerStopped and then clientServiceStopped
Assert.assertEquals(1, app.JobHistoryEventHandlerStopped);
Assert.assertEquals(2, app.clientServiceStopped);
}
private final class TestMRApp extends MRApp {
int JobHistoryEventHandlerStopped;
int clientServiceStopped;
int numStops;
public TestMRApp(int maps, int reduces, boolean autoComplete,
String testName, boolean cleanOnStart) {
super(maps, reduces, autoComplete, testName, cleanOnStart);
JobHistoryEventHandlerStopped = 0;
clientServiceStopped = 0;
numStops = 0;
}
@Override
protected Job createJob(Configuration conf, JobStateInternal forcedState,
String diagnostic) {
UserGroupInformation currentUser = null;
try {
currentUser = UserGroupInformation.getCurrentUser();
} catch (IOException e) {
throw new YarnRuntimeException(e);
}
Job newJob =
new TestJob(getJobId(), getAttemptID(), conf, getDispatcher()
.getEventHandler(), getTaskAttemptListener(), getContext()
.getClock(), getCommitter(), isNewApiCommitter(),
currentUser.getUserName(), getContext(), forcedState, diagnostic);
((AppContext) getContext()).getAllJobs().put(newJob.getID(), newJob);
getDispatcher().register(JobFinishEvent.Type.class,
createJobFinishEventHandler());
return newJob;
}
@Override
protected ClientService createClientService(AppContext context) {
return new MRClientService(context) {
@Override
public void serviceStop() throws Exception {
numStops++;
clientServiceStopped = numStops;
super.serviceStop();
}
};
}
@Override
protected EventHandler<JobHistoryEvent> createJobHistoryHandler(
AppContext context) {
return new JobHistoryEventHandler(context, getStartCount()) {
@Override
public void serviceStop() throws Exception {
numStops++;
JobHistoryEventHandlerStopped = numStops;
super.serviceStop();
}
};
}
}
}
| 4,377
| 34.885246
| 79
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockEventHandler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
import org.apache.hadoop.yarn.event.EventHandler;
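/** No-op event handler for tests that only need a handler instance. */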
public class MockEventHandler implements EventHandler<TaskAttemptEvent> {
@Override
public void handle(TaskAttemptEvent event) {
}
}
| 1,097
| 36.862069
| 74
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyBoolean;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.app.MRAppMaster.RunningAppContext;
import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.JobStateInternal;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobFinishEvent;
import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl;
import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent;
import org.apache.hadoop.mapreduce.v2.app.rm.RMCommunicator;
import org.apache.hadoop.mapreduce.v2.app.rm.RMHeartbeatHandler;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.service.Service;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.junit.Assert;
import org.junit.Test;
/**
 * Make sure that the job staging directory cleanup happens.
*/
public class TestStagingCleanup {
private Configuration conf = new Configuration();
private FileSystem fs;
private String stagingJobDir = "tmpJobDir";
private Path stagingJobPath = new Path(stagingJobDir);
private final static RecordFactory recordFactory = RecordFactoryProvider.
getRecordFactory(null);
@Test
public void testDeletionofStagingOnUnregistrationFailure()
throws IOException {
testDeletionofStagingOnUnregistrationFailure(2, false);
testDeletionofStagingOnUnregistrationFailure(1, false);
}
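  // Runs an AM whose RM unregistration fails and checks whether the staging
  // directory was deleted.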
@SuppressWarnings("resource")
private void testDeletionofStagingOnUnregistrationFailure(
int maxAttempts, boolean shouldHaveDeleted) throws IOException {
conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, stagingJobDir);
fs = mock(FileSystem.class);
when(fs.delete(any(Path.class), anyBoolean())).thenReturn(true);
//Staging Dir exists
String user = UserGroupInformation.getCurrentUser().getShortUserName();
Path stagingDir = MRApps.getStagingAreaDir(conf, user);
when(fs.exists(stagingDir)).thenReturn(true);
ApplicationId appId = ApplicationId.newInstance(0, 1);
ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
JobId jobid = recordFactory.newRecordInstance(JobId.class);
jobid.setAppId(appId);
TestMRApp appMaster = new TestMRApp(attemptId, null,
JobStateInternal.RUNNING, maxAttempts);
appMaster.crushUnregistration = true;
appMaster.init(conf);
appMaster.start();
appMaster.shutDownJob();
((RunningAppContext) appMaster.getContext()).resetIsLastAMRetry();
if (shouldHaveDeleted) {
      Assert.assertTrue(appMaster.isLastAMRetry());
verify(fs).delete(stagingJobPath, true);
} else {
      Assert.assertFalse(appMaster.isLastAMRetry());
verify(fs, never()).delete(stagingJobPath, true);
}
}
@Test
public void testDeletionofStaging() throws IOException {
conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, stagingJobDir);
fs = mock(FileSystem.class);
when(fs.delete(any(Path.class), anyBoolean())).thenReturn(true);
//Staging Dir exists
String user = UserGroupInformation.getCurrentUser().getShortUserName();
Path stagingDir = MRApps.getStagingAreaDir(conf, user);
when(fs.exists(stagingDir)).thenReturn(true);
ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(),
0);
ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
JobId jobid = recordFactory.newRecordInstance(JobId.class);
jobid.setAppId(appId);
ContainerAllocator mockAlloc = mock(ContainerAllocator.class);
Assert.assertTrue(MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS > 1);
MRAppMaster appMaster = new TestMRApp(attemptId, mockAlloc,
JobStateInternal.RUNNING, MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS);
appMaster.init(conf);
appMaster.start();
appMaster.shutDownJob();
    // test whether notifyIsLastAMRetry was called
Assert.assertEquals(true, ((TestMRApp)appMaster).getTestIsLastAMRetry());
verify(fs).delete(stagingJobPath, true);
}
@Test (timeout = 30000)
public void testNoDeletionofStagingOnReboot() throws IOException {
conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, stagingJobDir);
fs = mock(FileSystem.class);
when(fs.delete(any(Path.class),anyBoolean())).thenReturn(true);
String user = UserGroupInformation.getCurrentUser().getShortUserName();
Path stagingDir = MRApps.getStagingAreaDir(conf, user);
when(fs.exists(stagingDir)).thenReturn(true);
ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(),
0);
ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
ContainerAllocator mockAlloc = mock(ContainerAllocator.class);
Assert.assertTrue(MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS > 1);
MRAppMaster appMaster = new TestMRApp(attemptId, mockAlloc,
JobStateInternal.REBOOT, MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS);
appMaster.init(conf);
appMaster.start();
    // shut down the job; this is not the last retry
appMaster.shutDownJob();
    // test whether notifyIsLastAMRetry was called
Assert.assertEquals(false, ((TestMRApp)appMaster).getTestIsLastAMRetry());
verify(fs, times(0)).delete(stagingJobPath, true);
}
  // FIXME:
  // Disabled this test because currently, when the job state is REBOOT at
  // shutdown and lastRetry = true in the RM's view, cleanup is not done.
  // This will be supported once YARN-2261 is completed.
  // @Test (timeout = 30000)
public void testDeletionofStagingOnReboot() throws IOException {
conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, stagingJobDir);
fs = mock(FileSystem.class);
when(fs.delete(any(Path.class),anyBoolean())).thenReturn(true);
String user = UserGroupInformation.getCurrentUser().getShortUserName();
Path stagingDir = MRApps.getStagingAreaDir(conf, user);
when(fs.exists(stagingDir)).thenReturn(true);
ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(),
0);
ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
ContainerAllocator mockAlloc = mock(ContainerAllocator.class);
MRAppMaster appMaster = new TestMRApp(attemptId, mockAlloc,
JobStateInternal.REBOOT, 1); //no retry
appMaster.init(conf);
appMaster.start();
    // shut down the job; this is the last retry
appMaster.shutDownJob();
    // test whether notifyIsLastAMRetry was called
Assert.assertEquals(true, ((TestMRApp)appMaster).getTestIsLastAMRetry());
verify(fs).delete(stagingJobPath, true);
}
@Test (timeout = 30000)
public void testDeletionofStagingOnKill() throws IOException {
conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, stagingJobDir);
fs = mock(FileSystem.class);
when(fs.delete(any(Path.class), anyBoolean())).thenReturn(true);
//Staging Dir exists
String user = UserGroupInformation.getCurrentUser().getShortUserName();
Path stagingDir = MRApps.getStagingAreaDir(conf, user);
when(fs.exists(stagingDir)).thenReturn(true);
ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(),
0);
ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 0);
JobId jobid = recordFactory.newRecordInstance(JobId.class);
jobid.setAppId(appId);
ContainerAllocator mockAlloc = mock(ContainerAllocator.class);
MRAppMaster appMaster = new TestMRApp(attemptId, mockAlloc);
appMaster.init(conf);
//simulate the process being killed
MRAppMaster.MRAppMasterShutdownHook hook =
new MRAppMaster.MRAppMasterShutdownHook(appMaster);
hook.run();
verify(fs, times(0)).delete(stagingJobPath, true);
}
  // FIXME:
  // Disabled this test because currently, when the shutdown hook is triggered
  // on the last retry in the RM's view, cleanup is not done. This should be
  // supported once YARN-2261 is completed.
  // @Test (timeout = 30000)
public void testDeletionofStagingOnKillLastTry() throws IOException {
conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, stagingJobDir);
fs = mock(FileSystem.class);
when(fs.delete(any(Path.class), anyBoolean())).thenReturn(true);
//Staging Dir exists
String user = UserGroupInformation.getCurrentUser().getShortUserName();
Path stagingDir = MRApps.getStagingAreaDir(conf, user);
when(fs.exists(stagingDir)).thenReturn(true);
ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(),
0);
ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
JobId jobid = recordFactory.newRecordInstance(JobId.class);
jobid.setAppId(appId);
ContainerAllocator mockAlloc = mock(ContainerAllocator.class);
MRAppMaster appMaster = new TestMRApp(attemptId, mockAlloc); //no retry
appMaster.init(conf);
assertTrue("appMaster.isLastAMRetry() is false", appMaster.isLastAMRetry());
//simulate the process being killed
MRAppMaster.MRAppMasterShutdownHook hook =
new MRAppMaster.MRAppMasterShutdownHook(appMaster);
hook.run();
assertTrue("MRAppMaster isn't stopped",
appMaster.isInState(Service.STATE.STOPPED));
verify(fs).delete(stagingJobPath, true);
}
private class TestMRApp extends MRAppMaster {
ContainerAllocator allocator;
boolean testIsLastAMRetry = false;
JobStateInternal jobStateInternal;
boolean crushUnregistration = false;
public TestMRApp(ApplicationAttemptId applicationAttemptId,
ContainerAllocator allocator) {
super(applicationAttemptId, ContainerId.newContainerId(
applicationAttemptId, 1), "testhost", 2222, 3333,
System.currentTimeMillis());
this.allocator = allocator;
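      // Treat RM unregistration as already successful unless a test
      // overrides it.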
this.successfullyUnregistered.set(true);
}
public TestMRApp(ApplicationAttemptId applicationAttemptId,
ContainerAllocator allocator, JobStateInternal jobStateInternal,
int maxAppAttempts) {
this(applicationAttemptId, allocator);
this.jobStateInternal = jobStateInternal;
}
@Override
protected FileSystem getFileSystem(Configuration conf) {
return fs;
}
@Override
protected ContainerAllocator createContainerAllocator(
final ClientService clientService, final AppContext context) {
if(allocator == null) {
if (crushUnregistration) {
return new CustomContainerAllocator(context);
} else {
return super.createContainerAllocator(clientService, context);
}
}
return allocator;
}
@Override
protected Job createJob(Configuration conf, JobStateInternal forcedState,
String diagnostic) {
JobImpl jobImpl = mock(JobImpl.class);
when(jobImpl.getInternalState()).thenReturn(this.jobStateInternal);
when(jobImpl.getAllCounters()).thenReturn(new Counters());
JobID jobID = JobID.forName("job_1234567890000_0001");
JobId jobId = TypeConverter.toYarn(jobID);
when(jobImpl.getID()).thenReturn(jobId);
((AppContext) getContext())
.getAllJobs().put(jobImpl.getID(), jobImpl);
return jobImpl;
}
@Override
public void serviceStart() throws Exception {
super.serviceStart();
DefaultMetricsSystem.shutdown();
}
@Override
public void notifyIsLastAMRetry(boolean isLastAMRetry){
testIsLastAMRetry = isLastAMRetry;
super.notifyIsLastAMRetry(isLastAMRetry);
}
@Override
public RMHeartbeatHandler getRMHeartbeatHandler() {
return getStubbedHeartbeatHandler(getContext());
}
@Override
protected void sysexit() {
}
@Override
public Configuration getConfig() {
return conf;
}
@Override
protected void initJobCredentialsAndUGI(Configuration conf) {
}
public boolean getTestIsLastAMRetry(){
return testIsLastAMRetry;
}
private class CustomContainerAllocator extends RMCommunicator
implements ContainerAllocator {
public CustomContainerAllocator(AppContext context) {
super(null, context);
}
@Override
public void serviceInit(Configuration conf) {
}
@Override
public void serviceStart() {
}
@Override
public void serviceStop() {
unregister();
}
@Override
protected void doUnregistration()
throws YarnException, IOException, InterruptedException {
throw new YarnException("test exception");
}
@Override
protected void heartbeat() throws Exception {
}
@Override
public void handle(ContainerAllocatorEvent event) {
}
}
}
private final class MRAppTestCleanup extends MRApp {
int stagingDirCleanedup;
int ContainerAllocatorStopped;
int numStops;
public MRAppTestCleanup(int maps, int reduces, boolean autoComplete,
String testName, boolean cleanOnStart) {
super(maps, reduces, autoComplete, testName, cleanOnStart);
stagingDirCleanedup = 0;
ContainerAllocatorStopped = 0;
numStops = 0;
}
@Override
protected Job createJob(Configuration conf, JobStateInternal forcedState,
String diagnostic) {
UserGroupInformation currentUser = null;
try {
currentUser = UserGroupInformation.getCurrentUser();
} catch (IOException e) {
throw new YarnRuntimeException(e);
}
Job newJob = new TestJob(getJobId(), getAttemptID(), conf,
getDispatcher().getEventHandler(),
getTaskAttemptListener(), getContext().getClock(),
getCommitter(), isNewApiCommitter(),
currentUser.getUserName(), getContext(),
forcedState, diagnostic);
((AppContext) getContext()).getAllJobs().put(newJob.getID(), newJob);
getDispatcher().register(JobFinishEvent.Type.class,
createJobFinishEventHandler());
return newJob;
}
@Override
protected ContainerAllocator createContainerAllocator(
ClientService clientService, AppContext context) {
return new TestCleanupContainerAllocator();
}
private class TestCleanupContainerAllocator extends AbstractService
implements ContainerAllocator {
private MRAppContainerAllocator allocator;
TestCleanupContainerAllocator() {
super(TestCleanupContainerAllocator.class.getName());
allocator = new MRAppContainerAllocator();
}
@Override
public void handle(ContainerAllocatorEvent event) {
allocator.handle(event);
}
@Override
protected void serviceStop() throws Exception {
numStops++;
ContainerAllocatorStopped = numStops;
super.serviceStop();
}
}
@Override
public RMHeartbeatHandler getRMHeartbeatHandler() {
return getStubbedHeartbeatHandler(getContext());
}
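    // Record when the staging-dir cleanup happens relative to the allocator
    // stop, so the test can verify the ordering.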
@Override
public void cleanupStagingDir() throws IOException {
numStops++;
stagingDirCleanedup = numStops;
}
@Override
protected void sysexit() {
}
}
private static RMHeartbeatHandler getStubbedHeartbeatHandler(
final AppContext appContext) {
return new RMHeartbeatHandler() {
@Override
public long getLastHeartbeatTime() {
return appContext.getClock().getTime();
}
@Override
public void runOnNextHeartbeat(Runnable callback) {
callback.run();
}
};
}
@Test(timeout=20000)
public void testStagingCleanupOrder() throws Exception {
MRAppTestCleanup app = new MRAppTestCleanup(1, 1, true,
this.getClass().getName(), true);
JobImpl job = (JobImpl)app.submit(new Configuration());
app.waitForState(job, JobState.SUCCEEDED);
app.verifyCompleted();
int waitTime = 20 * 1000;
while (waitTime > 0 && app.numStops < 2) {
Thread.sleep(100);
waitTime -= 100;
}
    // assert the ContainerAllocator stopped first, then the staging dir was cleaned up
Assert.assertEquals(1, app.ContainerAllocatorStopped);
Assert.assertEquals(2, app.stagingDirCleanedup);
}
}
| 18,361
| 36.938017
| 81
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobACLsManager;
import org.apache.hadoop.mapred.ShuffleHandler;
import org.apache.hadoop.mapred.TaskCompletionEvent;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.FileSystemCounter;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.JobCounter;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.TaskCounter;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.Phase;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.yarn.MockApps;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.util.Records;
import com.google.common.collect.Iterators;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
public class MockJobs extends MockApps {
static final Iterator<JobState> JOB_STATES = Iterators.cycle(JobState
.values());
static final Iterator<TaskState> TASK_STATES = Iterators.cycle(TaskState
.values());
static final Iterator<TaskAttemptState> TASK_ATTEMPT_STATES = Iterators
.cycle(TaskAttemptState.values());
static final Iterator<TaskType> TASK_TYPES = Iterators.cycle(TaskType
.values());
static final Iterator<JobCounter> JOB_COUNTERS = Iterators.cycle(JobCounter
.values());
static final Iterator<FileSystemCounter> FS_COUNTERS = Iterators
.cycle(FileSystemCounter.values());
static final Iterator<TaskCounter> TASK_COUNTERS = Iterators
.cycle(TaskCounter.values());
static final Iterator<String> FS_SCHEMES = Iterators.cycle("FILE", "HDFS",
"LAFS", "CEPH");
static final Iterator<String> USER_COUNTER_GROUPS = Iterators
.cycle(
"com.company.project.subproject.component.subcomponent.UserDefinedSpecificSpecialTask$Counters",
"PigCounters");
static final Iterator<String> USER_COUNTERS = Iterators.cycle("counter1",
"counter2", "counter3");
static final Iterator<Phase> PHASES = Iterators.cycle(Phase.values());
static final Iterator<String> DIAGS = Iterators.cycle(
"Error: java.lang.OutOfMemoryError: Java heap space",
"Lost task tracker: tasktracker.domain/127.0.0.1:40879");
public static final String NM_HOST = "localhost";
public static final int NM_PORT = 1234;
public static final int NM_HTTP_PORT = 8042;
static final int DT = 1000000; // ms
public static String newJobName() {
return newAppName();
}
/**
* Create numJobs in a map with jobs having appId==jobId
*/
public static Map<JobId, Job> newJobs(int numJobs, int numTasksPerJob,
int numAttemptsPerTask) {
Map<JobId, Job> map = Maps.newHashMap();
for (int j = 0; j < numJobs; ++j) {
ApplicationId appID = MockJobs.newAppID(j);
Job job = newJob(appID, j, numTasksPerJob, numAttemptsPerTask);
map.put(job.getID(), job);
}
return map;
}
public static Map<JobId, Job> newJobs(ApplicationId appID, int numJobsPerApp,
int numTasksPerJob, int numAttemptsPerTask) {
Map<JobId, Job> map = Maps.newHashMap();
for (int j = 0; j < numJobsPerApp; ++j) {
Job job = newJob(appID, j, numTasksPerJob, numAttemptsPerTask);
map.put(job.getID(), job);
}
return map;
}
public static Map<JobId, Job> newJobs(ApplicationId appID, int numJobsPerApp,
int numTasksPerJob, int numAttemptsPerTask, boolean hasFailedTasks) {
Map<JobId, Job> map = Maps.newHashMap();
for (int j = 0; j < numJobsPerApp; ++j) {
Job job = newJob(appID, j, numTasksPerJob, numAttemptsPerTask, null,
hasFailedTasks);
map.put(job.getID(), job);
}
return map;
}
public static JobId newJobID(ApplicationId appID, int i) {
JobId id = Records.newRecord(JobId.class);
id.setAppId(appID);
id.setId(i);
return id;
}
public static JobReport newJobReport(JobId id) {
JobReport report = Records.newRecord(JobReport.class);
report.setJobId(id);
report.setSubmitTime(System.currentTimeMillis()-DT);
report
.setStartTime(System.currentTimeMillis() - (int) (Math.random() * DT));
report.setFinishTime(System.currentTimeMillis()
+ (int) (Math.random() * DT) + 1);
report.setMapProgress((float) Math.random());
report.setReduceProgress((float) Math.random());
report.setJobState(JOB_STATES.next());
return report;
}
public static TaskReport newTaskReport(TaskId id) {
TaskReport report = Records.newRecord(TaskReport.class);
report.setTaskId(id);
report
.setStartTime(System.currentTimeMillis() - (int) (Math.random() * DT));
report.setFinishTime(System.currentTimeMillis()
+ (int) (Math.random() * DT) + 1);
report.setProgress((float) Math.random());
report.setStatus("Moving average: " + Math.random());
report.setCounters(TypeConverter.toYarn(newCounters()));
report.setTaskState(TASK_STATES.next());
return report;
}
public static TaskAttemptReport newTaskAttemptReport(TaskAttemptId id) {
ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(
id.getTaskId().getJobId().getAppId(), 0);
ContainerId containerId = ContainerId.newContainerId(appAttemptId, 0);
TaskAttemptReport report = Records.newRecord(TaskAttemptReport.class);
report.setTaskAttemptId(id);
report
.setStartTime(System.currentTimeMillis() - (int) (Math.random() * DT));
report.setFinishTime(System.currentTimeMillis()
+ (int) (Math.random() * DT) + 1);
if (id.getTaskId().getTaskType() == TaskType.REDUCE) {
report.setShuffleFinishTime(
(report.getFinishTime() + report.getStartTime()) / 2);
report.setSortFinishTime(
(report.getFinishTime() + report.getShuffleFinishTime()) / 2);
}
report.setPhase(PHASES.next());
report.setTaskAttemptState(TASK_ATTEMPT_STATES.next());
report.setProgress((float) Math.random());
report.setCounters(TypeConverter.toYarn(newCounters()));
report.setContainerId(containerId);
report.setDiagnosticInfo(DIAGS.next());
report.setStateString("Moving average " + Math.random());
return report;
}
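  // Build a Counters object filled with random values for the framework,
  // file system and user-defined counters.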
public static Counters newCounters() {
Counters hc = new Counters();
for (JobCounter c : JobCounter.values()) {
hc.findCounter(c).setValue((long) (Math.random() * 1000));
}
for (TaskCounter c : TaskCounter.values()) {
hc.findCounter(c).setValue((long) (Math.random() * 1000));
}
int nc = FileSystemCounter.values().length * 4;
for (int i = 0; i < nc; ++i) {
for (FileSystemCounter c : FileSystemCounter.values()) {
hc.findCounter(FS_SCHEMES.next(), c).setValue(
(long) (Math.random() * DT));
}
}
for (int i = 0; i < 2 * 3; ++i) {
hc.findCounter(USER_COUNTER_GROUPS.next(), USER_COUNTERS.next())
.setValue((long) (Math.random() * 100000));
}
return hc;
}
public static Map<TaskAttemptId, TaskAttempt> newTaskAttempts(TaskId tid,
int m) {
Map<TaskAttemptId, TaskAttempt> map = Maps.newHashMap();
for (int i = 0; i < m; ++i) {
TaskAttempt ta = newTaskAttempt(tid, i);
map.put(ta.getID(), ta);
}
return map;
}
public static TaskAttempt newTaskAttempt(TaskId tid, int i) {
final TaskAttemptId taid = Records.newRecord(TaskAttemptId.class);
taid.setTaskId(tid);
taid.setId(i);
final TaskAttemptReport report = newTaskAttemptReport(taid);
return new TaskAttempt() {
@Override
public NodeId getNodeId() throws UnsupportedOperationException{
throw new UnsupportedOperationException();
}
@Override
public TaskAttemptId getID() {
return taid;
}
@Override
public TaskAttemptReport getReport() {
return report;
}
@Override
public long getLaunchTime() {
return report.getStartTime();
}
@Override
public long getFinishTime() {
return report.getFinishTime();
}
@Override
public int getShufflePort() {
return ShuffleHandler.DEFAULT_SHUFFLE_PORT;
}
@Override
public Counters getCounters() {
if (report != null && report.getCounters() != null) {
return new Counters(TypeConverter.fromYarn(report.getCounters()));
}
return null;
}
@Override
public float getProgress() {
return report.getProgress();
}
@Override
public Phase getPhase() {
return report.getPhase();
}
@Override
public TaskAttemptState getState() {
return report.getTaskAttemptState();
}
@Override
public boolean isFinished() {
switch (report.getTaskAttemptState()) {
case SUCCEEDED:
case FAILED:
case KILLED:
return true;
}
return false;
}
@Override
public ContainerId getAssignedContainerID() {
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(taid.getTaskId().getJobId()
.getAppId(), 0);
ContainerId id = ContainerId.newContainerId(appAttemptId, 0);
return id;
}
@Override
public String getNodeHttpAddress() {
return "localhost:8042";
}
@Override
public List<String> getDiagnostics() {
return Lists.newArrayList(report.getDiagnosticInfo());
}
@Override
public String getAssignedContainerMgrAddress() {
return "localhost:9998";
}
@Override
public long getShuffleFinishTime() {
return report.getShuffleFinishTime();
}
@Override
public long getSortFinishTime() {
return report.getSortFinishTime();
}
@Override
public String getNodeRackName() {
return "/default-rack";
}
};
}
public static Map<TaskId, Task> newTasks(JobId jid, int n, int m, boolean hasFailedTasks) {
Map<TaskId, Task> map = Maps.newHashMap();
for (int i = 0; i < n; ++i) {
Task task = newTask(jid, i, m, hasFailedTasks);
map.put(task.getID(), task);
}
return map;
}
public static Task newTask(JobId jid, int i, int m, final boolean hasFailedTasks) {
final TaskId tid = Records.newRecord(TaskId.class);
tid.setJobId(jid);
tid.setId(i);
tid.setTaskType(TASK_TYPES.next());
final TaskReport report = newTaskReport(tid);
final Map<TaskAttemptId, TaskAttempt> attempts = newTaskAttempts(tid, m);
return new Task() {
@Override
public TaskId getID() {
return tid;
}
@Override
public TaskReport getReport() {
return report;
}
@Override
public Counters getCounters() {
if (hasFailedTasks) {
return null;
}
return new Counters(
TypeConverter.fromYarn(report.getCounters()));
}
@Override
public float getProgress() {
return report.getProgress();
}
@Override
public TaskType getType() {
return tid.getTaskType();
}
@Override
public Map<TaskAttemptId, TaskAttempt> getAttempts() {
return attempts;
}
@Override
public TaskAttempt getAttempt(TaskAttemptId attemptID) {
return attempts.get(attemptID);
}
@Override
public boolean isFinished() {
switch (report.getTaskState()) {
case SUCCEEDED:
case KILLED:
case FAILED:
return true;
}
return false;
}
@Override
public boolean canCommit(TaskAttemptId taskAttemptID) {
return false;
}
@Override
public TaskState getState() {
return report.getTaskState();
}
};
}
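  // Aggregate the counters of every task that reports counters (i.e. the
  // completed ones).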
public static Counters getCounters(
Collection<Task> tasks) {
List<Task> completedTasks = new ArrayList<Task>();
for (Task task : tasks) {
if (task.getCounters() != null) {
completedTasks.add(task);
}
}
Counters counters = new Counters();
return JobImpl.incrTaskCounters(counters, completedTasks);
}
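  // Tally of map/reduce tasks and how many of each have finished.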
static class TaskCount {
int maps;
int reduces;
int completedMaps;
int completedReduces;
void incr(Task task) {
TaskType type = task.getType();
boolean finished = task.isFinished();
if (type == TaskType.MAP) {
if (finished) {
++completedMaps;
}
++maps;
} else if (type == TaskType.REDUCE) {
if (finished) {
++completedReduces;
}
++reduces;
}
}
}
static TaskCount getTaskCount(Collection<Task> tasks) {
TaskCount tc = new TaskCount();
for (Task task : tasks) {
tc.incr(task);
}
return tc;
}
public static Job newJob(ApplicationId appID, int i, int n, int m) {
return newJob(appID, i, n, m, null);
}
public static Job newJob(ApplicationId appID, int i, int n, int m, Path confFile) {
return newJob(appID, i, n, m, confFile, false);
}
public static Job newJob(ApplicationId appID, int i, int n, int m,
Path confFile, boolean hasFailedTasks) {
final JobId id = newJobID(appID, i);
final String name = newJobName();
final JobReport report = newJobReport(id);
final Map<TaskId, Task> tasks = newTasks(id, n, m, hasFailedTasks);
final TaskCount taskCount = getTaskCount(tasks.values());
final Counters counters = getCounters(tasks
.values());
final Path configFile = confFile;
Map<JobACL, AccessControlList> tmpJobACLs = new HashMap<JobACL, AccessControlList>();
final Configuration conf = new Configuration();
conf.set(JobACL.VIEW_JOB.getAclName(), "testuser");
conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
JobACLsManager aclsManager = new JobACLsManager(conf);
tmpJobACLs = aclsManager.constructJobACLs(conf);
final Map<JobACL, AccessControlList> jobACLs = tmpJobACLs;
return new Job() {
@Override
public JobId getID() {
return id;
}
@Override
public String getName() {
return name;
}
@Override
public JobState getState() {
return report.getJobState();
}
@Override
public JobReport getReport() {
return report;
}
@Override
public float getProgress() {
return 0;
}
@Override
public Counters getAllCounters() {
return counters;
}
@Override
public Map<TaskId, Task> getTasks() {
return tasks;
}
@Override
public Task getTask(TaskId taskID) {
return tasks.get(taskID);
}
@Override
public int getTotalMaps() {
return taskCount.maps;
}
@Override
public int getTotalReduces() {
return taskCount.reduces;
}
@Override
public int getCompletedMaps() {
return taskCount.completedMaps;
}
@Override
public int getCompletedReduces() {
return taskCount.completedReduces;
}
@Override
public boolean isUber() {
return false;
}
@Override
public TaskAttemptCompletionEvent[] getTaskAttemptCompletionEvents(
int fromEventId, int maxEvents) {
return null;
}
@Override
public TaskCompletionEvent[] getMapAttemptCompletionEvents(
int startIndex, int maxEvents) {
return null;
}
@Override
public Map<TaskId, Task> getTasks(TaskType taskType) {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public List<String> getDiagnostics() {
return Collections.<String> emptyList();
}
@Override
public boolean checkAccess(UserGroupInformation callerUGI,
JobACL jobOperation) {
return true;
}
@Override
public String getUserName() {
return "mock";
}
@Override
public String getQueueName() {
return "mockqueue";
}
@Override
public Path getConfFile() {
return configFile;
}
@Override
public Map<JobACL, AccessControlList> getJobACLs() {
return jobACLs;
}
@Override
public List<AMInfo> getAMInfos() {
List<AMInfo> amInfoList = new LinkedList<AMInfo>();
amInfoList.add(createAMInfo(1));
amInfoList.add(createAMInfo(2));
return amInfoList;
}
@Override
public Configuration loadConfFile() throws IOException {
FileContext fc = FileContext.getFileContext(configFile.toUri(), conf);
Configuration jobConf = new Configuration(false);
jobConf.addResource(fc.open(configFile), configFile.toString());
return jobConf;
}
@Override
public void setQueueName(String queueName) {
// do nothing
}
};
}
private static AMInfo createAMInfo(int attempt) {
ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(
ApplicationId.newInstance(100, 1), attempt);
ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1);
return MRBuilderUtils.newAMInfo(appAttemptId, System.currentTimeMillis(),
containerId, NM_HOST, NM_PORT, NM_HTTP_PORT);
}
}
| 19,862
| 29.652778
| 106
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockAppContext.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.security.client.ClientToAMTokenSecretManager;
import org.apache.hadoop.yarn.util.Clock;
import com.google.common.collect.Maps;
public class MockAppContext implements AppContext {
final ApplicationAttemptId appAttemptID;
final ApplicationId appID;
final String user = MockJobs.newUserName();
final Map<JobId, Job> jobs;
final long startTime = System.currentTimeMillis();
Set<String> blacklistedNodes;
String queue;
public MockAppContext(int appid) {
appID = MockJobs.newAppID(appid);
appAttemptID = ApplicationAttemptId.newInstance(appID, 0);
jobs = null;
}
public MockAppContext(int appid, int numTasks, int numAttempts, Path confPath) {
appID = MockJobs.newAppID(appid);
appAttemptID = ApplicationAttemptId.newInstance(appID, 0);
Map<JobId, Job> map = Maps.newHashMap();
Job job = MockJobs.newJob(appID, 0, numTasks, numAttempts, confPath);
map.put(job.getID(), job);
jobs = map;
}
public MockAppContext(int appid, int numJobs, int numTasks, int numAttempts) {
this(appid, numJobs, numTasks, numAttempts, false);
}
public MockAppContext(int appid, int numJobs, int numTasks, int numAttempts,
boolean hasFailedTasks) {
appID = MockJobs.newAppID(appid);
appAttemptID = ApplicationAttemptId.newInstance(appID, 0);
jobs = MockJobs.newJobs(appID, numJobs, numTasks, numAttempts, hasFailedTasks);
}
@Override
public ApplicationAttemptId getApplicationAttemptId() {
return appAttemptID;
}
@Override
public ApplicationId getApplicationID() {
return appID;
}
@Override
public CharSequence getUser() {
return user;
}
@Override
public Job getJob(JobId jobID) {
return jobs.get(jobID);
}
@Override
public Map<JobId, Job> getAllJobs() {
return jobs; // OK
}
@SuppressWarnings("rawtypes")
@Override
public EventHandler getEventHandler() {
return new MockEventHandler();
}
@Override
public Clock getClock() {
return null;
}
@Override
public String getApplicationName() {
return "TestApp";
}
@Override
public long getStartTime() {
return startTime;
}
@Override
public ClusterInfo getClusterInfo() {
return null;
}
@Override
public Set<String> getBlacklistedNodes() {
return blacklistedNodes;
}
public void setBlacklistedNodes(Set<String> blacklistedNodes) {
this.blacklistedNodes = blacklistedNodes;
}
public ClientToAMTokenSecretManager getClientToAMTokenSecretManager() {
// Not implemented
return null;
}
@Override
public boolean isLastAMRetry() {
return false;
}
@Override
public boolean hasSuccessfullyUnregistered() {
// bogus - Not Required
return true;
}
@Override
public String getNMHostname() {
// bogus - Not Required
return null;
}
@Override
public TaskAttemptFinishingMonitor getTaskAttemptFinishingMonitor() {
return null;
}
}
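// A minimal, hypothetical usage sketch (not part of the original file): MockAppContext
// is typically handed to tests that need an AppContext pre-populated with MockJobs
// data. The class and method names below are illustrative assumptions only.
class MockAppContextUsageSketch {
  // Builds a context for app id 0 containing one job with two tasks and one attempt each.
  static AppContext newPopulatedContext() {
    return new MockAppContext(0, 1, 2, 1);
  }
}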
| 4,153
| 25.291139
| 83
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestTaskHeartbeatHandler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.event.Event;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.util.Clock;
import org.apache.hadoop.yarn.util.SystemClock;
import org.junit.Test;
public class TestTaskHeartbeatHandler {
@SuppressWarnings({ "rawtypes", "unchecked" })
@Test
public void testTimeout() throws InterruptedException {
EventHandler mockHandler = mock(EventHandler.class);
Clock clock = new SystemClock();
TaskHeartbeatHandler hb = new TaskHeartbeatHandler(mockHandler, clock, 1);
Configuration conf = new Configuration();
conf.setInt(MRJobConfig.TASK_TIMEOUT, 10); //10 ms
conf.setInt(MRJobConfig.TASK_TIMEOUT_CHECK_INTERVAL_MS, 10); //10 ms
hb.init(conf);
hb.start();
try {
      ApplicationId appId = ApplicationId.newInstance(0L, 5);
JobId jobId = MRBuilderUtils.newJobId(appId, 4);
TaskId tid = MRBuilderUtils.newTaskId(jobId, 3, TaskType.MAP);
TaskAttemptId taid = MRBuilderUtils.newTaskAttemptId(tid, 2);
hb.register(taid);
Thread.sleep(100);
//Events only happen when the task is canceled
verify(mockHandler, times(2)).handle(any(Event.class));
} finally {
hb.stop();
}
}
}
| 2,688
| 36.347222
| 78
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.EnumSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.WrappedJvmID;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.JobStatus.State;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
import org.apache.hadoop.mapreduce.jobhistory.NormalizedResourceEvent;
import org.apache.hadoop.mapreduce.security.TokenCache;
import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
import org.apache.hadoop.mapreduce.v2.app.client.MRClientService;
import org.apache.hadoop.mapreduce.v2.app.commit.CommitterEvent;
import org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.JobStateInternal;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttemptStateInternal;
import org.apache.hadoop.mapreduce.v2.app.job.TaskStateInternal;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobFinishEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerLaunchedEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl;
import org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl;
import org.apache.hadoop.mapreduce.v2.app.job.impl.TaskImpl;
import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncher;
import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherEvent;
import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent;
import org.apache.hadoop.mapreduce.v2.app.rm.RMHeartbeatHandler;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.service.Service;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
import org.apache.hadoop.yarn.state.StateMachine;
import org.apache.hadoop.yarn.state.StateMachineFactory;
import org.apache.hadoop.yarn.util.Clock;
import org.apache.hadoop.yarn.util.SystemClock;
import org.junit.Assert;
/**
* Mock MRAppMaster. Doesn't start RPC servers.
 * No threads are started except for the event Dispatcher thread.
 * A minimal usage sketch follows the class body.
*/
@SuppressWarnings("unchecked")
public class MRApp extends MRAppMaster {
private static final Log LOG = LogFactory.getLog(MRApp.class);
int maps;
int reduces;
private File testWorkDir;
private Path testAbsPath;
private ClusterInfo clusterInfo;
  // Queue that we pretend the RM assigned to us
private String assignedQueue;
public static String NM_HOST = "localhost";
public static int NM_PORT = 1234;
public static int NM_HTTP_PORT = 8042;
//if true, tasks complete automatically as soon as they are launched
protected boolean autoComplete = false;
static ApplicationId applicationId;
static {
applicationId = ApplicationId.newInstance(0, 0);
}
public MRApp(int maps, int reduces, boolean autoComplete, String testName,
boolean cleanOnStart, Clock clock) {
this(maps, reduces, autoComplete, testName, cleanOnStart, 1, clock, null);
}
public MRApp(int maps, int reduces, boolean autoComplete, String testName,
boolean cleanOnStart, Clock clock, boolean unregistered) {
this(maps, reduces, autoComplete, testName, cleanOnStart, 1, clock,
unregistered);
}
public MRApp(int maps, int reduces, boolean autoComplete, String testName,
boolean cleanOnStart) {
this(maps, reduces, autoComplete, testName, cleanOnStart, 1);
}
public MRApp(int maps, int reduces, boolean autoComplete, String testName,
boolean cleanOnStart, String assignedQueue) {
this(maps, reduces, autoComplete, testName, cleanOnStart, 1,
new SystemClock(), assignedQueue);
}
public MRApp(int maps, int reduces, boolean autoComplete, String testName,
boolean cleanOnStart, boolean unregistered) {
this(maps, reduces, autoComplete, testName, cleanOnStart, 1, unregistered);
}
@Override
protected void initJobCredentialsAndUGI(Configuration conf) {
// Fake a shuffle secret that normally is provided by the job client.
String shuffleSecret = "fake-shuffle-secret";
TokenCache.setShuffleSecretKey(shuffleSecret.getBytes(), getCredentials());
}
private static ApplicationAttemptId getApplicationAttemptId(
ApplicationId applicationId, int startCount) {
ApplicationAttemptId applicationAttemptId =
ApplicationAttemptId.newInstance(applicationId, startCount);
return applicationAttemptId;
}
private static ContainerId getContainerId(ApplicationId applicationId,
int startCount) {
ApplicationAttemptId appAttemptId =
getApplicationAttemptId(applicationId, startCount);
ContainerId containerId =
ContainerId.newContainerId(appAttemptId, startCount);
return containerId;
}
public MRApp(int maps, int reduces, boolean autoComplete, String testName,
boolean cleanOnStart, int startCount) {
this(maps, reduces, autoComplete, testName, cleanOnStart, startCount,
new SystemClock(), null);
}
public MRApp(int maps, int reduces, boolean autoComplete, String testName,
boolean cleanOnStart, int startCount, boolean unregistered) {
this(maps, reduces, autoComplete, testName, cleanOnStart, startCount,
new SystemClock(), unregistered);
}
public MRApp(int maps, int reduces, boolean autoComplete, String testName,
boolean cleanOnStart, int startCount, Clock clock, boolean unregistered) {
this(getApplicationAttemptId(applicationId, startCount), getContainerId(
applicationId, startCount), maps, reduces, autoComplete, testName,
cleanOnStart, startCount, clock, unregistered, null);
}
public MRApp(int maps, int reduces, boolean autoComplete, String testName,
boolean cleanOnStart, int startCount, Clock clock, String assignedQueue) {
this(getApplicationAttemptId(applicationId, startCount), getContainerId(
applicationId, startCount), maps, reduces, autoComplete, testName,
cleanOnStart, startCount, clock, true, assignedQueue);
}
public MRApp(ApplicationAttemptId appAttemptId, ContainerId amContainerId,
int maps, int reduces, boolean autoComplete, String testName,
boolean cleanOnStart, int startCount, boolean unregistered) {
this(appAttemptId, amContainerId, maps, reduces, autoComplete, testName,
cleanOnStart, startCount, new SystemClock(), unregistered, null);
}
public MRApp(ApplicationAttemptId appAttemptId, ContainerId amContainerId,
int maps, int reduces, boolean autoComplete, String testName,
boolean cleanOnStart, int startCount) {
this(appAttemptId, amContainerId, maps, reduces, autoComplete, testName,
cleanOnStart, startCount, new SystemClock(), true, null);
}
public MRApp(ApplicationAttemptId appAttemptId, ContainerId amContainerId,
int maps, int reduces, boolean autoComplete, String testName,
boolean cleanOnStart, int startCount, Clock clock, boolean unregistered,
String assignedQueue) {
super(appAttemptId, amContainerId, NM_HOST, NM_PORT, NM_HTTP_PORT, clock,
System.currentTimeMillis());
this.testWorkDir = new File("target", testName);
testAbsPath = new Path(testWorkDir.getAbsolutePath());
LOG.info("PathUsed: " + testAbsPath);
if (cleanOnStart) {
testAbsPath = new Path(testWorkDir.getAbsolutePath());
try {
FileContext.getLocalFSFileContext().delete(testAbsPath, true);
} catch (Exception e) {
LOG.warn("COULD NOT CLEANUP: " + testAbsPath, e);
throw new YarnRuntimeException("could not cleanup test dir", e);
}
}
this.maps = maps;
this.reduces = reduces;
this.autoComplete = autoComplete;
// If safeToReportTerminationToUser is set to true, we can verify whether
    // the job can reach the final state when MRAppMaster shuts down.
this.successfullyUnregistered.set(unregistered);
this.assignedQueue = assignedQueue;
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
try {
//Create the staging directory if it does not exist
String user = UserGroupInformation.getCurrentUser().getShortUserName();
Path stagingDir = MRApps.getStagingAreaDir(conf, user);
FileSystem fs = getFileSystem(conf);
fs.mkdirs(stagingDir);
} catch (Exception e) {
throw new YarnRuntimeException("Error creating staging dir", e);
}
super.serviceInit(conf);
if (this.clusterInfo != null) {
getContext().getClusterInfo().setMaxContainerCapability(
this.clusterInfo.getMaxContainerCapability());
} else {
getContext().getClusterInfo().setMaxContainerCapability(
Resource.newInstance(10240, 1));
}
}
public Job submit(Configuration conf) throws Exception {
//TODO: fix the bug where the speculator gets events with
//not-fully-constructed objects. For now, disable speculative exec
return submit(conf, false, false);
}
public Job submit(Configuration conf, boolean mapSpeculative,
boolean reduceSpeculative) throws Exception {
String user = conf.get(MRJobConfig.USER_NAME, UserGroupInformation
.getCurrentUser().getShortUserName());
conf.set(MRJobConfig.USER_NAME, user);
conf.set(MRJobConfig.MR_AM_STAGING_DIR, testAbsPath.toString());
conf.setBoolean(MRJobConfig.MR_AM_CREATE_JH_INTERMEDIATE_BASE_DIR, true);
// TODO: fix the bug where the speculator gets events with
// not-fully-constructed objects. For now, disable speculative exec
conf.setBoolean(MRJobConfig.MAP_SPECULATIVE, mapSpeculative);
conf.setBoolean(MRJobConfig.REDUCE_SPECULATIVE, reduceSpeculative);
init(conf);
start();
DefaultMetricsSystem.shutdown();
Job job = getContext().getAllJobs().values().iterator().next();
if (assignedQueue != null) {
job.setQueueName(assignedQueue);
}
// Write job.xml
String jobFile = MRApps.getJobFile(conf, user,
TypeConverter.fromYarn(job.getID()));
LOG.info("Writing job conf to " + jobFile);
new File(jobFile).getParentFile().mkdirs();
conf.writeXml(new FileOutputStream(jobFile));
return job;
}
public void waitForInternalState(JobImpl job,
JobStateInternal finalState) throws Exception {
int timeoutSecs = 0;
JobStateInternal iState = job.getInternalState();
while (!finalState.equals(iState) && timeoutSecs++ < 20) {
System.out.println("Job Internal State is : " + iState
+ " Waiting for Internal state : " + finalState);
Thread.sleep(500);
iState = job.getInternalState();
}
System.out.println("Task Internal State is : " + iState);
Assert.assertEquals("Task Internal state is not correct (timedout)",
finalState, iState);
}
public void waitForInternalState(TaskImpl task,
TaskStateInternal finalState) throws Exception {
int timeoutSecs = 0;
TaskReport report = task.getReport();
TaskStateInternal iState = task.getInternalState();
while (!finalState.equals(iState) && timeoutSecs++ < 20) {
System.out.println("Task Internal State is : " + iState
+ " Waiting for Internal state : " + finalState + " progress : "
+ report.getProgress());
Thread.sleep(500);
report = task.getReport();
iState = task.getInternalState();
}
System.out.println("Task Internal State is : " + iState);
Assert.assertEquals("Task Internal state is not correct (timedout)",
finalState, iState);
}
public void waitForInternalState(TaskAttemptImpl attempt,
TaskAttemptStateInternal finalState) throws Exception {
int timeoutSecs = 0;
TaskAttemptReport report = attempt.getReport();
TaskAttemptStateInternal iState = attempt.getInternalState();
while (!finalState.equals(iState) && timeoutSecs++ < 20) {
System.out.println("TaskAttempt Internal State is : " + iState
+ " Waiting for Internal state : " + finalState + " progress : "
+ report.getProgress());
Thread.sleep(500);
report = attempt.getReport();
iState = attempt.getInternalState();
}
System.out.println("TaskAttempt Internal State is : " + iState);
Assert.assertEquals("TaskAttempt Internal state is not correct (timedout)",
finalState, iState);
}
public void waitForState(TaskAttempt attempt,
TaskAttemptState finalState) throws Exception {
int timeoutSecs = 0;
TaskAttemptReport report = attempt.getReport();
while (!finalState.equals(report.getTaskAttemptState()) &&
timeoutSecs++ < 20) {
System.out.println("TaskAttempt State is : " + report.getTaskAttemptState() +
" Waiting for state : " + finalState +
" progress : " + report.getProgress());
report = attempt.getReport();
Thread.sleep(500);
}
System.out.println("TaskAttempt State is : " + report.getTaskAttemptState());
Assert.assertEquals("TaskAttempt state is not correct (timedout)",
finalState,
report.getTaskAttemptState());
}
public void waitForState(Task task, TaskState finalState) throws Exception {
int timeoutSecs = 0;
TaskReport report = task.getReport();
while (!finalState.equals(report.getTaskState()) &&
timeoutSecs++ < 20) {
System.out.println("Task State for " + task.getID() + " is : "
+ report.getTaskState() + " Waiting for state : " + finalState
+ " progress : " + report.getProgress());
report = task.getReport();
Thread.sleep(500);
}
System.out.println("Task State is : " + report.getTaskState());
Assert.assertEquals("Task state is not correct (timedout)", finalState,
report.getTaskState());
}
public void waitForState(Job job, JobState finalState) throws Exception {
int timeoutSecs = 0;
JobReport report = job.getReport();
while (!finalState.equals(report.getJobState()) &&
timeoutSecs++ < 20) {
System.out.println("Job State is : " + report.getJobState() +
" Waiting for state : " + finalState +
" map progress : " + report.getMapProgress() +
" reduce progress : " + report.getReduceProgress());
report = job.getReport();
Thread.sleep(500);
}
System.out.println("Job State is : " + report.getJobState());
Assert.assertEquals("Job state is not correct (timedout)", finalState,
job.getState());
}
public void waitForState(Service.STATE finalState) throws Exception {
if (finalState == Service.STATE.STOPPED) {
Assert.assertTrue("Timeout while waiting for MRApp to stop",
waitForServiceToStop(20 * 1000));
} else {
int timeoutSecs = 0;
while (!finalState.equals(getServiceState()) && timeoutSecs++ < 20) {
System.out.println("MRApp State is : " + getServiceState()
+ " Waiting for state : " + finalState);
Thread.sleep(500);
}
System.out.println("MRApp State is : " + getServiceState());
Assert.assertEquals("MRApp state is not correct (timedout)", finalState,
getServiceState());
}
}
public void verifyCompleted() {
for (Job job : getContext().getAllJobs().values()) {
JobReport jobReport = job.getReport();
System.out.println("Job start time :" + jobReport.getStartTime());
System.out.println("Job finish time :" + jobReport.getFinishTime());
Assert.assertTrue("Job start time is not less than finish time",
jobReport.getStartTime() <= jobReport.getFinishTime());
Assert.assertTrue("Job finish time is in future",
jobReport.getFinishTime() <= System.currentTimeMillis());
for (Task task : job.getTasks().values()) {
TaskReport taskReport = task.getReport();
System.out.println("Task start time : " + taskReport.getStartTime());
System.out.println("Task finish time : " + taskReport.getFinishTime());
Assert.assertTrue("Task start time is not less than finish time",
taskReport.getStartTime() <= taskReport.getFinishTime());
for (TaskAttempt attempt : task.getAttempts().values()) {
TaskAttemptReport attemptReport = attempt.getReport();
Assert.assertTrue("Attempt start time is not less than finish time",
attemptReport.getStartTime() <= attemptReport.getFinishTime());
}
}
}
}
@Override
protected Job createJob(Configuration conf, JobStateInternal forcedState,
String diagnostic) {
UserGroupInformation currentUser = null;
try {
currentUser = UserGroupInformation.getCurrentUser();
} catch (IOException e) {
throw new YarnRuntimeException(e);
}
Job newJob = new TestJob(getJobId(), getAttemptID(), conf,
getDispatcher().getEventHandler(),
getTaskAttemptListener(), getContext().getClock(),
getCommitter(), isNewApiCommitter(),
currentUser.getUserName(), getContext(),
forcedState, diagnostic);
((AppContext) getContext()).getAllJobs().put(newJob.getID(), newJob);
getDispatcher().register(JobFinishEvent.Type.class,
new EventHandler<JobFinishEvent>() {
@Override
public void handle(JobFinishEvent event) {
stop();
}
});
return newJob;
}
@Override
protected TaskAttemptFinishingMonitor
createTaskAttemptFinishingMonitor(
EventHandler eventHandler) {
return new TaskAttemptFinishingMonitor(eventHandler) {
@Override
public synchronized void register(TaskAttemptId attemptID) {
getContext().getEventHandler().handle(
new TaskAttemptEvent(attemptID,
TaskAttemptEventType.TA_CONTAINER_COMPLETED));
}
};
}
@Override
protected TaskAttemptListener createTaskAttemptListener(AppContext context) {
return new TaskAttemptListener(){
@Override
public InetSocketAddress getAddress() {
return NetUtils.createSocketAddr("localhost:54321");
}
@Override
public void registerLaunchedTask(TaskAttemptId attemptID,
WrappedJvmID jvmID) {
}
@Override
public void unregister(TaskAttemptId attemptID, WrappedJvmID jvmID) {
}
@Override
public void registerPendingTask(org.apache.hadoop.mapred.Task task,
WrappedJvmID jvmID) {
}
};
}
@Override
protected EventHandler<JobHistoryEvent> createJobHistoryHandler(
AppContext context) {//disable history
return new EventHandler<JobHistoryEvent>() {
@Override
public void handle(JobHistoryEvent event) {
}
};
}
@Override
protected ContainerLauncher createContainerLauncher(AppContext context) {
return new MockContainerLauncher();
}
protected class MockContainerLauncher implements ContainerLauncher {
//We are running locally so set the shuffle port to -1
int shufflePort = -1;
public MockContainerLauncher() {
}
@Override
public void handle(ContainerLauncherEvent event) {
switch (event.getType()) {
case CONTAINER_REMOTE_LAUNCH:
getContext().getEventHandler().handle(
new TaskAttemptContainerLaunchedEvent(event.getTaskAttemptID(),
shufflePort));
attemptLaunched(event.getTaskAttemptID());
break;
case CONTAINER_REMOTE_CLEANUP:
getContext().getEventHandler().handle(
new TaskAttemptEvent(event.getTaskAttemptID(),
TaskAttemptEventType.TA_CONTAINER_CLEANED));
break;
case CONTAINER_COMPLETED:
break;
}
}
}
protected void attemptLaunched(TaskAttemptId attemptID) {
if (autoComplete) {
// send the done event
getContext().getEventHandler().handle(
new TaskAttemptEvent(attemptID,
TaskAttemptEventType.TA_DONE));
}
}
@Override
protected ContainerAllocator createContainerAllocator(
ClientService clientService, final AppContext context) {
return new MRAppContainerAllocator();
}
protected class MRAppContainerAllocator
implements ContainerAllocator, RMHeartbeatHandler {
private int containerCount;
@Override
public void handle(ContainerAllocatorEvent event) {
ContainerId cId =
ContainerId.newContainerId(getContext().getApplicationAttemptId(),
containerCount++);
NodeId nodeId = NodeId.newInstance(NM_HOST, NM_PORT);
Resource resource = Resource.newInstance(1234, 2);
ContainerTokenIdentifier containerTokenIdentifier =
new ContainerTokenIdentifier(cId, nodeId.toString(), "user",
resource, System.currentTimeMillis() + 10000, 42, 42,
Priority.newInstance(0), 0);
Token containerToken = newContainerToken(nodeId, "password".getBytes(),
containerTokenIdentifier);
Container container = Container.newInstance(cId, nodeId,
NM_HOST + ":" + NM_HTTP_PORT, resource, null, containerToken);
JobID id = TypeConverter.fromYarn(applicationId);
JobId jobId = TypeConverter.toYarn(id);
getContext().getEventHandler().handle(new JobHistoryEvent(jobId,
new NormalizedResourceEvent(
org.apache.hadoop.mapreduce.TaskType.REDUCE,
100)));
getContext().getEventHandler().handle(new JobHistoryEvent(jobId,
new NormalizedResourceEvent(
org.apache.hadoop.mapreduce.TaskType.MAP,
100)));
getContext().getEventHandler().handle(
new TaskAttemptContainerAssignedEvent(event.getAttemptID(),
container, null));
}
@Override
public long getLastHeartbeatTime() {
return getContext().getClock().getTime();
}
@Override
public void runOnNextHeartbeat(Runnable callback) {
callback.run();
}
}
@Override
protected EventHandler<CommitterEvent> createCommitterEventHandler(
AppContext context, final OutputCommitter committer) {
// create an output committer with the task methods stubbed out
OutputCommitter stubbedCommitter = new OutputCommitter() {
@Override
public void setupJob(JobContext jobContext) throws IOException {
committer.setupJob(jobContext);
}
@SuppressWarnings("deprecation")
@Override
public void cleanupJob(JobContext jobContext) throws IOException {
committer.cleanupJob(jobContext);
}
@Override
public void commitJob(JobContext jobContext) throws IOException {
committer.commitJob(jobContext);
}
@Override
public void abortJob(JobContext jobContext, State state)
throws IOException {
committer.abortJob(jobContext, state);
}
@Override
public boolean isRecoverySupported(JobContext jobContext) throws IOException{
return committer.isRecoverySupported(jobContext);
}
@SuppressWarnings("deprecation")
@Override
public boolean isRecoverySupported() {
return committer.isRecoverySupported();
}
@Override
public void setupTask(TaskAttemptContext taskContext)
throws IOException {
}
@Override
public boolean needsTaskCommit(TaskAttemptContext taskContext)
throws IOException {
return false;
}
@Override
public void commitTask(TaskAttemptContext taskContext)
throws IOException {
}
@Override
public void abortTask(TaskAttemptContext taskContext)
throws IOException {
}
@Override
public void recoverTask(TaskAttemptContext taskContext)
throws IOException {
}
};
return new CommitterEventHandler(context, stubbedCommitter,
getRMHeartbeatHandler());
}
@Override
protected ClientService createClientService(AppContext context) {
return new MRClientService(context) {
@Override
public InetSocketAddress getBindAddress() {
return NetUtils.createSocketAddr("localhost:9876");
}
@Override
public int getHttpPort() {
return -1;
}
};
}
public void setClusterInfo(ClusterInfo clusterInfo) {
// Only useful if set before a job is started.
if (getServiceState() == Service.STATE.NOTINITED
|| getServiceState() == Service.STATE.INITED) {
this.clusterInfo = clusterInfo;
} else {
throw new IllegalStateException(
"ClusterInfo can only be set before the App is STARTED");
}
}
class TestJob extends JobImpl {
//override the init transition
private final TestInitTransition initTransition = new TestInitTransition(
maps, reduces);
StateMachineFactory<JobImpl, JobStateInternal, JobEventType, JobEvent> localFactory
= stateMachineFactory.addTransition(JobStateInternal.NEW,
EnumSet.of(JobStateInternal.INITED, JobStateInternal.FAILED),
JobEventType.JOB_INIT,
// This is abusive.
initTransition);
private final StateMachine<JobStateInternal, JobEventType, JobEvent>
localStateMachine;
@Override
protected StateMachine<JobStateInternal, JobEventType, JobEvent> getStateMachine() {
return localStateMachine;
}
@SuppressWarnings("rawtypes")
public TestJob(JobId jobId, ApplicationAttemptId applicationAttemptId,
Configuration conf, EventHandler eventHandler,
TaskAttemptListener taskAttemptListener, Clock clock,
OutputCommitter committer, boolean newApiCommitter,
String user, AppContext appContext,
JobStateInternal forcedState, String diagnostic) {
super(jobId, getApplicationAttemptId(applicationId, getStartCount()),
conf, eventHandler, taskAttemptListener,
new JobTokenSecretManager(), new Credentials(), clock,
getCompletedTaskFromPreviousRun(), metrics, committer,
newApiCommitter, user, System.currentTimeMillis(), getAllAMInfos(),
appContext, forcedState, diagnostic);
// This "this leak" is okay because the retained pointer is in an
// instance variable.
localStateMachine = localFactory.make(this);
}
}
//Override InitTransition to not look for split files etc
static class TestInitTransition extends JobImpl.InitTransition {
private int maps;
private int reduces;
TestInitTransition(int maps, int reduces) {
this.maps = maps;
this.reduces = reduces;
}
@Override
protected void setup(JobImpl job) throws IOException {
super.setup(job);
job.conf.setInt(MRJobConfig.NUM_REDUCES, reduces);
job.remoteJobConfFile = new Path("test");
}
@Override
protected TaskSplitMetaInfo[] createSplits(JobImpl job, JobId jobId) {
TaskSplitMetaInfo[] splits = new TaskSplitMetaInfo[maps];
for (int i = 0; i < maps ; i++) {
splits[i] = new TaskSplitMetaInfo();
}
return splits;
}
}
public static Token newContainerToken(NodeId nodeId,
byte[] password, ContainerTokenIdentifier tokenIdentifier) {
// RPC layer client expects ip:port as service for tokens
InetSocketAddress addr =
NetUtils.createSocketAddrForHost(nodeId.getHost(), nodeId.getPort());
// NOTE: use SecurityUtil.setTokenService if this becomes a "real" token
Token containerToken =
Token.newInstance(tokenIdentifier.getBytes(),
ContainerTokenIdentifier.KIND.toString(), password, SecurityUtil
.buildTokenService(addr).toString());
return containerToken;
}
public static ContainerId newContainerId(int appId, int appAttemptId,
long timestamp, int containerId) {
ApplicationId applicationId = ApplicationId.newInstance(timestamp, appId);
ApplicationAttemptId applicationAttemptId =
ApplicationAttemptId.newInstance(applicationId, appAttemptId);
return ContainerId.newContainerId(applicationAttemptId, containerId);
}
public static ContainerTokenIdentifier newContainerTokenIdentifier(
Token containerToken) throws IOException {
org.apache.hadoop.security.token.Token<ContainerTokenIdentifier> token =
new org.apache.hadoop.security.token.Token<ContainerTokenIdentifier>(
containerToken.getIdentifier()
.array(), containerToken.getPassword().array(), new Text(
containerToken.getKind()),
new Text(containerToken.getService()));
return token.decodeIdentifier();
}
@Override
protected void shutdownTaskLog() {
// Avoid closing the logging system during unit tests,
// otherwise subsequent MRApp instances in the same test
// will fail to log anything.
}
@Override
protected void shutdownLogManager() {
// Avoid closing the logging system during unit tests,
// otherwise subsequent MRApp instances in the same test
// will fail to log anything.
}
}
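// A brief, hypothetical usage sketch (not part of the original file), referenced from
// the class javadoc above: it shows the submit/wait cycle that tests built on MRApp
// typically follow. The class name and literal values are illustrative assumptions.
class MRAppUsageSketch {
  static void runOnce() throws Exception {
    // autoComplete=true makes task attempts finish as soon as they are launched
    MRApp app = new MRApp(1, 1, true, "MRAppUsageSketch", true);
    Job job = app.submit(new Configuration());
    app.waitForState(job, JobState.SUCCEEDED);
    app.verifyCompleted();
    app.stop();
  }
}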
| 32,277
| 38.26764
| 88
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.atLeast;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.junit.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.JobCounter;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.jobhistory.Event;
import org.apache.hadoop.mapreduce.jobhistory.EventType;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerLaunchedEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskRecoverEvent;
import org.apache.hadoop.mapreduce.v2.app.job.impl.MapTaskImpl;
import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncher;
import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherEvent;
import org.apache.hadoop.mapreduce.v2.app.metrics.MRAppMetrics;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.util.Clock;
import org.apache.hadoop.yarn.util.SystemClock;
import org.junit.Test;
import org.mockito.ArgumentCaptor;
@SuppressWarnings({"unchecked", "rawtypes"})
public class TestRecovery {
private static final Log LOG = LogFactory.getLog(TestRecovery.class);
private static Path outputDir = new Path(new File("target",
TestRecovery.class.getName()).getAbsolutePath() +
Path.SEPARATOR + "out");
private static String partFile = "part-r-00000";
private Text key1 = new Text("key1");
private Text key2 = new Text("key2");
private Text val1 = new Text("val1");
private Text val2 = new Text("val2");
/**
 * AM with 2 maps and 1 reduce. For the 1st map, one attempt fails, one attempt
 * completely disappears because of a failed launch, one attempt gets killed, and
 * one attempt succeeds. The AM crashes after the first task finishes, then
 * recovers completely and succeeds in the second generation.
*
* @throws Exception
*/
@Test
public void testCrashed() throws Exception {
int runCount = 0;
long am1StartTimeEst = System.currentTimeMillis();
MRApp app = new MRAppWithHistory(2, 1, false, this.getClass().getName(), true, ++runCount);
Configuration conf = new Configuration();
conf.setBoolean("mapred.mapper.new-api", true);
conf.setBoolean("mapred.reducer.new-api", true);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
Job job = app.submit(conf);
app.waitForState(job, JobState.RUNNING);
long jobStartTime = job.getReport().getStartTime();
//all maps would be running
Assert.assertEquals("No of tasks not correct",
3, job.getTasks().size());
Iterator<Task> it = job.getTasks().values().iterator();
Task mapTask1 = it.next();
Task mapTask2 = it.next();
Task reduceTask = it.next();
// all maps must be running
app.waitForState(mapTask1, TaskState.RUNNING);
app.waitForState(mapTask2, TaskState.RUNNING);
TaskAttempt task1Attempt1 = mapTask1.getAttempts().values().iterator().next();
TaskAttempt task2Attempt = mapTask2.getAttempts().values().iterator().next();
    //before sending the TA_DONE event, make sure the attempt has come to
    //the RUNNING state
app.waitForState(task1Attempt1, TaskAttemptState.RUNNING);
app.waitForState(task2Attempt, TaskAttemptState.RUNNING);
    // the reduce should also be running (the mock allocator assigns containers immediately)
Assert.assertEquals("Reduce Task state not correct",
TaskState.RUNNING, reduceTask.getReport().getTaskState());
/////////// Play some games with the TaskAttempts of the first task //////
//send the fail signal to the 1st map task attempt
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(
task1Attempt1.getID(),
TaskAttemptEventType.TA_FAILMSG));
app.waitForState(task1Attempt1, TaskAttemptState.FAILED);
int timeOut = 0;
while (mapTask1.getAttempts().size() != 2 && timeOut++ < 10) {
Thread.sleep(2000);
LOG.info("Waiting for next attempt to start");
}
Assert.assertEquals(2, mapTask1.getAttempts().size());
Iterator<TaskAttempt> itr = mapTask1.getAttempts().values().iterator();
itr.next();
TaskAttempt task1Attempt2 = itr.next();
// This attempt will automatically fail because of the way ContainerLauncher
// is setup
// This attempt 'disappears' from JobHistory and so causes MAPREDUCE-3846
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(task1Attempt2.getID(),
TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED));
app.waitForState(task1Attempt2, TaskAttemptState.FAILED);
timeOut = 0;
while (mapTask1.getAttempts().size() != 3 && timeOut++ < 10) {
Thread.sleep(2000);
LOG.info("Waiting for next attempt to start");
}
Assert.assertEquals(3, mapTask1.getAttempts().size());
itr = mapTask1.getAttempts().values().iterator();
itr.next();
itr.next();
TaskAttempt task1Attempt3 = itr.next();
app.waitForState(task1Attempt3, TaskAttemptState.RUNNING);
//send the kill signal to the 1st map 3rd attempt
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(
task1Attempt3.getID(),
TaskAttemptEventType.TA_KILL));
app.waitForState(task1Attempt3, TaskAttemptState.KILLED);
timeOut = 0;
while (mapTask1.getAttempts().size() != 4 && timeOut++ < 10) {
Thread.sleep(2000);
LOG.info("Waiting for next attempt to start");
}
Assert.assertEquals(4, mapTask1.getAttempts().size());
itr = mapTask1.getAttempts().values().iterator();
itr.next();
itr.next();
itr.next();
TaskAttempt task1Attempt4 = itr.next();
app.waitForState(task1Attempt4, TaskAttemptState.RUNNING);
//send the done signal to the 1st map 4th attempt
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(
task1Attempt4.getID(),
TaskAttemptEventType.TA_DONE));
/////////// End of games with the TaskAttempts of the first task //////
//wait for first map task to complete
app.waitForState(mapTask1, TaskState.SUCCEEDED);
long task1StartTime = mapTask1.getReport().getStartTime();
long task1FinishTime = mapTask1.getReport().getFinishTime();
//stop the app
app.stop();
//rerun
    //in the rerun the 1st map will be recovered from the previous run
long am2StartTimeEst = System.currentTimeMillis();
app = new MRAppWithHistory(2, 1, false, this.getClass().getName(), false, ++runCount);
conf = new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true);
conf.setBoolean("mapred.mapper.new-api", true);
conf.setBoolean("mapred.reducer.new-api", true);
conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
job = app.submit(conf);
app.waitForState(job, JobState.RUNNING);
//all maps would be running
Assert.assertEquals("No of tasks not correct",
3, job.getTasks().size());
it = job.getTasks().values().iterator();
mapTask1 = it.next();
mapTask2 = it.next();
reduceTask = it.next();
// first map will be recovered, no need to send done
app.waitForState(mapTask1, TaskState.SUCCEEDED);
app.waitForState(mapTask2, TaskState.RUNNING);
task2Attempt = mapTask2.getAttempts().values().iterator().next();
    //before sending the TA_DONE event, make sure the attempt has come to
    //the RUNNING state
app.waitForState(task2Attempt, TaskAttemptState.RUNNING);
//send the done signal to the 2nd map task
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(
mapTask2.getAttempts().values().iterator().next().getID(),
TaskAttemptEventType.TA_DONE));
//wait to get it completed
app.waitForState(mapTask2, TaskState.SUCCEEDED);
//wait for reduce to be running before sending done
app.waitForState(reduceTask, TaskState.RUNNING);
//send the done signal to the reduce
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(
reduceTask.getAttempts().values().iterator().next().getID(),
TaskAttemptEventType.TA_DONE));
app.waitForState(job, JobState.SUCCEEDED);
app.verifyCompleted();
Assert.assertEquals("Job Start time not correct",
jobStartTime, job.getReport().getStartTime());
Assert.assertEquals("Task Start time not correct",
task1StartTime, mapTask1.getReport().getStartTime());
Assert.assertEquals("Task Finish time not correct",
task1FinishTime, mapTask1.getReport().getFinishTime());
Assert.assertEquals(2, job.getAMInfos().size());
int attemptNum = 1;
// Verify AMInfo
for (AMInfo amInfo : job.getAMInfos()) {
Assert.assertEquals(attemptNum++, amInfo.getAppAttemptId()
.getAttemptId());
Assert.assertEquals(amInfo.getAppAttemptId(), amInfo.getContainerId()
.getApplicationAttemptId());
Assert.assertEquals(MRApp.NM_HOST, amInfo.getNodeManagerHost());
Assert.assertEquals(MRApp.NM_PORT, amInfo.getNodeManagerPort());
Assert.assertEquals(MRApp.NM_HTTP_PORT, amInfo.getNodeManagerHttpPort());
}
long am1StartTimeReal = job.getAMInfos().get(0).getStartTime();
long am2StartTimeReal = job.getAMInfos().get(1).getStartTime();
Assert.assertTrue(am1StartTimeReal >= am1StartTimeEst
&& am1StartTimeReal <= am2StartTimeEst);
Assert.assertTrue(am2StartTimeReal >= am2StartTimeEst
&& am2StartTimeReal <= System.currentTimeMillis());
// TODO Add verification of additional data from jobHistory - whatever was
// available in the failed attempt should be available here
}
/**
 * AM with 3 maps and 0 reduces. The AM crashes after the first two tasks finish,
 * then recovers completely and succeeds in the second generation.
*
* @throws Exception
*/
@Test
public void testCrashOfMapsOnlyJob() throws Exception {
int runCount = 0;
MRApp app =
new MRAppWithHistory(3, 0, false, this.getClass().getName(), true,
++runCount);
Configuration conf = new Configuration();
conf.setBoolean("mapred.mapper.new-api", true);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
Job job = app.submit(conf);
app.waitForState(job, JobState.RUNNING);
// all maps would be running
Assert.assertEquals("No of tasks not correct", 3, job.getTasks().size());
Iterator<Task> it = job.getTasks().values().iterator();
Task mapTask1 = it.next();
Task mapTask2 = it.next();
Task mapTask3 = it.next();
// all maps must be running
app.waitForState(mapTask1, TaskState.RUNNING);
app.waitForState(mapTask2, TaskState.RUNNING);
app.waitForState(mapTask3, TaskState.RUNNING);
TaskAttempt task1Attempt =
mapTask1.getAttempts().values().iterator().next();
TaskAttempt task2Attempt =
mapTask2.getAttempts().values().iterator().next();
TaskAttempt task3Attempt =
mapTask3.getAttempts().values().iterator().next();
    // before sending the TA_DONE event, make sure the attempt has come to
    // the RUNNING state
app.waitForState(task1Attempt, TaskAttemptState.RUNNING);
app.waitForState(task2Attempt, TaskAttemptState.RUNNING);
app.waitForState(task3Attempt, TaskAttemptState.RUNNING);
// send the done signal to the 1st two maps
app
.getContext()
.getEventHandler()
.handle(
new TaskAttemptEvent(task1Attempt.getID(), TaskAttemptEventType.TA_DONE));
app
.getContext()
.getEventHandler()
.handle(
new TaskAttemptEvent(task2Attempt.getID(), TaskAttemptEventType.TA_DONE));
    // wait for the first two map tasks to complete
app.waitForState(mapTask1, TaskState.SUCCEEDED);
app.waitForState(mapTask2, TaskState.SUCCEEDED);
// stop the app
app.stop();
// rerun
    // in the rerun the 1st two maps will be recovered from the previous run
app =
new MRAppWithHistory(2, 1, false, this.getClass().getName(), false,
++runCount);
conf = new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true);
conf.setBoolean("mapred.mapper.new-api", true);
conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
// Set num-reduces explicitly in conf as recovery logic depends on it.
conf.setInt(MRJobConfig.NUM_REDUCES, 0);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
job = app.submit(conf);
app.waitForState(job, JobState.RUNNING);
Assert.assertEquals("No of tasks not correct", 3, job.getTasks().size());
it = job.getTasks().values().iterator();
mapTask1 = it.next();
mapTask2 = it.next();
mapTask3 = it.next();
// first two maps will be recovered, no need to send done
app.waitForState(mapTask1, TaskState.SUCCEEDED);
app.waitForState(mapTask2, TaskState.SUCCEEDED);
app.waitForState(mapTask3, TaskState.RUNNING);
task3Attempt = mapTask3.getAttempts().values().iterator().next();
    // before sending the TA_DONE event, make sure the attempt has come to
    // the RUNNING state
app.waitForState(task3Attempt, TaskAttemptState.RUNNING);
// send the done signal to the 3rd map task
app
.getContext()
.getEventHandler()
.handle(
new TaskAttemptEvent(mapTask3.getAttempts().values().iterator().next()
.getID(), TaskAttemptEventType.TA_DONE));
// wait to get it completed
app.waitForState(mapTask3, TaskState.SUCCEEDED);
app.waitForState(job, JobState.SUCCEEDED);
app.verifyCompleted();
}
/**
 * This class provides a custom output committer whose isRecoverySupported
 * method determines whether recovery is supported based on a config property.
 * A minimal config sketch follows the class.
*/
public static class TestFileOutputCommitter extends
org.apache.hadoop.mapred.FileOutputCommitter {
@Override
public boolean isRecoverySupported(
org.apache.hadoop.mapred.JobContext jobContext) {
boolean isRecoverySupported = false;
if (jobContext != null && jobContext.getConfiguration() != null) {
isRecoverySupported = jobContext.getConfiguration().getBoolean(
"want.am.recovery", false);
}
return isRecoverySupported;
}
}
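  // Hypothetical helper (not part of the original test), referenced from the javadoc
  // above: it captures the two config entries that route the recovery decision through
  // TestFileOutputCommitter. The method name is an assumption made for this sketch.
  private static Configuration newRecoveryControlledConf(boolean wantRecovery) {
    Configuration conf = new Configuration();
    conf.setClass("mapred.output.committer.class",
        TestFileOutputCommitter.class,
        org.apache.hadoop.mapred.OutputCommitter.class);
    conf.setBoolean("want.am.recovery", wantRecovery);
    return conf;
  }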
/**
 * This test case primarily verifies that recovery is controlled through a config
 * property. In this case, recovery is turned ON. AM with 3 maps and 0 reduces.
 * The AM crashes after the first two tasks finish, then recovers completely and
 * succeeds in the second generation.
*
* @throws Exception
*/
@Test
public void testRecoverySuccessUsingCustomOutputCommitter() throws Exception {
int runCount = 0;
MRApp app = new MRAppWithHistory(3, 0, false, this.getClass().getName(),
true, ++runCount);
Configuration conf = new Configuration();
conf.setClass("mapred.output.committer.class",
TestFileOutputCommitter.class,
org.apache.hadoop.mapred.OutputCommitter.class);
conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
conf.setBoolean("want.am.recovery", true);
Job job = app.submit(conf);
app.waitForState(job, JobState.RUNNING);
// all maps would be running
Assert.assertEquals("No of tasks not correct", 3, job.getTasks().size());
Iterator<Task> it = job.getTasks().values().iterator();
Task mapTask1 = it.next();
Task mapTask2 = it.next();
Task mapTask3 = it.next();
// all maps must be running
app.waitForState(mapTask1, TaskState.RUNNING);
app.waitForState(mapTask2, TaskState.RUNNING);
app.waitForState(mapTask3, TaskState.RUNNING);
TaskAttempt task1Attempt = mapTask1.getAttempts().values().iterator()
.next();
TaskAttempt task2Attempt = mapTask2.getAttempts().values().iterator()
.next();
TaskAttempt task3Attempt = mapTask3.getAttempts().values().iterator()
.next();
    // before sending the TA_DONE event, make sure the attempt has come to
    // RUNNING state
app.waitForState(task1Attempt, TaskAttemptState.RUNNING);
app.waitForState(task2Attempt, TaskAttemptState.RUNNING);
app.waitForState(task3Attempt, TaskAttemptState.RUNNING);
// send the done signal to the 1st two maps
app.getContext()
.getEventHandler()
.handle(
new TaskAttemptEvent(task1Attempt.getID(),
TaskAttemptEventType.TA_DONE));
app.getContext()
.getEventHandler()
.handle(
new TaskAttemptEvent(task2Attempt.getID(),
TaskAttemptEventType.TA_DONE));
    // wait for the first two map tasks to complete
app.waitForState(mapTask1, TaskState.SUCCEEDED);
app.waitForState(mapTask2, TaskState.SUCCEEDED);
// stop the app
app.stop();
// rerun
    // in rerun the 1st two maps will be recovered from previous run
app = new MRAppWithHistory(2, 1, false, this.getClass().getName(), false,
++runCount);
conf = new Configuration();
conf.setClass("mapred.output.committer.class",
TestFileOutputCommitter.class,
org.apache.hadoop.mapred.OutputCommitter.class);
conf.setBoolean("want.am.recovery", true);
conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
// Set num-reduces explicitly in conf as recovery logic depends on it.
conf.setInt(MRJobConfig.NUM_REDUCES, 0);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
job = app.submit(conf);
app.waitForState(job, JobState.RUNNING);
Assert.assertEquals("No of tasks not correct", 3, job.getTasks().size());
it = job.getTasks().values().iterator();
mapTask1 = it.next();
mapTask2 = it.next();
mapTask3 = it.next();
// first two maps will be recovered, no need to send done
app.waitForState(mapTask1, TaskState.SUCCEEDED);
app.waitForState(mapTask2, TaskState.SUCCEEDED);
app.waitForState(mapTask3, TaskState.RUNNING);
task3Attempt = mapTask3.getAttempts().values().iterator().next();
    // before sending the TA_DONE event, make sure the attempt has come to
    // RUNNING state
app.waitForState(task3Attempt, TaskAttemptState.RUNNING);
// send the done signal to the 3rd map task
app.getContext()
.getEventHandler()
.handle(
new TaskAttemptEvent(mapTask3.getAttempts().values().iterator()
.next().getID(), TaskAttemptEventType.TA_DONE));
// wait to get it completed
app.waitForState(mapTask3, TaskState.SUCCEEDED);
app.waitForState(job, JobState.SUCCEEDED);
app.verifyCompleted();
}
/**
   * This test case primarily verifies that recovery is controlled through the
   * config property. In this case, recovery is turned OFF. AM with 3 maps and
   * 0 reduces. The AM crashes after the first two tasks finish; recovery is
   * not supported, so all maps rerun fully in the second generation and the
   * job succeeds.
*
* @throws Exception
*/
@Test
public void testRecoveryFailsUsingCustomOutputCommitter() throws Exception {
int runCount = 0;
MRApp app =
new MRAppWithHistory(3, 0, false, this.getClass().getName(), true,
++runCount);
Configuration conf = new Configuration();
conf.setClass("mapred.output.committer.class", TestFileOutputCommitter.class,
org.apache.hadoop.mapred.OutputCommitter.class);
conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
conf.setBoolean("want.am.recovery", false);
Job job = app.submit(conf);
app.waitForState(job, JobState.RUNNING);
// all maps would be running
Assert.assertEquals("No of tasks not correct", 3, job.getTasks().size());
Iterator<Task> it = job.getTasks().values().iterator();
Task mapTask1 = it.next();
Task mapTask2 = it.next();
Task mapTask3 = it.next();
// all maps must be running
app.waitForState(mapTask1, TaskState.RUNNING);
app.waitForState(mapTask2, TaskState.RUNNING);
app.waitForState(mapTask3, TaskState.RUNNING);
TaskAttempt task1Attempt =
mapTask1.getAttempts().values().iterator().next();
TaskAttempt task2Attempt =
mapTask2.getAttempts().values().iterator().next();
TaskAttempt task3Attempt =
mapTask3.getAttempts().values().iterator().next();
    // before sending the TA_DONE event, make sure the attempt has come to
    // RUNNING state
app.waitForState(task1Attempt, TaskAttemptState.RUNNING);
app.waitForState(task2Attempt, TaskAttemptState.RUNNING);
app.waitForState(task3Attempt, TaskAttemptState.RUNNING);
// send the done signal to the 1st two maps
app
.getContext()
.getEventHandler()
.handle(
new TaskAttemptEvent(task1Attempt.getID(), TaskAttemptEventType.TA_DONE));
app
.getContext()
.getEventHandler()
.handle(
new TaskAttemptEvent(task2Attempt.getID(), TaskAttemptEventType.TA_DONE));
    // wait for the first two map tasks to complete
app.waitForState(mapTask1, TaskState.SUCCEEDED);
app.waitForState(mapTask2, TaskState.SUCCEEDED);
// stop the app
app.stop();
// rerun
    // in rerun the 1st two maps will be recovered from previous run
app =
new MRAppWithHistory(2, 1, false, this.getClass().getName(), false,
++runCount);
conf = new Configuration();
conf.setClass("mapred.output.committer.class", TestFileOutputCommitter.class,
org.apache.hadoop.mapred.OutputCommitter.class);
conf.setBoolean("want.am.recovery", false);
conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
// Set num-reduces explicitly in conf as recovery logic depends on it.
conf.setInt(MRJobConfig.NUM_REDUCES, 0);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
job = app.submit(conf);
app.waitForState(job, JobState.RUNNING);
Assert.assertEquals("No of tasks not correct", 3, job.getTasks().size());
it = job.getTasks().values().iterator();
mapTask1 = it.next();
mapTask2 = it.next();
mapTask3 = it.next();
    // first two maps will NOT be recovered, need to send done for them
app.waitForState(mapTask1, TaskState.RUNNING);
app.waitForState(mapTask2, TaskState.RUNNING);
app.waitForState(mapTask3, TaskState.RUNNING);
task3Attempt = mapTask3.getAttempts().values().iterator().next();
    // before sending the TA_DONE event, make sure the attempt has come to
    // RUNNING state
app.waitForState(task3Attempt, TaskAttemptState.RUNNING);
    // send the done signal to all 3 map tasks
app
.getContext()
.getEventHandler()
.handle(
new TaskAttemptEvent(mapTask1.getAttempts().values().iterator().next()
.getID(), TaskAttemptEventType.TA_DONE));
app
.getContext()
.getEventHandler()
.handle(
new TaskAttemptEvent(mapTask2.getAttempts().values().iterator().next()
.getID(), TaskAttemptEventType.TA_DONE));
app
.getContext()
.getEventHandler()
.handle(
new TaskAttemptEvent(mapTask3.getAttempts().values().iterator().next()
.getID(), TaskAttemptEventType.TA_DONE));
// wait to get it completed
app.waitForState(mapTask3, TaskState.SUCCEEDED);
app.waitForState(job, JobState.SUCCEEDED);
app.verifyCompleted();
}
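  /**
   * AM with 2 maps and 1 reduce. The AM crashes twice: once after the first
   * map finishes and again after the second map finishes. In each rerun the
   * previously completed maps are recovered, and the job succeeds after the
   * reduce completes in the third generation.
   */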
@Test
public void testMultipleCrashes() throws Exception {
int runCount = 0;
MRApp app =
new MRAppWithHistory(2, 1, false, this.getClass().getName(), true,
++runCount);
Configuration conf = new Configuration();
conf.setBoolean("mapred.mapper.new-api", true);
conf.setBoolean("mapred.reducer.new-api", true);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
Job job = app.submit(conf);
app.waitForState(job, JobState.RUNNING);
//all maps would be running
Assert.assertEquals("No of tasks not correct",
3, job.getTasks().size());
Iterator<Task> it = job.getTasks().values().iterator();
Task mapTask1 = it.next();
Task mapTask2 = it.next();
Task reduceTask = it.next();
// all maps must be running
app.waitForState(mapTask1, TaskState.RUNNING);
app.waitForState(mapTask2, TaskState.RUNNING);
TaskAttempt task1Attempt1 = mapTask1.getAttempts().values().iterator().next();
TaskAttempt task2Attempt = mapTask2.getAttempts().values().iterator().next();
    //before sending the TA_DONE event, make sure the attempt has come to
    //RUNNING state
app.waitForState(task1Attempt1, TaskAttemptState.RUNNING);
app.waitForState(task2Attempt, TaskAttemptState.RUNNING);
    // the reduce must be in RUNNING state
Assert.assertEquals("Reduce Task state not correct",
TaskState.RUNNING, reduceTask.getReport().getTaskState());
//send the done signal to the 1st map
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(
task1Attempt1.getID(),
TaskAttemptEventType.TA_DONE));
//wait for first map task to complete
app.waitForState(mapTask1, TaskState.SUCCEEDED);
// Crash the app
app.stop();
//rerun
//in rerun the 1st map will be recovered from previous run
app =
new MRAppWithHistory(2, 1, false, this.getClass().getName(), false,
++runCount);
conf = new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true);
conf.setBoolean("mapred.mapper.new-api", true);
conf.setBoolean("mapred.reducer.new-api", true);
conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
job = app.submit(conf);
app.waitForState(job, JobState.RUNNING);
//all maps would be running
Assert.assertEquals("No of tasks not correct",
3, job.getTasks().size());
it = job.getTasks().values().iterator();
mapTask1 = it.next();
mapTask2 = it.next();
reduceTask = it.next();
// first map will be recovered, no need to send done
app.waitForState(mapTask1, TaskState.SUCCEEDED);
app.waitForState(mapTask2, TaskState.RUNNING);
task2Attempt = mapTask2.getAttempts().values().iterator().next();
    //before sending the TA_DONE event, make sure the attempt has come to
    //RUNNING state
app.waitForState(task2Attempt, TaskAttemptState.RUNNING);
//send the done signal to the 2nd map task
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(
mapTask2.getAttempts().values().iterator().next().getID(),
TaskAttemptEventType.TA_DONE));
//wait to get it completed
app.waitForState(mapTask2, TaskState.SUCCEEDED);
// Crash the app again.
app.stop();
//rerun
//in rerun the 1st and 2nd map will be recovered from previous run
app =
new MRAppWithHistory(2, 1, false, this.getClass().getName(), false,
++runCount);
conf = new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true);
conf.setBoolean("mapred.mapper.new-api", true);
conf.setBoolean("mapred.reducer.new-api", true);
conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
job = app.submit(conf);
app.waitForState(job, JobState.RUNNING);
//all maps would be running
Assert.assertEquals("No of tasks not correct",
3, job.getTasks().size());
it = job.getTasks().values().iterator();
mapTask1 = it.next();
mapTask2 = it.next();
reduceTask = it.next();
// The maps will be recovered, no need to send done
app.waitForState(mapTask1, TaskState.SUCCEEDED);
app.waitForState(mapTask2, TaskState.SUCCEEDED);
//wait for reduce to be running before sending done
app.waitForState(reduceTask, TaskState.RUNNING);
//send the done signal to the reduce
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(
reduceTask.getAttempts().values().iterator().next().getID(),
TaskAttemptEventType.TA_DONE));
app.waitForState(job, JobState.SUCCEEDED);
app.verifyCompleted();
}
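  /**
   * AM with 1 map and 2 reduces. The map and the first reduce (whose output
   * is written and committed) finish before the AM is stopped. In the rerun
   * both are recovered, including the map's shuffle port, the second reduce
   * is completed, and the committed output is validated.
   */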
@Test
public void testOutputRecovery() throws Exception {
int runCount = 0;
MRApp app = new MRAppWithHistory(1, 2, false, this.getClass().getName(),
true, ++runCount);
Configuration conf = new Configuration();
conf.setBoolean("mapred.mapper.new-api", true);
conf.setBoolean("mapred.reducer.new-api", true);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
Job job = app.submit(conf);
app.waitForState(job, JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",
3, job.getTasks().size());
Iterator<Task> it = job.getTasks().values().iterator();
Task mapTask1 = it.next();
Task reduceTask1 = it.next();
// all maps must be running
app.waitForState(mapTask1, TaskState.RUNNING);
TaskAttempt task1Attempt1 = mapTask1.getAttempts().values().iterator()
.next();
    //before sending the TA_DONE event, make sure the attempt has come to
    //RUNNING state
app.waitForState(task1Attempt1, TaskAttemptState.RUNNING);
//send the done signal to the map
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(
task1Attempt1.getID(),
TaskAttemptEventType.TA_DONE));
//wait for map task to complete
app.waitForState(mapTask1, TaskState.SUCCEEDED);
// Verify the shuffle-port
Assert.assertEquals(5467, task1Attempt1.getShufflePort());
app.waitForState(reduceTask1, TaskState.RUNNING);
TaskAttempt reduce1Attempt1 = reduceTask1.getAttempts().values().iterator().next();
// write output corresponding to reduce1
writeOutput(reduce1Attempt1, conf);
//send the done signal to the 1st reduce
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(
reduce1Attempt1.getID(),
TaskAttemptEventType.TA_DONE));
//wait for first reduce task to complete
app.waitForState(reduceTask1, TaskState.SUCCEEDED);
//stop the app before the job completes.
app.stop();
//rerun
//in rerun the map will be recovered from previous run
app = new MRAppWithHistory(1, 2, false, this.getClass().getName(), false,
++runCount);
conf = new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true);
conf.setBoolean("mapred.mapper.new-api", true);
conf.setBoolean("mapred.reducer.new-api", true);
conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
job = app.submit(conf);
app.waitForState(job, JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",
3, job.getTasks().size());
it = job.getTasks().values().iterator();
mapTask1 = it.next();
reduceTask1 = it.next();
Task reduceTask2 = it.next();
// map will be recovered, no need to send done
app.waitForState(mapTask1, TaskState.SUCCEEDED);
// Verify the shuffle-port after recovery
task1Attempt1 = mapTask1.getAttempts().values().iterator().next();
Assert.assertEquals(5467, task1Attempt1.getShufflePort());
// first reduce will be recovered, no need to send done
app.waitForState(reduceTask1, TaskState.SUCCEEDED);
app.waitForState(reduceTask2, TaskState.RUNNING);
TaskAttempt reduce2Attempt = reduceTask2.getAttempts().values()
.iterator().next();
    //before sending the TA_DONE event, make sure the attempt has come to
    //RUNNING state
app.waitForState(reduce2Attempt, TaskAttemptState.RUNNING);
//send the done signal to the 2nd reduce task
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(
reduce2Attempt.getID(),
TaskAttemptEventType.TA_DONE));
//wait to get it completed
app.waitForState(reduceTask2, TaskState.SUCCEEDED);
app.waitForState(job, JobState.SUCCEEDED);
app.verifyCompleted();
validateOutput();
}
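  /**
   * AM with 2 maps and 1 reduce. The first map commits some output (which
   * must not appear in the final job output) and finishes before the AM is
   * stopped. In the rerun the first map is recovered with its shuffle port,
   * the second map and the reduce are completed, and the reduce output is
   * validated.
   */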
@Test
public void testOutputRecoveryMapsOnly() throws Exception {
int runCount = 0;
MRApp app = new MRAppWithHistory(2, 1, false, this.getClass().getName(),
true, ++runCount);
Configuration conf = new Configuration();
conf.setBoolean("mapred.mapper.new-api", true);
conf.setBoolean("mapred.reducer.new-api", true);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
Job job = app.submit(conf);
app.waitForState(job, JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",
3, job.getTasks().size());
Iterator<Task> it = job.getTasks().values().iterator();
Task mapTask1 = it.next();
Task mapTask2 = it.next();
Task reduceTask1 = it.next();
// all maps must be running
app.waitForState(mapTask1, TaskState.RUNNING);
TaskAttempt task1Attempt1 = mapTask1.getAttempts().values().iterator()
.next();
    //before sending the TA_DONE event, make sure the attempt has come to
    //RUNNING state
app.waitForState(task1Attempt1, TaskAttemptState.RUNNING);
    // write output corresponding to map1 (this is just to validate that it is
    // not included in the output)
writeBadOutput(task1Attempt1, conf);
//send the done signal to the map
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(
task1Attempt1.getID(),
TaskAttemptEventType.TA_DONE));
//wait for map task to complete
app.waitForState(mapTask1, TaskState.SUCCEEDED);
// Verify the shuffle-port
Assert.assertEquals(5467, task1Attempt1.getShufflePort());
//stop the app before the job completes.
app.stop();
//rerun
//in rerun the map will be recovered from previous run
app = new MRAppWithHistory(2, 1, false, this.getClass().getName(), false,
++runCount);
conf = new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true);
conf.setBoolean("mapred.mapper.new-api", true);
conf.setBoolean("mapred.reducer.new-api", true);
conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
job = app.submit(conf);
app.waitForState(job, JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",
3, job.getTasks().size());
it = job.getTasks().values().iterator();
mapTask1 = it.next();
mapTask2 = it.next();
reduceTask1 = it.next();
// map will be recovered, no need to send done
app.waitForState(mapTask1, TaskState.SUCCEEDED);
// Verify the shuffle-port after recovery
task1Attempt1 = mapTask1.getAttempts().values().iterator().next();
Assert.assertEquals(5467, task1Attempt1.getShufflePort());
app.waitForState(mapTask2, TaskState.RUNNING);
TaskAttempt task2Attempt1 = mapTask2.getAttempts().values().iterator()
.next();
    //before sending the TA_DONE event, make sure the attempt has come to
    //RUNNING state
app.waitForState(task2Attempt1, TaskAttemptState.RUNNING);
//send the done signal to the map
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(
task2Attempt1.getID(),
TaskAttemptEventType.TA_DONE));
//wait for map task to complete
app.waitForState(mapTask2, TaskState.SUCCEEDED);
// Verify the shuffle-port
Assert.assertEquals(5467, task2Attempt1.getShufflePort());
app.waitForState(reduceTask1, TaskState.RUNNING);
TaskAttempt reduce1Attempt1 = reduceTask1.getAttempts().values().iterator().next();
// write output corresponding to reduce1
writeOutput(reduce1Attempt1, conf);
//send the done signal to the 1st reduce
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(
reduce1Attempt1.getID(),
TaskAttemptEventType.TA_DONE));
//wait for first reduce task to complete
app.waitForState(reduceTask1, TaskState.SUCCEEDED);
app.waitForState(job, JobState.SUCCEEDED);
app.verifyCompleted();
validateOutput();
}
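  /**
   * Same scenario as testOutputRecovery but using the old (mapred) API and
   * committer: 1 map and 2 reduces, the map and first reduce are recovered
   * after the AM restarts, the second reduce is completed and the output is
   * validated.
   */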
@Test
public void testRecoveryWithOldCommiter() throws Exception {
int runCount = 0;
MRApp app = new MRAppWithHistory(1, 2, false, this.getClass().getName(),
true, ++runCount);
Configuration conf = new Configuration();
conf.setBoolean("mapred.mapper.new-api", false);
conf.setBoolean("mapred.reducer.new-api", false);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
Job job = app.submit(conf);
app.waitForState(job, JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",
3, job.getTasks().size());
Iterator<Task> it = job.getTasks().values().iterator();
Task mapTask1 = it.next();
Task reduceTask1 = it.next();
// all maps must be running
app.waitForState(mapTask1, TaskState.RUNNING);
TaskAttempt task1Attempt1 = mapTask1.getAttempts().values().iterator()
.next();
    //before sending the TA_DONE event, make sure the attempt has come to
    //RUNNING state
app.waitForState(task1Attempt1, TaskAttemptState.RUNNING);
//send the done signal to the map
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(
task1Attempt1.getID(),
TaskAttemptEventType.TA_DONE));
//wait for map task to complete
app.waitForState(mapTask1, TaskState.SUCCEEDED);
// Verify the shuffle-port
Assert.assertEquals(5467, task1Attempt1.getShufflePort());
app.waitForState(reduceTask1, TaskState.RUNNING);
TaskAttempt reduce1Attempt1 = reduceTask1.getAttempts().values().iterator().next();
// write output corresponding to reduce1
writeOutput(reduce1Attempt1, conf);
//send the done signal to the 1st reduce
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(
reduce1Attempt1.getID(),
TaskAttemptEventType.TA_DONE));
//wait for first reduce task to complete
app.waitForState(reduceTask1, TaskState.SUCCEEDED);
//stop the app before the job completes.
app.stop();
//rerun
//in rerun the map will be recovered from previous run
app = new MRAppWithHistory(1, 2, false, this.getClass().getName(), false,
++runCount);
conf = new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true);
conf.setBoolean("mapred.mapper.new-api", false);
conf.setBoolean("mapred.reducer.new-api", false);
conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
job = app.submit(conf);
app.waitForState(job, JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",
3, job.getTasks().size());
it = job.getTasks().values().iterator();
mapTask1 = it.next();
reduceTask1 = it.next();
Task reduceTask2 = it.next();
// map will be recovered, no need to send done
app.waitForState(mapTask1, TaskState.SUCCEEDED);
// Verify the shuffle-port after recovery
task1Attempt1 = mapTask1.getAttempts().values().iterator().next();
Assert.assertEquals(5467, task1Attempt1.getShufflePort());
// first reduce will be recovered, no need to send done
app.waitForState(reduceTask1, TaskState.SUCCEEDED);
app.waitForState(reduceTask2, TaskState.RUNNING);
TaskAttempt reduce2Attempt = reduceTask2.getAttempts().values()
.iterator().next();
    //before sending the TA_DONE event, make sure the attempt has come to
    //RUNNING state
app.waitForState(reduce2Attempt, TaskAttemptState.RUNNING);
//send the done signal to the 2nd reduce task
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(
reduce2Attempt.getID(),
TaskAttemptEventType.TA_DONE));
//wait to get it completed
app.waitForState(reduceTask2, TaskState.SUCCEEDED);
app.waitForState(job, JobState.SUCCEEDED);
app.verifyCompleted();
validateOutput();
}
/**
   * AM with 2 maps and 1 reduce. A speculative attempt is added for the 1st
   * map and its container is launched manually; the original attempt then
   * succeeds. The AM crashes after the first map task finishes, recovers
   * completely and succeeds in the second generation, and the job/task
   * start and finish times plus the AMInfos are verified after recovery.
*
* @throws Exception
*/
@Test
public void testSpeculative() throws Exception {
int runCount = 0;
long am1StartTimeEst = System.currentTimeMillis();
MRApp app = new MRAppWithHistory(2, 1, false, this.getClass().getName(), true, ++runCount);
Configuration conf = new Configuration();
conf.setBoolean("mapred.mapper.new-api", true);
conf.setBoolean("mapred.reducer.new-api", true);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
Job job = app.submit(conf);
app.waitForState(job, JobState.RUNNING);
long jobStartTime = job.getReport().getStartTime();
//all maps would be running
Assert.assertEquals("No of tasks not correct",
3, job.getTasks().size());
Iterator<Task> it = job.getTasks().values().iterator();
Task mapTask1 = it.next();
Task mapTask2 = it.next();
Task reduceTask = it.next();
// all maps must be running
app.waitForState(mapTask1, TaskState.RUNNING);
app.waitForState(mapTask2, TaskState.RUNNING);
    // Launch a speculative attempt for the first task
app.getContext().getEventHandler().handle(
new TaskEvent(mapTask1.getID(), TaskEventType.T_ADD_SPEC_ATTEMPT));
int timeOut = 0;
while (mapTask1.getAttempts().size() != 2 && timeOut++ < 10) {
Thread.sleep(1000);
LOG.info("Waiting for next attempt to start");
}
Iterator<TaskAttempt> t1it = mapTask1.getAttempts().values().iterator();
TaskAttempt task1Attempt1 = t1it.next();
TaskAttempt task1Attempt2 = t1it.next();
TaskAttempt task2Attempt = mapTask2.getAttempts().values().iterator().next();
ContainerId t1a2contId = task1Attempt2.getAssignedContainerID();
LOG.info(t1a2contId.toString());
LOG.info(task1Attempt1.getID().toString());
LOG.info(task1Attempt2.getID().toString());
// Launch container for speculative attempt
app.getContext().getEventHandler().handle(
new TaskAttemptContainerLaunchedEvent(task1Attempt2.getID(), runCount));
    //before sending the TA_DONE event, make sure the attempt has come to
    //RUNNING state
app.waitForState(task1Attempt1, TaskAttemptState.RUNNING);
app.waitForState(task1Attempt2, TaskAttemptState.RUNNING);
app.waitForState(task2Attempt, TaskAttemptState.RUNNING);
    // the reduce must be in RUNNING state
Assert.assertEquals("Reduce Task state not correct",
TaskState.RUNNING, reduceTask.getReport().getTaskState());
//send the done signal to the map 1 attempt 1
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(
task1Attempt1.getID(),
TaskAttemptEventType.TA_DONE));
app.waitForState(task1Attempt1, TaskAttemptState.SUCCEEDED);
//wait for first map task to complete
app.waitForState(mapTask1, TaskState.SUCCEEDED);
long task1StartTime = mapTask1.getReport().getStartTime();
long task1FinishTime = mapTask1.getReport().getFinishTime();
//stop the app
app.stop();
//rerun
//in rerun the 1st map will be recovered from previous run
long am2StartTimeEst = System.currentTimeMillis();
app = new MRAppWithHistory(2, 1, false, this.getClass().getName(), false, ++runCount);
conf = new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true);
conf.setBoolean("mapred.mapper.new-api", true);
conf.setBoolean("mapred.reducer.new-api", true);
conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
job = app.submit(conf);
app.waitForState(job, JobState.RUNNING);
//all maps would be running
Assert.assertEquals("No of tasks not correct",
3, job.getTasks().size());
it = job.getTasks().values().iterator();
mapTask1 = it.next();
mapTask2 = it.next();
reduceTask = it.next();
// first map will be recovered, no need to send done
app.waitForState(mapTask1, TaskState.SUCCEEDED);
app.waitForState(mapTask2, TaskState.RUNNING);
task2Attempt = mapTask2.getAttempts().values().iterator().next();
    //before sending the TA_DONE event, make sure the attempt has come to
    //RUNNING state
app.waitForState(task2Attempt, TaskAttemptState.RUNNING);
//send the done signal to the 2nd map task
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(
mapTask2.getAttempts().values().iterator().next().getID(),
TaskAttemptEventType.TA_DONE));
//wait to get it completed
app.waitForState(mapTask2, TaskState.SUCCEEDED);
//wait for reduce to be running before sending done
app.waitForState(reduceTask, TaskState.RUNNING);
//send the done signal to the reduce
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(
reduceTask.getAttempts().values().iterator().next().getID(),
TaskAttemptEventType.TA_DONE));
app.waitForState(job, JobState.SUCCEEDED);
app.verifyCompleted();
Assert.assertEquals("Job Start time not correct",
jobStartTime, job.getReport().getStartTime());
Assert.assertEquals("Task Start time not correct",
task1StartTime, mapTask1.getReport().getStartTime());
Assert.assertEquals("Task Finish time not correct",
task1FinishTime, mapTask1.getReport().getFinishTime());
Assert.assertEquals(2, job.getAMInfos().size());
int attemptNum = 1;
// Verify AMInfo
for (AMInfo amInfo : job.getAMInfos()) {
Assert.assertEquals(attemptNum++, amInfo.getAppAttemptId()
.getAttemptId());
Assert.assertEquals(amInfo.getAppAttemptId(), amInfo.getContainerId()
.getApplicationAttemptId());
Assert.assertEquals(MRApp.NM_HOST, amInfo.getNodeManagerHost());
Assert.assertEquals(MRApp.NM_PORT, amInfo.getNodeManagerPort());
Assert.assertEquals(MRApp.NM_HTTP_PORT, amInfo.getNodeManagerHttpPort());
}
long am1StartTimeReal = job.getAMInfos().get(0).getStartTime();
long am2StartTimeReal = job.getAMInfos().get(1).getStartTime();
Assert.assertTrue(am1StartTimeReal >= am1StartTimeEst
&& am1StartTimeReal <= am2StartTimeEst);
Assert.assertTrue(am2StartTimeReal >= am2StartTimeEst
&& am2StartTimeReal <= System.currentTimeMillis());
}
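  /**
   * AM with 2 maps and 1 reduce, submitted without a shuffle secret in the
   * job credentials. After the AM restarts, the completed first map is NOT
   * recovered and must be rerun; all tasks are then driven to completion and
   * the job succeeds.
   */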
@Test(timeout=30000)
public void testRecoveryWithoutShuffleSecret() throws Exception {
int runCount = 0;
MRApp app = new MRAppNoShuffleSecret(2, 1, false,
this.getClass().getName(), true, ++runCount);
Configuration conf = new Configuration();
conf.setBoolean("mapred.mapper.new-api", true);
conf.setBoolean("mapred.reducer.new-api", true);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
Job job = app.submit(conf);
app.waitForState(job, JobState.RUNNING);
//all maps would be running
Assert.assertEquals("No of tasks not correct",
3, job.getTasks().size());
Iterator<Task> it = job.getTasks().values().iterator();
Task mapTask1 = it.next();
Task mapTask2 = it.next();
Task reduceTask = it.next();
// all maps must be running
app.waitForState(mapTask1, TaskState.RUNNING);
app.waitForState(mapTask2, TaskState.RUNNING);
TaskAttempt task1Attempt = mapTask1.getAttempts().values().iterator().next();
TaskAttempt task2Attempt = mapTask2.getAttempts().values().iterator().next();
    //before sending the TA_DONE event, make sure the attempt has come to
    //RUNNING state
app.waitForState(task1Attempt, TaskAttemptState.RUNNING);
app.waitForState(task2Attempt, TaskAttemptState.RUNNING);
    // the reduce must be in RUNNING state
Assert.assertEquals("Reduce Task state not correct",
TaskState.RUNNING, reduceTask.getReport().getTaskState());
//send the done signal to the 1st map attempt
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(
task1Attempt.getID(),
TaskAttemptEventType.TA_DONE));
//wait for first map task to complete
app.waitForState(mapTask1, TaskState.SUCCEEDED);
//stop the app
app.stop();
//in recovery the 1st map should NOT be recovered from previous run
//since the shuffle secret was not provided with the job credentials
//and had to be rolled per app attempt
app = new MRAppNoShuffleSecret(2, 1, false,
this.getClass().getName(), false, ++runCount);
conf = new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true);
conf.setBoolean("mapred.mapper.new-api", true);
conf.setBoolean("mapred.reducer.new-api", true);
conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
job = app.submit(conf);
app.waitForState(job, JobState.RUNNING);
//all maps would be running
Assert.assertEquals("No of tasks not correct",
3, job.getTasks().size());
it = job.getTasks().values().iterator();
mapTask1 = it.next();
mapTask2 = it.next();
reduceTask = it.next();
app.waitForState(mapTask1, TaskState.RUNNING);
app.waitForState(mapTask2, TaskState.RUNNING);
task2Attempt = mapTask2.getAttempts().values().iterator().next();
    //before sending the TA_DONE event, make sure the attempt has come to
    //RUNNING state
app.waitForState(task2Attempt, TaskAttemptState.RUNNING);
//send the done signal to the 2nd map task
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(
mapTask2.getAttempts().values().iterator().next().getID(),
TaskAttemptEventType.TA_DONE));
//wait to get it completed
app.waitForState(mapTask2, TaskState.SUCCEEDED);
//verify first map task is still running
app.waitForState(mapTask1, TaskState.RUNNING);
    //send the done signal to the 1st map task
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(
mapTask1.getAttempts().values().iterator().next().getID(),
TaskAttemptEventType.TA_DONE));
//wait to get it completed
app.waitForState(mapTask1, TaskState.SUCCEEDED);
//wait for reduce to be running before sending done
app.waitForState(reduceTask, TaskState.RUNNING);
//send the done signal to the reduce
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(
reduceTask.getAttempts().values().iterator().next().getID(),
TaskAttemptEventType.TA_DONE));
app.waitForState(job, JobState.SUCCEEDED);
app.verifyCompleted();
}
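  /**
   * Recovers a mocked map task whose history reports the task as SUCCEEDED
   * with one succeeded and one failed attempt, and verifies the recovered
   * task/attempt states, job history events and counter updates.
   */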
@Test
public void testRecoverySuccessAttempt() {
LOG.info("--- START: testRecoverySuccessAttempt ---");
long clusterTimestamp = System.currentTimeMillis();
EventHandler mockEventHandler = mock(EventHandler.class);
MapTaskImpl recoverMapTask = getMockMapTask(clusterTimestamp,
mockEventHandler);
TaskId taskId = recoverMapTask.getID();
JobID jobID = new JobID(Long.toString(clusterTimestamp), 1);
TaskID taskID = new TaskID(jobID,
org.apache.hadoop.mapreduce.TaskType.MAP, taskId.getId());
//Mock up the TaskAttempts
Map<TaskAttemptID, TaskAttemptInfo> mockTaskAttempts =
new HashMap<TaskAttemptID, TaskAttemptInfo>();
TaskAttemptID taId1 = new TaskAttemptID(taskID, 2);
TaskAttemptInfo mockTAinfo1 = getMockTaskAttemptInfo(taId1,
TaskAttemptState.SUCCEEDED);
mockTaskAttempts.put(taId1, mockTAinfo1);
TaskAttemptID taId2 = new TaskAttemptID(taskID, 1);
TaskAttemptInfo mockTAinfo2 = getMockTaskAttemptInfo(taId2,
TaskAttemptState.FAILED);
mockTaskAttempts.put(taId2, mockTAinfo2);
OutputCommitter mockCommitter = mock (OutputCommitter.class);
TaskInfo mockTaskInfo = mock(TaskInfo.class);
when(mockTaskInfo.getTaskStatus()).thenReturn("SUCCEEDED");
when(mockTaskInfo.getTaskId()).thenReturn(taskID);
when(mockTaskInfo.getAllTaskAttempts()).thenReturn(mockTaskAttempts);
recoverMapTask.handle(
new TaskRecoverEvent(taskId, mockTaskInfo,mockCommitter, true));
ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);
verify(mockEventHandler,atLeast(1)).handle(
(org.apache.hadoop.yarn.event.Event) arg.capture());
Map<TaskAttemptID, TaskAttemptState> finalAttemptStates =
new HashMap<TaskAttemptID, TaskAttemptState>();
finalAttemptStates.put(taId1, TaskAttemptState.SUCCEEDED);
finalAttemptStates.put(taId2, TaskAttemptState.FAILED);
List<EventType> jobHistoryEvents = new ArrayList<EventType>();
jobHistoryEvents.add(EventType.TASK_STARTED);
jobHistoryEvents.add(EventType.MAP_ATTEMPT_STARTED);
jobHistoryEvents.add(EventType.MAP_ATTEMPT_FINISHED);
jobHistoryEvents.add(EventType.MAP_ATTEMPT_STARTED);
jobHistoryEvents.add(EventType.MAP_ATTEMPT_FAILED);
jobHistoryEvents.add(EventType.TASK_FINISHED);
recoveryChecker(recoverMapTask, TaskState.SUCCEEDED, finalAttemptStates,
arg, jobHistoryEvents, 2L, 1L);
}
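  /**
   * Recovers a mocked map task whose history reports the task as FAILED with
   * two failed attempts, and verifies the task recovers to FAILED with the
   * expected history events and counter updates.
   */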
@Test
public void testRecoveryAllFailAttempts() {
LOG.info("--- START: testRecoveryAllFailAttempts ---");
long clusterTimestamp = System.currentTimeMillis();
EventHandler mockEventHandler = mock(EventHandler.class);
MapTaskImpl recoverMapTask = getMockMapTask(clusterTimestamp,
mockEventHandler);
TaskId taskId = recoverMapTask.getID();
JobID jobID = new JobID(Long.toString(clusterTimestamp), 1);
TaskID taskID = new TaskID(jobID,
org.apache.hadoop.mapreduce.TaskType.MAP, taskId.getId());
//Mock up the TaskAttempts
Map<TaskAttemptID, TaskAttemptInfo> mockTaskAttempts =
new HashMap<TaskAttemptID, TaskAttemptInfo>();
TaskAttemptID taId1 = new TaskAttemptID(taskID, 2);
TaskAttemptInfo mockTAinfo1 = getMockTaskAttemptInfo(taId1,
TaskAttemptState.FAILED);
mockTaskAttempts.put(taId1, mockTAinfo1);
TaskAttemptID taId2 = new TaskAttemptID(taskID, 1);
TaskAttemptInfo mockTAinfo2 = getMockTaskAttemptInfo(taId2,
TaskAttemptState.FAILED);
mockTaskAttempts.put(taId2, mockTAinfo2);
OutputCommitter mockCommitter = mock (OutputCommitter.class);
TaskInfo mockTaskInfo = mock(TaskInfo.class);
when(mockTaskInfo.getTaskStatus()).thenReturn("FAILED");
when(mockTaskInfo.getTaskId()).thenReturn(taskID);
when(mockTaskInfo.getAllTaskAttempts()).thenReturn(mockTaskAttempts);
recoverMapTask.handle(
new TaskRecoverEvent(taskId, mockTaskInfo, mockCommitter, true));
ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);
verify(mockEventHandler,atLeast(1)).handle(
(org.apache.hadoop.yarn.event.Event) arg.capture());
Map<TaskAttemptID, TaskAttemptState> finalAttemptStates =
new HashMap<TaskAttemptID, TaskAttemptState>();
finalAttemptStates.put(taId1, TaskAttemptState.FAILED);
finalAttemptStates.put(taId2, TaskAttemptState.FAILED);
List<EventType> jobHistoryEvents = new ArrayList<EventType>();
jobHistoryEvents.add(EventType.TASK_STARTED);
jobHistoryEvents.add(EventType.MAP_ATTEMPT_STARTED);
jobHistoryEvents.add(EventType.MAP_ATTEMPT_FAILED);
jobHistoryEvents.add(EventType.MAP_ATTEMPT_STARTED);
jobHistoryEvents.add(EventType.MAP_ATTEMPT_FAILED);
jobHistoryEvents.add(EventType.TASK_FAILED);
recoveryChecker(recoverMapTask, TaskState.FAILED, finalAttemptStates,
arg, jobHistoryEvents, 2L, 2L);
}
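  /**
   * Recovers a mocked map task whose history claims SUCCEEDED although both
   * attempts failed; the task should end up RUNNING with a new attempt
   * scheduled since no successful attempt could be recovered.
   */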
@Test
public void testRecoveryTaskSuccessAllAttemptsFail() {
LOG.info("--- START: testRecoveryTaskSuccessAllAttemptsFail ---");
long clusterTimestamp = System.currentTimeMillis();
EventHandler mockEventHandler = mock(EventHandler.class);
MapTaskImpl recoverMapTask = getMockMapTask(clusterTimestamp,
mockEventHandler);
TaskId taskId = recoverMapTask.getID();
JobID jobID = new JobID(Long.toString(clusterTimestamp), 1);
TaskID taskID = new TaskID(jobID,
org.apache.hadoop.mapreduce.TaskType.MAP, taskId.getId());
//Mock up the TaskAttempts
Map<TaskAttemptID, TaskAttemptInfo> mockTaskAttempts =
new HashMap<TaskAttemptID, TaskAttemptInfo>();
TaskAttemptID taId1 = new TaskAttemptID(taskID, 2);
TaskAttemptInfo mockTAinfo1 = getMockTaskAttemptInfo(taId1,
TaskAttemptState.FAILED);
mockTaskAttempts.put(taId1, mockTAinfo1);
TaskAttemptID taId2 = new TaskAttemptID(taskID, 1);
TaskAttemptInfo mockTAinfo2 = getMockTaskAttemptInfo(taId2,
TaskAttemptState.FAILED);
mockTaskAttempts.put(taId2, mockTAinfo2);
OutputCommitter mockCommitter = mock (OutputCommitter.class);
TaskInfo mockTaskInfo = mock(TaskInfo.class);
when(mockTaskInfo.getTaskStatus()).thenReturn("SUCCEEDED");
when(mockTaskInfo.getTaskId()).thenReturn(taskID);
when(mockTaskInfo.getAllTaskAttempts()).thenReturn(mockTaskAttempts);
recoverMapTask.handle(
new TaskRecoverEvent(taskId, mockTaskInfo, mockCommitter, true));
ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);
verify(mockEventHandler,atLeast(1)).handle(
(org.apache.hadoop.yarn.event.Event) arg.capture());
Map<TaskAttemptID, TaskAttemptState> finalAttemptStates =
new HashMap<TaskAttemptID, TaskAttemptState>();
finalAttemptStates.put(taId1, TaskAttemptState.FAILED);
finalAttemptStates.put(taId2, TaskAttemptState.FAILED);
    // check for one new attempt launched since no successful attempt was found
TaskAttemptID taId3 = new TaskAttemptID(taskID, 2000);
finalAttemptStates.put(taId3, TaskAttemptState.NEW);
List<EventType> jobHistoryEvents = new ArrayList<EventType>();
jobHistoryEvents.add(EventType.TASK_STARTED);
jobHistoryEvents.add(EventType.MAP_ATTEMPT_STARTED);
jobHistoryEvents.add(EventType.MAP_ATTEMPT_FAILED);
jobHistoryEvents.add(EventType.MAP_ATTEMPT_STARTED);
jobHistoryEvents.add(EventType.MAP_ATTEMPT_FAILED);
recoveryChecker(recoverMapTask, TaskState.RUNNING, finalAttemptStates,
arg, jobHistoryEvents, 2L, 2L);
}
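  /**
   * Recovers a mocked map task whose history reports the task and both of its
   * attempts as SUCCEEDED, and verifies the task recovers to SUCCEEDED with
   * no failed-map counter updates.
   */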
@Test
public void testRecoveryTaskSuccessAllAttemptsSucceed() {
LOG.info("--- START: testRecoveryTaskSuccessAllAttemptsFail ---");
long clusterTimestamp = System.currentTimeMillis();
EventHandler mockEventHandler = mock(EventHandler.class);
MapTaskImpl recoverMapTask = getMockMapTask(clusterTimestamp,
mockEventHandler);
TaskId taskId = recoverMapTask.getID();
JobID jobID = new JobID(Long.toString(clusterTimestamp), 1);
TaskID taskID = new TaskID(jobID,
org.apache.hadoop.mapreduce.TaskType.MAP, taskId.getId());
//Mock up the TaskAttempts
Map<TaskAttemptID, TaskAttemptInfo> mockTaskAttempts =
new HashMap<TaskAttemptID, TaskAttemptInfo>();
TaskAttemptID taId1 = new TaskAttemptID(taskID, 2);
TaskAttemptInfo mockTAinfo1 = getMockTaskAttemptInfo(taId1,
TaskAttemptState.SUCCEEDED);
mockTaskAttempts.put(taId1, mockTAinfo1);
TaskAttemptID taId2 = new TaskAttemptID(taskID, 1);
TaskAttemptInfo mockTAinfo2 = getMockTaskAttemptInfo(taId2,
TaskAttemptState.SUCCEEDED);
mockTaskAttempts.put(taId2, mockTAinfo2);
OutputCommitter mockCommitter = mock (OutputCommitter.class);
TaskInfo mockTaskInfo = mock(TaskInfo.class);
when(mockTaskInfo.getTaskStatus()).thenReturn("SUCCEEDED");
when(mockTaskInfo.getTaskId()).thenReturn(taskID);
when(mockTaskInfo.getAllTaskAttempts()).thenReturn(mockTaskAttempts);
recoverMapTask.handle(
new TaskRecoverEvent(taskId, mockTaskInfo, mockCommitter, true));
ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);
verify(mockEventHandler,atLeast(1)).handle(
(org.apache.hadoop.yarn.event.Event) arg.capture());
Map<TaskAttemptID, TaskAttemptState> finalAttemptStates =
new HashMap<TaskAttemptID, TaskAttemptState>();
finalAttemptStates.put(taId1, TaskAttemptState.SUCCEEDED);
finalAttemptStates.put(taId2, TaskAttemptState.SUCCEEDED);
List<EventType> jobHistoryEvents = new ArrayList<EventType>();
jobHistoryEvents.add(EventType.TASK_STARTED);
jobHistoryEvents.add(EventType.MAP_ATTEMPT_STARTED);
jobHistoryEvents.add(EventType.MAP_ATTEMPT_FINISHED);
jobHistoryEvents.add(EventType.MAP_ATTEMPT_STARTED);
jobHistoryEvents.add(EventType.MAP_ATTEMPT_FINISHED);
jobHistoryEvents.add(EventType.TASK_FINISHED);
recoveryChecker(recoverMapTask, TaskState.SUCCEEDED, finalAttemptStates,
arg, jobHistoryEvents, 2L, 0L);
}
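  /**
   * Recovers a mocked map task whose history reports the task and both of its
   * attempts as KILLED, and verifies the task recovers to KILLED.
   */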
@Test
public void testRecoveryAllAttemptsKilled() {
LOG.info("--- START: testRecoveryAllAttemptsKilled ---");
long clusterTimestamp = System.currentTimeMillis();
EventHandler mockEventHandler = mock(EventHandler.class);
MapTaskImpl recoverMapTask = getMockMapTask(clusterTimestamp,
mockEventHandler);
TaskId taskId = recoverMapTask.getID();
JobID jobID = new JobID(Long.toString(clusterTimestamp), 1);
TaskID taskID = new TaskID(jobID,
org.apache.hadoop.mapreduce.TaskType.MAP, taskId.getId());
//Mock up the TaskAttempts
Map<TaskAttemptID, TaskAttemptInfo> mockTaskAttempts =
new HashMap<TaskAttemptID, TaskAttemptInfo>();
TaskAttemptID taId1 = new TaskAttemptID(taskID, 2);
TaskAttemptInfo mockTAinfo1 = getMockTaskAttemptInfo(taId1,
TaskAttemptState.KILLED);
mockTaskAttempts.put(taId1, mockTAinfo1);
TaskAttemptID taId2 = new TaskAttemptID(taskID, 1);
TaskAttemptInfo mockTAinfo2 = getMockTaskAttemptInfo(taId2,
TaskAttemptState.KILLED);
mockTaskAttempts.put(taId2, mockTAinfo2);
OutputCommitter mockCommitter = mock (OutputCommitter.class);
TaskInfo mockTaskInfo = mock(TaskInfo.class);
when(mockTaskInfo.getTaskStatus()).thenReturn("KILLED");
when(mockTaskInfo.getTaskId()).thenReturn(taskID);
when(mockTaskInfo.getAllTaskAttempts()).thenReturn(mockTaskAttempts);
recoverMapTask.handle(
new TaskRecoverEvent(taskId, mockTaskInfo, mockCommitter, true));
ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);
verify(mockEventHandler,atLeast(1)).handle(
(org.apache.hadoop.yarn.event.Event) arg.capture());
Map<TaskAttemptID, TaskAttemptState> finalAttemptStates =
new HashMap<TaskAttemptID, TaskAttemptState>();
finalAttemptStates.put(taId1, TaskAttemptState.KILLED);
finalAttemptStates.put(taId2, TaskAttemptState.KILLED);
List<EventType> jobHistoryEvents = new ArrayList<EventType>();
jobHistoryEvents.add(EventType.TASK_STARTED);
jobHistoryEvents.add(EventType.MAP_ATTEMPT_STARTED);
jobHistoryEvents.add(EventType.MAP_ATTEMPT_KILLED);
jobHistoryEvents.add(EventType.MAP_ATTEMPT_STARTED);
jobHistoryEvents.add(EventType.MAP_ATTEMPT_KILLED);
jobHistoryEvents.add(EventType.TASK_FAILED);
recoveryChecker(recoverMapTask, TaskState.KILLED, finalAttemptStates,
arg, jobHistoryEvents, 2L, 0L);
}
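  /**
   * Asserts the final state of a recovered task and its attempts, and checks
   * the job history events and launched/failed map counter updates captured
   * by the mocked event handler during recovery.
   */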
private void recoveryChecker(MapTaskImpl checkTask, TaskState finalState,
Map<TaskAttemptID, TaskAttemptState> finalAttemptStates,
ArgumentCaptor<Event> arg, List<EventType> expectedJobHistoryEvents,
long expectedMapLaunches, long expectedFailedMaps) {
assertEquals("Final State of Task", finalState, checkTask.getState());
Map<TaskAttemptId, TaskAttempt> recoveredAttempts =
checkTask.getAttempts();
assertEquals("Expected Number of Task Attempts",
finalAttemptStates.size(), recoveredAttempts.size());
for (TaskAttemptID taID : finalAttemptStates.keySet()) {
assertEquals("Expected Task Attempt State",
finalAttemptStates.get(taID),
recoveredAttempts.get(TypeConverter.toYarn(taID)).getState());
}
Iterator<Event> ie = arg.getAllValues().iterator();
int eventNum = 0;
long totalLaunchedMaps = 0;
long totalFailedMaps = 0;
boolean jobTaskEventReceived = false;
while (ie.hasNext()) {
Object current = ie.next();
++eventNum;
LOG.info(eventNum + " " + current.getClass().getName());
if (current instanceof JobHistoryEvent) {
JobHistoryEvent jhe = (JobHistoryEvent) current;
LOG.info(expectedJobHistoryEvents.get(0).toString() + " " +
jhe.getHistoryEvent().getEventType().toString() + " " +
jhe.getJobID());
assertEquals(expectedJobHistoryEvents.get(0),
jhe.getHistoryEvent().getEventType());
expectedJobHistoryEvents.remove(0);
} else if (current instanceof JobCounterUpdateEvent) {
JobCounterUpdateEvent jcue = (JobCounterUpdateEvent) current;
LOG.info("JobCounterUpdateEvent "
+ jcue.getCounterUpdates().get(0).getCounterKey()
+ " " + jcue.getCounterUpdates().get(0).getIncrementValue());
if (jcue.getCounterUpdates().get(0).getCounterKey() ==
JobCounter.NUM_FAILED_MAPS) {
totalFailedMaps += jcue.getCounterUpdates().get(0)
.getIncrementValue();
} else if (jcue.getCounterUpdates().get(0).getCounterKey() ==
JobCounter.TOTAL_LAUNCHED_MAPS) {
totalLaunchedMaps += jcue.getCounterUpdates().get(0)
.getIncrementValue();
}
} else if (current instanceof JobTaskEvent) {
JobTaskEvent jte = (JobTaskEvent) current;
assertEquals(jte.getState(), finalState);
jobTaskEventReceived = true;
}
}
assertTrue(jobTaskEventReceived || (finalState == TaskState.RUNNING));
assertEquals("Did not process all expected JobHistoryEvents",
0, expectedJobHistoryEvents.size());
assertEquals("Expected Map Launches",
expectedMapLaunches, totalLaunchedMaps);
assertEquals("Expected Failed Maps",
expectedFailedMaps, totalFailedMaps);
}
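  /**
   * Builds a MapTaskImpl wired to mocked dependencies (conf, attempt
   * listener, job token, metrics, app context) for the recovery unit tests.
   */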
private MapTaskImpl getMockMapTask(long clusterTimestamp, EventHandler eh) {
ApplicationId appId = ApplicationId.newInstance(clusterTimestamp, 1);
JobId jobId = MRBuilderUtils.newJobId(appId, 1);
int partitions = 2;
Path remoteJobConfFile = mock(Path.class);
JobConf conf = new JobConf();
TaskAttemptListener taskAttemptListener = mock(TaskAttemptListener.class);
Token<JobTokenIdentifier> jobToken =
(Token<JobTokenIdentifier>) mock(Token.class);
Credentials credentials = null;
Clock clock = new SystemClock();
int appAttemptId = 3;
MRAppMetrics metrics = mock(MRAppMetrics.class);
Resource minContainerRequirements = mock(Resource.class);
when(minContainerRequirements.getMemory()).thenReturn(1000);
ClusterInfo clusterInfo = mock(ClusterInfo.class);
AppContext appContext = mock(AppContext.class);
when(appContext.getClusterInfo()).thenReturn(clusterInfo);
TaskSplitMetaInfo taskSplitMetaInfo = mock(TaskSplitMetaInfo.class);
MapTaskImpl mapTask = new MapTaskImpl(jobId, partitions,
eh, remoteJobConfFile, conf,
taskSplitMetaInfo, taskAttemptListener, jobToken, credentials, clock,
appAttemptId, metrics, appContext);
return mapTask;
}
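  /**
   * Creates a mocked job-history TaskAttemptInfo with the given attempt id
   * and final state, plus dummy host/port/timestamp values.
   */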
private TaskAttemptInfo getMockTaskAttemptInfo(TaskAttemptID tai,
TaskAttemptState tas) {
ContainerId ci = mock(ContainerId.class);
Counters counters = mock(Counters.class);
TaskType tt = TaskType.MAP;
long finishTime = System.currentTimeMillis();
TaskAttemptInfo mockTAinfo = mock(TaskAttemptInfo.class);
when(mockTAinfo.getAttemptId()).thenReturn(tai);
when(mockTAinfo.getContainerId()).thenReturn(ci);
when(mockTAinfo.getCounters()).thenReturn(counters);
when(mockTAinfo.getError()).thenReturn("");
when(mockTAinfo.getFinishTime()).thenReturn(finishTime);
when(mockTAinfo.getHostname()).thenReturn("localhost");
when(mockTAinfo.getHttpPort()).thenReturn(23);
when(mockTAinfo.getMapFinishTime()).thenReturn(finishTime - 1000L);
when(mockTAinfo.getPort()).thenReturn(24);
when(mockTAinfo.getRackname()).thenReturn("defaultRack");
when(mockTAinfo.getShuffleFinishTime()).thenReturn(finishTime - 2000L);
when(mockTAinfo.getShufflePort()).thenReturn(25);
when(mockTAinfo.getSortFinishTime()).thenReturn(finishTime - 3000L);
when(mockTAinfo.getStartTime()).thenReturn(finishTime -10000);
when(mockTAinfo.getState()).thenReturn("task in progress");
when(mockTAinfo.getTaskStatus()).thenReturn(tas.toString());
when(mockTAinfo.getTaskType()).thenReturn(tt);
when(mockTAinfo.getTrackerName()).thenReturn("TrackerName");
return mockTAinfo;
}
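  /**
   * Writes and commits output for the given attempt with deliberately
   * different records than writeOutput, used to verify that this output does
   * not end up as the final job output.
   */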
private void writeBadOutput(TaskAttempt attempt, Configuration conf)
throws Exception {
TaskAttemptContext tContext = new TaskAttemptContextImpl(conf,
TypeConverter.fromYarn(attempt.getID()));
TextOutputFormat<?, ?> theOutputFormat = new TextOutputFormat();
RecordWriter theRecordWriter = theOutputFormat
.getRecordWriter(tContext);
NullWritable nullWritable = NullWritable.get();
try {
theRecordWriter.write(key2, val2);
theRecordWriter.write(null, nullWritable);
theRecordWriter.write(null, val2);
theRecordWriter.write(nullWritable, val1);
theRecordWriter.write(key1, nullWritable);
theRecordWriter.write(key2, null);
theRecordWriter.write(null, null);
theRecordWriter.write(key1, val1);
} finally {
theRecordWriter.close(tContext);
}
OutputFormat outputFormat = ReflectionUtils.newInstance(
tContext.getOutputFormatClass(), conf);
OutputCommitter committer = outputFormat.getOutputCommitter(tContext);
committer.commitTask(tContext);
}
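  /**
   * Writes and commits the expected records for the given attempt via
   * TextOutputFormat; validateOutput() later checks this content.
   */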
private void writeOutput(TaskAttempt attempt, Configuration conf)
throws Exception {
TaskAttemptContext tContext = new TaskAttemptContextImpl(conf,
TypeConverter.fromYarn(attempt.getID()));
TextOutputFormat<?, ?> theOutputFormat = new TextOutputFormat();
RecordWriter theRecordWriter = theOutputFormat
.getRecordWriter(tContext);
NullWritable nullWritable = NullWritable.get();
try {
theRecordWriter.write(key1, val1);
theRecordWriter.write(null, nullWritable);
theRecordWriter.write(null, val1);
theRecordWriter.write(nullWritable, val2);
theRecordWriter.write(key2, nullWritable);
theRecordWriter.write(key1, null);
theRecordWriter.write(null, null);
theRecordWriter.write(key2, val2);
} finally {
theRecordWriter.close(tContext);
}
OutputFormat outputFormat = ReflectionUtils.newInstance(
tContext.getOutputFormatClass(), conf);
OutputCommitter committer = outputFormat.getOutputCommitter(tContext);
committer.commitTask(tContext);
}
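  /**
   * Reads the committed part file from the output directory and compares it
   * with the output produced by writeOutput().
   */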
private void validateOutput() throws IOException {
File expectedFile = new File(new Path(outputDir, partFile).toString());
StringBuffer expectedOutput = new StringBuffer();
expectedOutput.append(key1).append('\t').append(val1).append("\n");
expectedOutput.append(val1).append("\n");
expectedOutput.append(val2).append("\n");
expectedOutput.append(key2).append("\n");
expectedOutput.append(key1).append("\n");
expectedOutput.append(key2).append('\t').append(val2).append("\n");
String output = slurp(expectedFile);
    Assert.assertEquals(expectedOutput.toString(), output);
}
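  /** Reads the entire file into a UTF-8 string. */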
public static String slurp(File f) throws IOException {
int len = (int) f.length();
byte[] buf = new byte[len];
FileInputStream in = new FileInputStream(f);
String contents = null;
try {
in.read(buf, 0, len);
contents = new String(buf, "UTF-8");
} finally {
in.close();
}
return contents;
}
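  /**
   * MRApp variant that writes real job history events (so a restarted AM has
   * something to recover from) and uses a mock container launcher with a
   * fixed shuffle port that skips launching the 2nd attempt of the first
   * task.
   */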
static class MRAppWithHistory extends MRApp {
public MRAppWithHistory(int maps, int reduces, boolean autoComplete,
String testName, boolean cleanOnStart, int startCount) {
super(maps, reduces, autoComplete, testName, cleanOnStart, startCount);
}
@Override
protected ContainerLauncher createContainerLauncher(AppContext context) {
MockContainerLauncher launcher = new MockContainerLauncher() {
@Override
public void handle(ContainerLauncherEvent event) {
TaskAttemptId taskAttemptID = event.getTaskAttemptID();
// Pass everything except the 2nd attempt of the first task.
if (taskAttemptID.getId() != 1
|| taskAttemptID.getTaskId().getId() != 0) {
super.handle(event);
}
}
};
launcher.shufflePort = 5467;
return launcher;
}
@Override
protected EventHandler<JobHistoryEvent> createJobHistoryHandler(
AppContext context) {
JobHistoryEventHandler eventHandler = new JobHistoryEventHandler(context,
getStartCount());
return eventHandler;
}
}
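  /**
   * MRAppWithHistory variant that does not put a shuffle secret into the job
   * credentials, used to verify that completed tasks are not recovered in
   * that case.
   */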
static class MRAppNoShuffleSecret extends MRAppWithHistory {
public MRAppNoShuffleSecret(int maps, int reduces, boolean autoComplete,
String testName, boolean cleanOnStart, int startCount) {
super(maps, reduces, autoComplete, testName, cleanOnStart, startCount);
}
@Override
protected void initJobCredentialsAndUGI(Configuration conf) {
// do NOT put a shuffle secret in the job credentials
}
}
public static void main(String[] arg) throws Exception {
TestRecovery test = new TestRecovery();
test.testCrashed();
test.testMultipleCrashes();
test.testOutputRecovery();
test.testOutputRecoveryMapsOnly();
test.testRecoveryWithOldCommiter();
test.testSpeculative();
test.testRecoveryWithoutShuffleSecret();
test.testRecoverySuccessAttempt();
test.testRecoveryAllFailAttempts();
test.testRecoveryTaskSuccessAllAttemptsFail();
test.testRecoveryTaskSuccessAllAttemptsSucceed();
test.testRecoveryAllAttemptsKilled();
}
}
| 78,379
| 38.486146
| 95
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRClientService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.Iterator;
import java.util.List;
import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskRequest;
import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.Phase;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
import org.apache.hadoop.mapreduce.v2.app.client.MRClientService;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptDiagnosticsUpdateEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.junit.Test;
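/**
 * Tests the MR AM's client service over the MRClientProtocol RPC interface:
 * submits a job via MRApp and verifies that job reports, task and attempt
 * reports, counters, completion events and diagnostics are fully populated.
 */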
public class TestMRClientService {
private static RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
@Test
public void test() throws Exception {
MRAppWithClientService app = new MRAppWithClientService(1, 0, false);
Configuration conf = new Configuration();
Job job = app.submit(conf);
app.waitForState(job, JobState.RUNNING);
Assert.assertEquals("Num tasks not correct", 1, job.getTasks().size());
Iterator<Task> it = job.getTasks().values().iterator();
Task task = it.next();
app.waitForState(task, TaskState.RUNNING);
TaskAttempt attempt = task.getAttempts().values().iterator().next();
app.waitForState(attempt, TaskAttemptState.RUNNING);
// send the diagnostic
String diagnostic1 = "Diagnostic1";
String diagnostic2 = "Diagnostic2";
app.getContext().getEventHandler().handle(
new TaskAttemptDiagnosticsUpdateEvent(attempt.getID(), diagnostic1));
// send the status update
TaskAttemptStatus taskAttemptStatus = new TaskAttemptStatus();
taskAttemptStatus.id = attempt.getID();
taskAttemptStatus.progress = 0.5f;
taskAttemptStatus.stateString = "RUNNING";
taskAttemptStatus.taskState = TaskAttemptState.RUNNING;
taskAttemptStatus.phase = Phase.MAP;
// send the status update
app.getContext().getEventHandler().handle(
new TaskAttemptStatusUpdateEvent(attempt.getID(), taskAttemptStatus));
    //verify that all objects are fully populated by invoking RPCs.
YarnRPC rpc = YarnRPC.create(conf);
MRClientProtocol proxy =
(MRClientProtocol) rpc.getProxy(MRClientProtocol.class,
app.clientService.getBindAddress(), conf);
GetCountersRequest gcRequest =
recordFactory.newRecordInstance(GetCountersRequest.class);
gcRequest.setJobId(job.getID());
Assert.assertNotNull("Counters is null",
proxy.getCounters(gcRequest).getCounters());
GetJobReportRequest gjrRequest =
recordFactory.newRecordInstance(GetJobReportRequest.class);
gjrRequest.setJobId(job.getID());
JobReport jr = proxy.getJobReport(gjrRequest).getJobReport();
verifyJobReport(jr);
GetTaskAttemptCompletionEventsRequest gtaceRequest =
recordFactory.newRecordInstance(GetTaskAttemptCompletionEventsRequest.class);
gtaceRequest.setJobId(job.getID());
gtaceRequest.setFromEventId(0);
gtaceRequest.setMaxEvents(10);
Assert.assertNotNull("TaskCompletionEvents is null",
proxy.getTaskAttemptCompletionEvents(gtaceRequest).getCompletionEventList());
GetDiagnosticsRequest gdRequest =
recordFactory.newRecordInstance(GetDiagnosticsRequest.class);
gdRequest.setTaskAttemptId(attempt.getID());
Assert.assertNotNull("Diagnostics is null",
proxy.getDiagnostics(gdRequest).getDiagnosticsList());
GetTaskAttemptReportRequest gtarRequest =
recordFactory.newRecordInstance(GetTaskAttemptReportRequest.class);
gtarRequest.setTaskAttemptId(attempt.getID());
TaskAttemptReport tar =
proxy.getTaskAttemptReport(gtarRequest).getTaskAttemptReport();
verifyTaskAttemptReport(tar);
GetTaskReportRequest gtrRequest =
recordFactory.newRecordInstance(GetTaskReportRequest.class);
gtrRequest.setTaskId(task.getID());
Assert.assertNotNull("TaskReport is null",
proxy.getTaskReport(gtrRequest).getTaskReport());
GetTaskReportsRequest gtreportsRequest =
recordFactory.newRecordInstance(GetTaskReportsRequest.class);
gtreportsRequest.setJobId(job.getID());
gtreportsRequest.setTaskType(TaskType.MAP);
Assert.assertNotNull("TaskReports for map is null",
proxy.getTaskReports(gtreportsRequest).getTaskReportList());
gtreportsRequest =
recordFactory.newRecordInstance(GetTaskReportsRequest.class);
gtreportsRequest.setJobId(job.getID());
gtreportsRequest.setTaskType(TaskType.REDUCE);
Assert.assertNotNull("TaskReports for reduce is null",
proxy.getTaskReports(gtreportsRequest).getTaskReportList());
List<String> diag = proxy.getDiagnostics(gdRequest).getDiagnosticsList();
Assert.assertEquals("Num diagnostics not correct", 1 , diag.size());
Assert.assertEquals("Diag 1 not correct",
diagnostic1, diag.get(0).toString());
TaskReport taskReport = proxy.getTaskReport(gtrRequest).getTaskReport();
Assert.assertEquals("Num diagnostics not correct", 1,
taskReport.getDiagnosticsCount());
//send the done signal to the task
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(
task.getAttempts().values().iterator().next().getID(),
TaskAttemptEventType.TA_DONE));
app.waitForState(job, JobState.SUCCEEDED);
    // For an invalid job id, an IOException should be thrown
gtreportsRequest =
recordFactory.newRecordInstance(GetTaskReportsRequest.class);
gtreportsRequest.setJobId(TypeConverter.toYarn(JobID
.forName("job_1415730144495_0001")));
gtreportsRequest.setTaskType(TaskType.REDUCE);
try {
proxy.getTaskReports(gtreportsRequest);
fail("IOException not thrown for invalid job id");
} catch (IOException e) {
// Expected
}
}
@Test
public void testViewAclOnlyCannotModify() throws Exception {
final MRAppWithClientService app = new MRAppWithClientService(1, 0, false);
final Configuration conf = new Configuration();
conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
conf.set(MRJobConfig.JOB_ACL_VIEW_JOB, "viewonlyuser");
Job job = app.submit(conf);
app.waitForState(job, JobState.RUNNING);
Assert.assertEquals("Num tasks not correct", 1, job.getTasks().size());
Iterator<Task> it = job.getTasks().values().iterator();
Task task = it.next();
app.waitForState(task, TaskState.RUNNING);
TaskAttempt attempt = task.getAttempts().values().iterator().next();
app.waitForState(attempt, TaskAttemptState.RUNNING);
UserGroupInformation viewOnlyUser =
UserGroupInformation.createUserForTesting(
"viewonlyuser", new String[] {});
Assert.assertTrue("viewonlyuser cannot view job",
job.checkAccess(viewOnlyUser, JobACL.VIEW_JOB));
Assert.assertFalse("viewonlyuser can modify job",
job.checkAccess(viewOnlyUser, JobACL.MODIFY_JOB));
MRClientProtocol client = viewOnlyUser.doAs(
new PrivilegedExceptionAction<MRClientProtocol>() {
@Override
public MRClientProtocol run() throws Exception {
YarnRPC rpc = YarnRPC.create(conf);
return (MRClientProtocol) rpc.getProxy(MRClientProtocol.class,
app.clientService.getBindAddress(), conf);
}
});
KillJobRequest killJobRequest = recordFactory.newRecordInstance(
KillJobRequest.class);
killJobRequest.setJobId(app.getJobId());
try {
client.killJob(killJobRequest);
fail("viewonlyuser killed job");
} catch (AccessControlException e) {
// pass
}
KillTaskRequest killTaskRequest = recordFactory.newRecordInstance(
KillTaskRequest.class);
killTaskRequest.setTaskId(task.getID());
try {
client.killTask(killTaskRequest);
fail("viewonlyuser killed task");
} catch (AccessControlException e) {
// pass
}
KillTaskAttemptRequest killTaskAttemptRequest =
recordFactory.newRecordInstance(KillTaskAttemptRequest.class);
killTaskAttemptRequest.setTaskAttemptId(attempt.getID());
try {
client.killTaskAttempt(killTaskAttemptRequest);
fail("viewonlyuser killed task attempt");
} catch (AccessControlException e) {
// pass
}
FailTaskAttemptRequest failTaskAttemptRequest =
recordFactory.newRecordInstance(FailTaskAttemptRequest.class);
failTaskAttemptRequest.setTaskAttemptId(attempt.getID());
try {
client.failTaskAttempt(failTaskAttemptRequest);
fail("viewonlyuser killed task attempt");
} catch (AccessControlException e) {
// pass
}
}
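  // Checks the JobReport obtained over RPC: one AM info entry matching the
  // mock NM host/ports and a RUNNING, non-uber job.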
private void verifyJobReport(JobReport jr) {
Assert.assertNotNull("JobReport is null", jr);
List<AMInfo> amInfos = jr.getAMInfos();
Assert.assertEquals(1, amInfos.size());
Assert.assertEquals(JobState.RUNNING, jr.getJobState());
AMInfo amInfo = amInfos.get(0);
Assert.assertEquals(MRApp.NM_HOST, amInfo.getNodeManagerHost());
Assert.assertEquals(MRApp.NM_PORT, amInfo.getNodeManagerPort());
Assert.assertEquals(MRApp.NM_HTTP_PORT, amInfo.getNodeManagerHttpPort());
Assert.assertEquals(1, amInfo.getAppAttemptId().getAttemptId());
Assert.assertEquals(1, amInfo.getContainerId().getApplicationAttemptId()
.getAttemptId());
Assert.assertTrue(amInfo.getStartTime() > 0);
Assert.assertEquals(false, jr.isUber());
}
private void verifyTaskAttemptReport(TaskAttemptReport tar) {
    Assert.assertNotNull("TaskAttemptReport is null", tar);
    Assert.assertEquals(TaskAttemptState.RUNNING, tar.getTaskAttemptState());
Assert.assertEquals(MRApp.NM_HOST, tar.getNodeManagerHost());
Assert.assertEquals(MRApp.NM_PORT, tar.getNodeManagerPort());
Assert.assertEquals(MRApp.NM_HTTP_PORT, tar.getNodeManagerHttpPort());
Assert.assertEquals(1, tar.getContainerId().getApplicationAttemptId()
.getAttemptId());
}
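  // MRApp subclass that keeps a reference to the MRClientService so the tests
  // above can look up its RPC bind address.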
class MRAppWithClientService extends MRApp {
MRClientService clientService = null;
MRAppWithClientService(int maps, int reduces, boolean autoComplete) {
super(maps, reduces, autoComplete, "MRAppWithClientService", true);
}
@Override
protected ClientService createClientService(AppContext context) {
clientService = new MRClientService(context);
return clientService;
}
}
public static void main(String[] args) throws Exception {
TestMRClientService t = new TestMRClientService();
t.test();
}
}
| 13,699
| 42.35443
| 99
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFail.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.Iterator;
import java.util.Map;
import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.TaskAttemptListenerImpl;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttemptStateInternal;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
import org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl;
import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncher;
import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherEvent;
import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherImpl;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.client.api.impl.ContainerManagementProtocolProxy.ContainerManagementProtocolProxyData;
import org.junit.Test;
/**
* Tests the state machine with respect to Job/Task/TaskAttempt failure
* scenarios.
*/
@SuppressWarnings("unchecked")
public class TestFail {
@Test
  //The first attempt fails and the second attempt passes.
  //The job succeeds.
public void testFailTask() throws Exception {
MRApp app = new MockFirstFailingAttemptMRApp(1, 0);
Configuration conf = new Configuration();
// this test requires two task attempts, but uberization overrides max to 1
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
Job job = app.submit(conf);
app.waitForState(job, JobState.SUCCEEDED);
Map<TaskId,Task> tasks = job.getTasks();
Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
Task task = tasks.values().iterator().next();
Assert.assertEquals("Task state not correct", TaskState.SUCCEEDED,
task.getReport().getTaskState());
Map<TaskAttemptId, TaskAttempt> attempts =
tasks.values().iterator().next().getAttempts();
Assert.assertEquals("Num attempts is not correct", 2, attempts.size());
    //one attempt must have failed
//and another must have succeeded
Iterator<TaskAttempt> it = attempts.values().iterator();
Assert.assertEquals("Attempt state not correct", TaskAttemptState.FAILED,
it.next().getReport().getTaskAttemptState());
Assert.assertEquals("Attempt state not correct", TaskAttemptState.SUCCEEDED,
it.next().getReport().getTaskAttemptState());
}
@Test
public void testMapFailureMaxPercent() throws Exception {
MRApp app = new MockFirstFailingTaskMRApp(4, 0);
Configuration conf = new Configuration();
    //reduce the number of attempts so the test runs faster
conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, 2);
conf.setInt(MRJobConfig.REDUCE_MAX_ATTEMPTS, 1);
conf.setInt(MRJobConfig.MAP_FAILURES_MAX_PERCENT, 20);
conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, 1);
Job job = app.submit(conf);
app.waitForState(job, JobState.FAILED);
//setting the failure percentage to 25% (1/4 is 25) will
//make the Job successful
app = new MockFirstFailingTaskMRApp(4, 0);
conf = new Configuration();
    //reduce the number of attempts so the test runs faster
conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, 2);
conf.setInt(MRJobConfig.REDUCE_MAX_ATTEMPTS, 1);
conf.setInt(MRJobConfig.MAP_FAILURES_MAX_PERCENT, 25);
conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, 1);
job = app.submit(conf);
app.waitForState(job, JobState.SUCCEEDED);
}
@Test
public void testReduceFailureMaxPercent() throws Exception {
MRApp app = new MockFirstFailingTaskMRApp(2, 4);
Configuration conf = new Configuration();
    //reduce the number of attempts so the test runs faster
conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, 1);
conf.setInt(MRJobConfig.REDUCE_MAX_ATTEMPTS, 2);
conf.setInt(MRJobConfig.MAP_FAILURES_MAX_PERCENT, 50);//no failure due to Map
conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, 1);
conf.setInt(MRJobConfig.REDUCE_FAILURES_MAXPERCENT, 20);
conf.setInt(MRJobConfig.REDUCE_MAX_ATTEMPTS, 1);
Job job = app.submit(conf);
app.waitForState(job, JobState.FAILED);
//setting the failure percentage to 25% (1/4 is 25) will
//make the Job successful
app = new MockFirstFailingTaskMRApp(2, 4);
conf = new Configuration();
    //reduce the number of attempts so the test runs faster
conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, 1);
conf.setInt(MRJobConfig.REDUCE_MAX_ATTEMPTS, 2);
conf.setInt(MRJobConfig.MAP_FAILURES_MAX_PERCENT, 50);//no failure due to Map
conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, 1);
conf.setInt(MRJobConfig.REDUCE_FAILURES_MAXPERCENT, 25);
conf.setInt(MRJobConfig.REDUCE_MAX_ATTEMPTS, 1);
job = app.submit(conf);
app.waitForState(job, JobState.SUCCEEDED);
}
@Test
  //All Task attempts time out, leading to Job failure
public void testTimedOutTask() throws Exception {
MRApp app = new TimeOutTaskMRApp(1, 0);
Configuration conf = new Configuration();
int maxAttempts = 2;
conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, maxAttempts);
// disable uberization (requires entire job to be reattempted, so max for
// subtask attempts is overridden to 1)
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
Job job = app.submit(conf);
app.waitForState(job, JobState.FAILED);
Map<TaskId,Task> tasks = job.getTasks();
Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
Task task = tasks.values().iterator().next();
Assert.assertEquals("Task state not correct", TaskState.FAILED,
task.getReport().getTaskState());
Map<TaskAttemptId, TaskAttempt> attempts =
tasks.values().iterator().next().getAttempts();
Assert.assertEquals("Num attempts is not correct", maxAttempts,
attempts.size());
for (TaskAttempt attempt : attempts.values()) {
Assert.assertEquals("Attempt state not correct", TaskAttemptState.FAILED,
attempt.getReport().getTaskAttemptState());
}
}
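  // A container that completes before the attempt ever runs should fail the
  // attempt; with only one attempt allowed, the job fails as well.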
@Test
public void testTaskFailWithUnusedContainer() throws Exception {
MRApp app = new MRAppWithFailingTaskAndUnusedContainer();
Configuration conf = new Configuration();
int maxAttempts = 1;
conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, maxAttempts);
// disable uberization (requires entire job to be reattempted, so max for
// subtask attempts is overridden to 1)
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
Job job = app.submit(conf);
app.waitForState(job, JobState.RUNNING);
Map<TaskId, Task> tasks = job.getTasks();
Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
Task task = tasks.values().iterator().next();
app.waitForState(task, TaskState.SCHEDULED);
Map<TaskAttemptId, TaskAttempt> attempts = tasks.values().iterator()
.next().getAttempts();
Assert.assertEquals("Num attempts is not correct", maxAttempts, attempts
.size());
TaskAttempt attempt = attempts.values().iterator().next();
app.waitForInternalState((TaskAttemptImpl) attempt,
TaskAttemptStateInternal.ASSIGNED);
app.getDispatcher().getEventHandler().handle(
new TaskAttemptEvent(attempt.getID(),
TaskAttemptEventType.TA_CONTAINER_COMPLETED));
app.waitForState(job, JobState.FAILED);
}
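  // MRApp whose launcher forwards the remote-launch event but whose NM proxy
  // blocks forever, simulating a NodeManager that never responds, so the
  // assigned container is never actually used.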
static class MRAppWithFailingTaskAndUnusedContainer extends MRApp {
public MRAppWithFailingTaskAndUnusedContainer() {
super(1, 0, false, "TaskFailWithUnsedContainer", true);
}
@Override
protected ContainerLauncher createContainerLauncher(AppContext context) {
return new ContainerLauncherImpl(context) {
@Override
public void handle(ContainerLauncherEvent event) {
switch (event.getType()) {
case CONTAINER_REMOTE_LAUNCH:
super.handle(event); // Unused event and container.
break;
case CONTAINER_REMOTE_CLEANUP:
getContext().getEventHandler().handle(
new TaskAttemptEvent(event.getTaskAttemptID(),
TaskAttemptEventType.TA_CONTAINER_CLEANED));
break;
case CONTAINER_COMPLETED:
super.handle(event);
}
}
@Override
public ContainerManagementProtocolProxyData getCMProxy(
String containerMgrBindAddr, ContainerId containerId)
throws IOException {
try {
synchronized (this) {
wait(); // Just hang the thread simulating a very slow NM.
}
} catch (InterruptedException e) {
e.printStackTrace();
}
return null;
}
};
};
}
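  // MRApp whose task attempt listener uses a one-second task timeout and no
  // real RPC server, so every attempt is reported lost and fails.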
static class TimeOutTaskMRApp extends MRApp {
TimeOutTaskMRApp(int maps, int reduces) {
super(maps, reduces, false, "TimeOutTaskMRApp", true);
}
@Override
protected TaskAttemptListener createTaskAttemptListener(AppContext context) {
//This will create the TaskAttemptListener with TaskHeartbeatHandler
//RPC servers are not started
//task time out is reduced
//when attempt times out, heartbeat handler will send the lost event
//leading to Attempt failure
return new TaskAttemptListenerImpl(getContext(), null, null, null) {
@Override
public void startRpcServer(){};
@Override
public void stopRpcServer(){};
@Override
public InetSocketAddress getAddress() {
return NetUtils.createSocketAddr("localhost", 1234);
}
protected void serviceInit(Configuration conf) throws Exception {
conf.setInt(MRJobConfig.TASK_TIMEOUT, 1*1000);//reduce timeout
conf.setInt(MRJobConfig.TASK_TIMEOUT_CHECK_INTERVAL_MS, 1*1000);
super.serviceInit(conf);
}
};
}
}
//Attempts of first Task are failed
static class MockFirstFailingTaskMRApp extends MRApp {
MockFirstFailingTaskMRApp(int maps, int reduces) {
super(maps, reduces, true, "MockFirstFailingTaskMRApp", true);
}
@Override
protected void attemptLaunched(TaskAttemptId attemptID) {
if (attemptID.getTaskId().getId() == 0) {//check if it is first task
// send the Fail event
getContext().getEventHandler().handle(
new TaskAttemptEvent(attemptID,
TaskAttemptEventType.TA_FAILMSG));
} else {
getContext().getEventHandler().handle(
new TaskAttemptEvent(attemptID,
TaskAttemptEventType.TA_DONE));
}
}
}
//First attempt is failed
static class MockFirstFailingAttemptMRApp extends MRApp {
MockFirstFailingAttemptMRApp(int maps, int reduces) {
super(maps, reduces, true, "MockFirstFailingAttemptMRApp", true);
}
@Override
protected void attemptLaunched(TaskAttemptId attemptID) {
if (attemptID.getTaskId().getId() == 0 && attemptID.getId() == 0) {
//check if it is first task's first attempt
// send the Fail event
getContext().getEventHandler().handle(
new TaskAttemptEvent(attemptID,
TaskAttemptEventType.TA_FAILMSG));
} else {
getContext().getEventHandler().handle(
new TaskAttemptEvent(attemptID,
TaskAttemptEventType.TA_DONE));
}
}
}
public static void main(String[] args) throws Exception {
TestFail t = new TestFail();
t.testFailTask();
t.testTimedOutTask();
t.testMapFailureMaxPercent();
t.testReduceFailureMaxPercent();
t.testTaskFailWithUnusedContainer();
}
}
| 13,070
| 38.609091
| 116
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent;
import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent;
import org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.util.Records;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
import org.junit.Test;
public class MRAppBenchmark {
private static final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
/**
* Runs memory and time benchmark with Mock MRApp.
*/
public void run(MRApp app) throws Exception {
Logger rootLogger = LogManager.getRootLogger();
rootLogger.setLevel(Level.WARN);
long startTime = System.currentTimeMillis();
Job job = app.submit(new Configuration());
while (!job.getReport().getJobState().equals(JobState.SUCCEEDED)) {
printStat(job, startTime);
Thread.sleep(2000);
}
printStat(job, startTime);
}
private void printStat(Job job, long startTime) throws Exception {
long currentTime = System.currentTimeMillis();
Runtime.getRuntime().gc();
long mem = Runtime.getRuntime().totalMemory()
- Runtime.getRuntime().freeMemory();
System.out.println("JobState:" + job.getState() +
" CompletedMaps:" + job.getCompletedMaps() +
" CompletedReduces:" + job.getCompletedReduces() +
" Memory(total-free)(KB):" + mem/1024 +
" ElapsedTime(ms):" + (currentTime - startTime));
}
//Throttles the maximum number of concurrent running tasks.
//This affects the memory requirement since
  //org.apache.hadoop.mapred.MapTask/ReduceTask is loaded in memory for every
  //running task and discarded once the task is launched.
static class ThrottledMRApp extends MRApp {
int maxConcurrentRunningTasks;
volatile int concurrentRunningTasks;
ThrottledMRApp(int maps, int reduces, int maxConcurrentRunningTasks) {
super(maps, reduces, true, "ThrottledMRApp", true);
this.maxConcurrentRunningTasks = maxConcurrentRunningTasks;
}
@Override
protected void attemptLaunched(TaskAttemptId attemptID) {
super.attemptLaunched(attemptID);
//the task is launched and sends done immediately
concurrentRunningTasks--;
}
@Override
protected ContainerAllocator createContainerAllocator(
ClientService clientService, AppContext context) {
return new ThrottledContainerAllocator();
}
class ThrottledContainerAllocator extends AbstractService
implements ContainerAllocator {
private int containerCount;
private Thread thread;
private BlockingQueue<ContainerAllocatorEvent> eventQueue =
new LinkedBlockingQueue<ContainerAllocatorEvent>();
public ThrottledContainerAllocator() {
super("ThrottledContainerAllocator");
}
@Override
public void handle(ContainerAllocatorEvent event) {
try {
eventQueue.put(event);
} catch (InterruptedException e) {
throw new YarnRuntimeException(e);
}
}
@Override
protected void serviceStart() throws Exception {
thread = new Thread(new Runnable() {
@Override
public void run() {
ContainerAllocatorEvent event = null;
while (!Thread.currentThread().isInterrupted()) {
try {
if (concurrentRunningTasks < maxConcurrentRunningTasks) {
event = eventQueue.take();
ContainerId cId =
ContainerId.newContainerId(getContext()
.getApplicationAttemptId(), containerCount++);
//System.out.println("Allocating " + containerCount);
Container container =
recordFactory.newRecordInstance(Container.class);
container.setId(cId);
NodeId nodeId = NodeId.newInstance("dummy", 1234);
container.setNodeId(nodeId);
container.setContainerToken(null);
container.setNodeHttpAddress("localhost:8042");
getContext().getEventHandler()
.handle(
new TaskAttemptContainerAssignedEvent(event
.getAttemptID(), container, null));
concurrentRunningTasks++;
} else {
Thread.sleep(1000);
}
} catch (InterruptedException e) {
System.out.println("Returning, interrupted");
return;
}
}
}
});
thread.start();
super.serviceStart();
}
@Override
protected void serviceStop() throws Exception {
if (thread != null) {
thread.interrupt();
}
super.serviceStop();
}
}
}
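  // Benchmarks the AM with a mock scheduler that immediately grants a
  // container for every ANY-location request, so only AM-side processing is
  // measured.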
@Test
public void benchmark1() throws Exception {
int maps = 100; // Adjust for benchmarking. Start with thousands.
int reduces = 0;
System.out.println("Running benchmark with maps:"+maps +
" reduces:"+reduces);
run(new MRApp(maps, reduces, true, this.getClass().getName(), true) {
@Override
protected ContainerAllocator createContainerAllocator(
ClientService clientService, AppContext context) {
return new RMContainerAllocator(clientService, context) {
@Override
protected ApplicationMasterProtocol createSchedulerProxy() {
return new ApplicationMasterProtocol() {
@Override
public RegisterApplicationMasterResponse
registerApplicationMaster(
RegisterApplicationMasterRequest request)
throws IOException {
RegisterApplicationMasterResponse response =
Records.newRecord(RegisterApplicationMasterResponse.class);
response.setMaximumResourceCapability(Resource.newInstance(
10240, 1));
return response;
}
@Override
public FinishApplicationMasterResponse finishApplicationMaster(
FinishApplicationMasterRequest request)
throws IOException {
FinishApplicationMasterResponse response =
Records.newRecord(FinishApplicationMasterResponse.class);
return response;
}
@Override
public AllocateResponse allocate(AllocateRequest request)
throws IOException {
AllocateResponse response =
Records.newRecord(AllocateResponse.class);
List<ResourceRequest> askList = request.getAskList();
List<Container> containers = new ArrayList<Container>();
for (ResourceRequest req : askList) {
if (!ResourceRequest.isAnyLocation(req.getResourceName())) {
continue;
}
int numContainers = req.getNumContainers();
for (int i = 0; i < numContainers; i++) {
ContainerId containerId =
ContainerId.newContainerId(
getContext().getApplicationAttemptId(),
request.getResponseId() + i);
containers.add(Container.newInstance(containerId,
NodeId.newInstance(
"host" + containerId.getContainerId(), 2345),
"host" + containerId.getContainerId() + ":5678",
req.getCapability(), req.getPriority(), null));
}
}
response.setAllocatedContainers(containers);
response.setResponseId(request.getResponseId() + 1);
response.setNumClusterNodes(350);
return response;
}
};
}
};
}
});
}
@Test
public void benchmark2() throws Exception {
    int maps = 100; // Adjust for benchmarking; start with a couple of thousand.
int reduces = 50;
int maxConcurrentRunningTasks = 500;
System.out.println("Running benchmark with throttled running tasks with " +
"maxConcurrentRunningTasks:" + maxConcurrentRunningTasks +
" maps:" + maps + " reduces:" + reduces);
run(new ThrottledMRApp(maps, reduces, maxConcurrentRunningTasks));
}
public static void main(String[] args) throws Exception {
MRAppBenchmark benchmark = new MRAppBenchmark();
benchmark.benchmark1();
benchmark.benchmark2();
}
}
| 11,148
| 39.104317
| 98
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppMaster.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.anyBoolean;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.times;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.lang.reflect.Field;
import java.util.Collections;
import java.util.concurrent.atomic.AtomicLong;
import java.util.HashMap;
import java.util.Map;
import org.junit.Assert;
import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.jobhistory.EventType;
import org.apache.hadoop.mapreduce.jobhistory.EventWriter;
import org.apache.hadoop.mapreduce.jobhistory.HistoryEvent;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler;
import org.apache.hadoop.mapreduce.jobhistory.JobInitedEvent;
import org.apache.hadoop.mapreduce.jobhistory.JobUnsuccessfulCompletionEvent;
import org.apache.hadoop.mapreduce.split.JobSplitWriter;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
import org.apache.hadoop.mapreduce.v2.app.commit.CommitterEvent;
import org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler;
import org.apache.hadoop.mapreduce.v2.app.job.JobStateInternal;
import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
import org.apache.hadoop.mapreduce.v2.app.rm.RMHeartbeatHandler;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.ArgumentCaptor;
import org.mockito.Mockito;
public class TestMRAppMaster {
private static final Log LOG = LogFactory.getLog(TestMRAppMaster.class);
static String stagingDir = "staging/";
private static FileContext localFS = null;
private static final File testDir = new File("target",
TestMRAppMaster.class.getName() + "-tmpDir").getAbsoluteFile();
@BeforeClass
public static void setup() throws AccessControlException,
FileNotFoundException, IllegalArgumentException, IOException {
    //Do not error out if metrics are initialized multiple times
DefaultMetricsSystem.setMiniClusterMode(true);
File dir = new File(stagingDir);
stagingDir = dir.getAbsolutePath();
localFS = FileContext.getLocalFSFileContext();
localFS.delete(new Path(testDir.getAbsolutePath()), true);
testDir.mkdir();
}
@Before
public void cleanup() throws IOException {
File dir = new File(stagingDir);
if(dir.exists()) {
FileUtils.deleteDirectory(dir);
}
dir.mkdirs();
}
@Test
public void testMRAppMasterForDifferentUser() throws IOException,
InterruptedException {
String applicationAttemptIdStr = "appattempt_1317529182569_0004_000001";
String containerIdStr = "container_1317529182569_0004_000001_1";
String userName = "TestAppMasterUser";
ApplicationAttemptId applicationAttemptId = ConverterUtils
.toApplicationAttemptId(applicationAttemptIdStr);
ContainerId containerId = ConverterUtils.toContainerId(containerIdStr);
MRAppMasterTest appMaster =
new MRAppMasterTest(applicationAttemptId, containerId, "host", -1, -1,
System.currentTimeMillis());
JobConf conf = new JobConf();
conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
MRAppMaster.initAndStartAppMaster(appMaster, conf, userName);
Path userPath = new Path(stagingDir, userName);
Path userStagingPath = new Path(userPath, ".staging");
assertEquals(userStagingPath.toString(),
appMaster.stagingDirPath.toString());
}
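  // A start-commit file without a matching end file means a previous AM died
  // mid-commit, so the new AM must refuse to start and report an error.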
@Test
public void testMRAppMasterMidLock() throws IOException,
InterruptedException {
String applicationAttemptIdStr = "appattempt_1317529182569_0004_000002";
String containerIdStr = "container_1317529182569_0004_000002_1";
String userName = "TestAppMasterUser";
JobConf conf = new JobConf();
conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
ApplicationAttemptId applicationAttemptId = ConverterUtils
.toApplicationAttemptId(applicationAttemptIdStr);
JobId jobId = TypeConverter.toYarn(
TypeConverter.fromYarn(applicationAttemptId.getApplicationId()));
Path start = MRApps.getStartJobCommitFile(conf, userName, jobId);
FileSystem fs = FileSystem.get(conf);
//Create the file, but no end file so we should unregister with an error.
fs.create(start).close();
ContainerId containerId = ConverterUtils.toContainerId(containerIdStr);
MRAppMaster appMaster =
new MRAppMasterTest(applicationAttemptId, containerId, "host", -1, -1,
System.currentTimeMillis(), false, false);
boolean caught = false;
try {
MRAppMaster.initAndStartAppMaster(appMaster, conf, userName);
} catch (IOException e) {
//The IO Exception is expected
LOG.info("Caught expected Exception", e);
caught = true;
}
assertTrue(caught);
assertTrue(appMaster.errorHappenedShutDown);
assertEquals(JobStateInternal.ERROR, appMaster.forcedState);
appMaster.stop();
// verify the final status is FAILED
verifyFailedStatus((MRAppMasterTest)appMaster, "FAILED");
}
@Test
public void testMRAppMasterJobLaunchTime() throws IOException,
InterruptedException {
String applicationAttemptIdStr = "appattempt_1317529182569_0004_000002";
String containerIdStr = "container_1317529182569_0004_000002_1";
String userName = "TestAppMasterUser";
JobConf conf = new JobConf();
conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
conf.setInt(MRJobConfig.NUM_REDUCES, 0);
conf.set(JHAdminConfig.MR_HS_JHIST_FORMAT, "json");
ApplicationAttemptId applicationAttemptId = ConverterUtils
.toApplicationAttemptId(applicationAttemptIdStr);
JobId jobId = TypeConverter.toYarn(
TypeConverter.fromYarn(applicationAttemptId.getApplicationId()));
File dir = new File(MRApps.getStagingAreaDir(conf, userName).toString(),
jobId.toString());
dir.mkdirs();
File historyFile = new File(JobHistoryUtils.getStagingJobHistoryFile(
new Path(dir.toURI().toString()), jobId,
(applicationAttemptId.getAttemptId() - 1)).toUri().getRawPath());
historyFile.createNewFile();
FSDataOutputStream out = new FSDataOutputStream(
new FileOutputStream(historyFile), null);
EventWriter writer = new EventWriter(out, EventWriter.WriteMode.JSON);
writer.close();
FileSystem fs = FileSystem.get(conf);
JobSplitWriter.createSplitFiles(new Path(dir.getAbsolutePath()), conf,
fs, new org.apache.hadoop.mapred.InputSplit[0]);
ContainerId containerId = ConverterUtils.toContainerId(containerIdStr);
MRAppMasterTestLaunchTime appMaster =
new MRAppMasterTestLaunchTime(applicationAttemptId, containerId,
"host", -1, -1, System.currentTimeMillis());
MRAppMaster.initAndStartAppMaster(appMaster, conf, userName);
appMaster.stop();
assertTrue("Job launch time should not be negative.",
appMaster.jobLaunchTime.get() >= 0);
}
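  // Both the start and the end-commit-success files exist, so the job is
  // treated as already committed and the AM shuts down reporting SUCCEEDED.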
@Test
public void testMRAppMasterSuccessLock() throws IOException,
InterruptedException {
String applicationAttemptIdStr = "appattempt_1317529182569_0004_000002";
String containerIdStr = "container_1317529182569_0004_000002_1";
String userName = "TestAppMasterUser";
JobConf conf = new JobConf();
conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
ApplicationAttemptId applicationAttemptId = ConverterUtils
.toApplicationAttemptId(applicationAttemptIdStr);
JobId jobId = TypeConverter.toYarn(
TypeConverter.fromYarn(applicationAttemptId.getApplicationId()));
Path start = MRApps.getStartJobCommitFile(conf, userName, jobId);
Path end = MRApps.getEndJobCommitSuccessFile(conf, userName, jobId);
FileSystem fs = FileSystem.get(conf);
fs.create(start).close();
fs.create(end).close();
ContainerId containerId = ConverterUtils.toContainerId(containerIdStr);
MRAppMaster appMaster =
new MRAppMasterTest(applicationAttemptId, containerId, "host", -1, -1,
System.currentTimeMillis(), false, false);
boolean caught = false;
try {
MRAppMaster.initAndStartAppMaster(appMaster, conf, userName);
} catch (IOException e) {
//The IO Exception is expected
LOG.info("Caught expected Exception", e);
caught = true;
}
assertTrue(caught);
assertTrue(appMaster.errorHappenedShutDown);
assertEquals(JobStateInternal.SUCCEEDED, appMaster.forcedState);
appMaster.stop();
// verify the final status is SUCCEEDED
verifyFailedStatus((MRAppMasterTest)appMaster, "SUCCEEDED");
}
@Test
public void testMRAppMasterFailLock() throws IOException,
InterruptedException {
String applicationAttemptIdStr = "appattempt_1317529182569_0004_000002";
String containerIdStr = "container_1317529182569_0004_000002_1";
String userName = "TestAppMasterUser";
JobConf conf = new JobConf();
conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
ApplicationAttemptId applicationAttemptId = ConverterUtils
.toApplicationAttemptId(applicationAttemptIdStr);
JobId jobId = TypeConverter.toYarn(
TypeConverter.fromYarn(applicationAttemptId.getApplicationId()));
Path start = MRApps.getStartJobCommitFile(conf, userName, jobId);
Path end = MRApps.getEndJobCommitFailureFile(conf, userName, jobId);
FileSystem fs = FileSystem.get(conf);
fs.create(start).close();
fs.create(end).close();
ContainerId containerId = ConverterUtils.toContainerId(containerIdStr);
MRAppMaster appMaster =
new MRAppMasterTest(applicationAttemptId, containerId, "host", -1, -1,
System.currentTimeMillis(), false, false);
boolean caught = false;
try {
MRAppMaster.initAndStartAppMaster(appMaster, conf, userName);
} catch (IOException e) {
//The IO Exception is expected
LOG.info("Caught expected Exception", e);
caught = true;
}
assertTrue(caught);
assertTrue(appMaster.errorHappenedShutDown);
assertEquals(JobStateInternal.FAILED, appMaster.forcedState);
appMaster.stop();
// verify the final status is FAILED
verifyFailedStatus((MRAppMasterTest)appMaster, "FAILED");
}
@Test
public void testMRAppMasterMissingStaging() throws IOException,
InterruptedException {
String applicationAttemptIdStr = "appattempt_1317529182569_0004_000002";
String containerIdStr = "container_1317529182569_0004_000002_1";
String userName = "TestAppMasterUser";
JobConf conf = new JobConf();
conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
ApplicationAttemptId applicationAttemptId = ConverterUtils
.toApplicationAttemptId(applicationAttemptIdStr);
//Delete the staging directory
File dir = new File(stagingDir);
if(dir.exists()) {
FileUtils.deleteDirectory(dir);
}
ContainerId containerId = ConverterUtils.toContainerId(containerIdStr);
MRAppMaster appMaster =
new MRAppMasterTest(applicationAttemptId, containerId, "host", -1, -1,
System.currentTimeMillis(), false, false);
boolean caught = false;
try {
MRAppMaster.initAndStartAppMaster(appMaster, conf, userName);
} catch (IOException e) {
//The IO Exception is expected
LOG.info("Caught expected Exception", e);
caught = true;
}
assertTrue(caught);
assertTrue(appMaster.errorHappenedShutDown);
//Copying the history file is disabled, but it is not really visible from
//here
assertEquals(JobStateInternal.ERROR, appMaster.forcedState);
appMaster.stop();
}
@Test (timeout = 30000)
public void testMRAppMasterMaxAppAttempts() throws IOException,
InterruptedException {
    // No matter what the maxAppAttempts or attempt id is, isLastRetry always
    // equals false
Boolean[] expectedBools = new Boolean[]{ false, false, false };
String applicationAttemptIdStr = "appattempt_1317529182569_0004_000002";
String containerIdStr = "container_1317529182569_0004_000002_1";
String userName = "TestAppMasterUser";
ApplicationAttemptId applicationAttemptId = ConverterUtils
.toApplicationAttemptId(applicationAttemptIdStr);
ContainerId containerId = ConverterUtils.toContainerId(containerIdStr);
JobConf conf = new JobConf();
conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
File stagingDir =
new File(MRApps.getStagingAreaDir(conf, userName).toString());
stagingDir.mkdirs();
for (int i = 0; i < expectedBools.length; ++i) {
MRAppMasterTest appMaster =
new MRAppMasterTest(applicationAttemptId, containerId, "host", -1, -1,
System.currentTimeMillis(), false, true);
MRAppMaster.initAndStartAppMaster(appMaster, conf, userName);
assertEquals("isLastAMRetry is correctly computed.", expectedBools[i],
appMaster.isLastAMRetry());
}
}
// A dirty hack to modify the env of the current JVM itself - Dirty, but
// should be okay for testing.
@SuppressWarnings({ "rawtypes", "unchecked" })
private static void setNewEnvironmentHack(Map<String, String> newenv)
throws Exception {
try {
Class<?> cl = Class.forName("java.lang.ProcessEnvironment");
Field field = cl.getDeclaredField("theEnvironment");
field.setAccessible(true);
Map<String, String> env = (Map<String, String>) field.get(null);
env.clear();
env.putAll(newenv);
Field ciField = cl.getDeclaredField("theCaseInsensitiveEnvironment");
ciField.setAccessible(true);
Map<String, String> cienv = (Map<String, String>) ciField.get(null);
cienv.clear();
cienv.putAll(newenv);
} catch (NoSuchFieldException e) {
Class[] classes = Collections.class.getDeclaredClasses();
Map<String, String> env = System.getenv();
for (Class cl : classes) {
if ("java.util.Collections$UnmodifiableMap".equals(cl.getName())) {
Field field = cl.getDeclaredField("m");
field.setAccessible(true);
Object obj = field.get(env);
Map<String, String> map = (Map<String, String>) obj;
map.clear();
map.putAll(newenv);
}
}
}
}
@Test
public void testMRAppMasterCredentials() throws Exception {
Logger rootLogger = LogManager.getRootLogger();
rootLogger.setLevel(Level.DEBUG);
// Simulate credentials passed to AM via client->RM->NM
Credentials credentials = new Credentials();
byte[] identifier = "MyIdentifier".getBytes();
byte[] password = "MyPassword".getBytes();
Text kind = new Text("MyTokenKind");
Text service = new Text("host:port");
Token<? extends TokenIdentifier> myToken =
new Token<TokenIdentifier>(identifier, password, kind, service);
Text tokenAlias = new Text("myToken");
credentials.addToken(tokenAlias, myToken);
Text appTokenService = new Text("localhost:0");
Token<AMRMTokenIdentifier> appToken =
new Token<AMRMTokenIdentifier>(identifier, password,
AMRMTokenIdentifier.KIND_NAME, appTokenService);
credentials.addToken(appTokenService, appToken);
Text keyAlias = new Text("mySecretKeyAlias");
credentials.addSecretKey(keyAlias, "mySecretKey".getBytes());
Token<? extends TokenIdentifier> storedToken =
credentials.getToken(tokenAlias);
JobConf conf = new JobConf();
Path tokenFilePath = new Path(testDir.getAbsolutePath(), "tokens-file");
Map<String, String> newEnv = new HashMap<String, String>();
newEnv.put(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION, tokenFilePath
.toUri().getPath());
setNewEnvironmentHack(newEnv);
credentials.writeTokenStorageFile(tokenFilePath, conf);
ApplicationId appId = ApplicationId.newInstance(12345, 56);
ApplicationAttemptId applicationAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
ContainerId containerId =
ContainerId.newContainerId(applicationAttemptId, 546);
String userName = UserGroupInformation.getCurrentUser().getShortUserName();
// Create staging dir, so MRAppMaster doesn't barf.
File stagingDir =
new File(MRApps.getStagingAreaDir(conf, userName).toString());
stagingDir.mkdirs();
    // Set login-user to null as that is how a real-world MRApp starts.
    // The null login-user is the reason why the token-file is read by UGI.
UserGroupInformation.setLoginUser(null);
MRAppMasterTest appMaster =
new MRAppMasterTest(applicationAttemptId, containerId, "host", -1, -1,
System.currentTimeMillis(), false, true);
MRAppMaster.initAndStartAppMaster(appMaster, conf, userName);
// Now validate the task credentials
Credentials appMasterCreds = appMaster.getCredentials();
Assert.assertNotNull(appMasterCreds);
Assert.assertEquals(1, appMasterCreds.numberOfSecretKeys());
Assert.assertEquals(1, appMasterCreds.numberOfTokens());
// Validate the tokens - app token should not be present
Token<? extends TokenIdentifier> usedToken =
appMasterCreds.getToken(tokenAlias);
Assert.assertNotNull(usedToken);
Assert.assertEquals(storedToken, usedToken);
// Validate the keys
byte[] usedKey = appMasterCreds.getSecretKey(keyAlias);
Assert.assertNotNull(usedKey);
Assert.assertEquals("mySecretKey", new String(usedKey));
    // The credentials should also be added to conf so that OutputCommitter can
    // access them - the app token should not be present
Credentials confCredentials = conf.getCredentials();
Assert.assertEquals(1, confCredentials.numberOfSecretKeys());
Assert.assertEquals(1, confCredentials.numberOfTokens());
Assert.assertEquals(storedToken, confCredentials.getToken(tokenAlias));
Assert.assertEquals("mySecretKey",
new String(confCredentials.getSecretKey(keyAlias)));
// Verify the AM's ugi - app token should be present
Credentials ugiCredentials = appMaster.getUgi().getCredentials();
Assert.assertEquals(1, ugiCredentials.numberOfSecretKeys());
Assert.assertEquals(2, ugiCredentials.numberOfTokens());
Assert.assertEquals(storedToken, ugiCredentials.getToken(tokenAlias));
Assert.assertEquals(appToken, ugiCredentials.getToken(appTokenService));
Assert.assertEquals("mySecretKey",
new String(ugiCredentials.getSecretKey(keyAlias)));
}
@Test
public void testMRAppMasterShutDownJob() throws Exception,
InterruptedException {
String applicationAttemptIdStr = "appattempt_1317529182569_0004_000002";
String containerIdStr = "container_1317529182569_0004_000002_1";
String userName = "TestAppMasterUser";
ApplicationAttemptId applicationAttemptId = ConverterUtils
.toApplicationAttemptId(applicationAttemptIdStr);
ContainerId containerId = ConverterUtils.toContainerId(containerIdStr);
JobConf conf = new JobConf();
conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
File stagingDir =
new File(MRApps.getStagingAreaDir(conf, userName).toString());
stagingDir.mkdirs();
MRAppMasterTest appMaster =
spy(new MRAppMasterTest(applicationAttemptId, containerId, "host", -1, -1,
System.currentTimeMillis(), false, true));
MRAppMaster.initAndStartAppMaster(appMaster, conf, userName);
doReturn(conf).when(appMaster).getConfig();
appMaster.isLastAMRetry = true;
doNothing().when(appMaster).serviceStop();
// Test normal shutdown.
appMaster.shutDownJob();
Assert.assertTrue("Expected shutDownJob to terminate.",
ExitUtil.terminateCalled());
Assert.assertEquals("Expected shutDownJob to exit with status code of 0.",
0, ExitUtil.getFirstExitException().status);
// Test shutdown with exception.
ExitUtil.resetFirstExitException();
String msg = "Injected Exception";
doThrow(new RuntimeException(msg))
.when(appMaster).notifyIsLastAMRetry(anyBoolean());
appMaster.shutDownJob();
assertTrue("Expected message from ExitUtil.ExitException to be " + msg,
ExitUtil.getFirstExitException().getMessage().contains(msg));
Assert.assertEquals("Expected shutDownJob to exit with status code of 1.",
1, ExitUtil.getFirstExitException().status);
}
private void verifyFailedStatus(MRAppMasterTest appMaster,
String expectedJobState) {
ArgumentCaptor<JobHistoryEvent> captor = ArgumentCaptor
.forClass(JobHistoryEvent.class);
// handle two events: AMStartedEvent and JobUnsuccessfulCompletionEvent
verify(appMaster.spyHistoryService, times(2))
.handleEvent(captor.capture());
HistoryEvent event = captor.getValue().getHistoryEvent();
assertTrue(event instanceof JobUnsuccessfulCompletionEvent);
assertEquals(((JobUnsuccessfulCompletionEvent) event).getStatus()
, expectedJobState);
}
}
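/**
 * MRAppMaster subclass used by the tests above. Service init/start can be
 * turned into no-ops, and the container allocator, committer event handler
 * and RM heartbeat handler are replaced with mocks; the job history handler
 * is wrapped in a spy so emitted events can be verified.
 */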
class MRAppMasterTest extends MRAppMaster {
Path stagingDirPath;
private Configuration conf;
private boolean overrideInit;
private boolean overrideStart;
ContainerAllocator mockContainerAllocator;
CommitterEventHandler mockCommitterEventHandler;
RMHeartbeatHandler mockRMHeartbeatHandler;
JobHistoryEventHandler spyHistoryService;
public MRAppMasterTest(ApplicationAttemptId applicationAttemptId,
ContainerId containerId, String host, int port, int httpPort,
long submitTime) {
this(applicationAttemptId, containerId, host, port, httpPort,
submitTime, true, true);
}
public MRAppMasterTest(ApplicationAttemptId applicationAttemptId,
ContainerId containerId, String host, int port, int httpPort,
long submitTime, boolean overrideInit,
boolean overrideStart) {
super(applicationAttemptId, containerId, host, port, httpPort, submitTime);
this.overrideInit = overrideInit;
this.overrideStart = overrideStart;
mockContainerAllocator = mock(ContainerAllocator.class);
mockCommitterEventHandler = mock(CommitterEventHandler.class);
mockRMHeartbeatHandler = mock(RMHeartbeatHandler.class);
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
if (!overrideInit) {
super.serviceInit(conf);
}
this.conf = conf;
}
@Override
protected ContainerAllocator createContainerAllocator(
final ClientService clientService, final AppContext context) {
return mockContainerAllocator;
}
@Override
protected EventHandler<CommitterEvent> createCommitterEventHandler(
AppContext context, OutputCommitter committer) {
return mockCommitterEventHandler;
}
@Override
protected RMHeartbeatHandler getRMHeartbeatHandler() {
return mockRMHeartbeatHandler;
}
@Override
protected void serviceStart() throws Exception {
if (overrideStart) {
try {
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
String user = ugi.getShortUserName();
stagingDirPath = MRApps.getStagingAreaDir(conf, user);
} catch (Exception e) {
fail(e.getMessage());
}
} else {
super.serviceStart();
}
}
@Override
public Credentials getCredentials() {
return super.getCredentials();
}
public UserGroupInformation getUgi() {
return currentUser;
}
@Override
protected EventHandler<JobHistoryEvent> createJobHistoryHandler(
AppContext context) {
spyHistoryService =
Mockito.spy((JobHistoryEventHandler) super
.createJobHistoryHandler(context));
spyHistoryService.setForcejobCompletion(this.isLastAMRetry);
return spyHistoryService;
}
}
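/**
 * Variant that records the launch time from the JOB_INITED history event so
 * testMRAppMasterJobLaunchTime can assert it is non-negative.
 */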
class MRAppMasterTestLaunchTime extends MRAppMasterTest {
final AtomicLong jobLaunchTime = new AtomicLong(0L);
public MRAppMasterTestLaunchTime(ApplicationAttemptId applicationAttemptId,
ContainerId containerId, String host, int port, int httpPort,
long submitTime) {
super(applicationAttemptId, containerId, host, port, httpPort,
submitTime, false, false);
}
@Override
protected EventHandler<CommitterEvent> createCommitterEventHandler(
AppContext context, OutputCommitter committer) {
return new CommitterEventHandler(context, committer,
getRMHeartbeatHandler()) {
@Override
public void handle(CommitterEvent event) {
}
};
}
@Override
protected EventHandler<JobHistoryEvent> createJobHistoryHandler(
AppContext context) {
return new JobHistoryEventHandler(context, getStartCount()) {
@Override
public void handle(JobHistoryEvent event) {
if (event.getHistoryEvent().getEventType() == EventType.JOB_INITED) {
JobInitedEvent jie = (JobInitedEvent) event.getHistoryEvent();
jobLaunchTime.set(jie.getLaunchTime());
}
super.handle(event);
}
};
}
}
| 27,527
| 40.025335
| 82
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKill.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app;
import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.JobStateInternal;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptEvent;
import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.event.Event;
import org.junit.Test;
/**
* Tests the state machine with respect to Job/Task/TaskAttempt kill scenarios.
*
*/
@SuppressWarnings({"unchecked", "rawtypes"})
public class TestKill {
@Test
public void testKillJob() throws Exception {
final CountDownLatch latch = new CountDownLatch(1);
MRApp app = new BlockingMRApp(1, 0, latch);
//this will start the job but job won't complete as task is
//blocked
Job job = app.submit(new Configuration());
    //wait and validate for Job to become RUNNING
app.waitForState(job, JobState.RUNNING);
//send the kill signal to Job
app.getContext().getEventHandler().handle(
new JobEvent(job.getID(), JobEventType.JOB_KILL));
//unblock Task
latch.countDown();
//wait and validate for Job to be KILLED
app.waitForState(job, JobState.KILLED);
Map<TaskId,Task> tasks = job.getTasks();
Assert.assertEquals("No of tasks is not correct", 1,
tasks.size());
Task task = tasks.values().iterator().next();
Assert.assertEquals("Task state not correct", TaskState.KILLED,
task.getReport().getTaskState());
Map<TaskAttemptId, TaskAttempt> attempts =
tasks.values().iterator().next().getAttempts();
Assert.assertEquals("No of attempts is not correct", 1,
attempts.size());
Iterator<TaskAttempt> it = attempts.values().iterator();
Assert.assertEquals("Attempt state not correct", TaskAttemptState.KILLED,
it.next().getReport().getTaskAttemptState());
}
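  // Editor's note: the helper below is an added sketch, not part of the
  // original Hadoop test. It simply factors out the "one task, one attempt,
  // both KILLED" assertions used by testKillJob above; the name is an
  // assumption and the method is not referenced elsewhere.
  private static void assertTaskAndOnlyAttemptKilled(Task task) {
    Assert.assertEquals("Task state not correct", TaskState.KILLED,
        task.getReport().getTaskState());
    Map<TaskAttemptId, TaskAttempt> attempts = task.getAttempts();
    Assert.assertEquals("No of attempts is not correct", 1, attempts.size());
    Assert.assertEquals("Attempt state not correct", TaskAttemptState.KILLED,
        attempts.values().iterator().next().getReport().getTaskAttemptState());
  }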
@Test
public void testKillTask() throws Exception {
final CountDownLatch latch = new CountDownLatch(1);
MRApp app = new BlockingMRApp(2, 0, latch);
//this will start the job but job won't complete as Task is blocked
Job job = app.submit(new Configuration());
    //wait and validate for Job to become RUNNING
app.waitForState(job, JobState.RUNNING);
Map<TaskId,Task> tasks = job.getTasks();
Assert.assertEquals("No of tasks is not correct", 2,
tasks.size());
Iterator<Task> it = tasks.values().iterator();
Task task1 = it.next();
Task task2 = it.next();
//send the kill signal to the first Task
app.getContext().getEventHandler().handle(
new TaskEvent(task1.getID(), TaskEventType.T_KILL));
//unblock Task
latch.countDown();
//wait and validate for Job to become SUCCEEDED
app.waitForState(job, JobState.SUCCEEDED);
    //first Task is killed and the second succeeds;
    //the Job as a whole succeeds
Assert.assertEquals("Task state not correct", TaskState.KILLED,
task1.getReport().getTaskState());
Assert.assertEquals("Task state not correct", TaskState.SUCCEEDED,
task2.getReport().getTaskState());
Map<TaskAttemptId, TaskAttempt> attempts = task1.getAttempts();
Assert.assertEquals("No of attempts is not correct", 1,
attempts.size());
Iterator<TaskAttempt> iter = attempts.values().iterator();
Assert.assertEquals("Attempt state not correct", TaskAttemptState.KILLED,
iter.next().getReport().getTaskAttemptState());
attempts = task2.getAttempts();
Assert.assertEquals("No of attempts is not correct", 1,
attempts.size());
iter = attempts.values().iterator();
Assert.assertEquals("Attempt state not correct", TaskAttemptState.SUCCEEDED,
iter.next().getReport().getTaskAttemptState());
}
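  // Summary of the end state asserted in testKillTask above (added note, not
  // part of the original source):
  //   task1: KILLED,    1 attempt, attempt state KILLED
  //   task2: SUCCEEDED, 1 attempt, attempt state SUCCEEDED
  //   job:   SUCCEEDED  (killing a single task does not fail the job)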
@Test
public void testKillTaskWait() throws Exception {
final Dispatcher dispatcher = new AsyncDispatcher() {
private TaskAttemptEvent cachedKillEvent;
@Override
protected void dispatch(Event event) {
if (event instanceof TaskAttemptEvent) {
TaskAttemptEvent killEvent = (TaskAttemptEvent) event;
if (killEvent.getType() == TaskAttemptEventType.TA_KILL) {
TaskAttemptId taID = killEvent.getTaskAttemptID();
if (taID.getTaskId().getTaskType() == TaskType.REDUCE
&& taID.getTaskId().getId() == 0 && taID.getId() == 0) {
// Task is asking the reduce TA to kill itself. 'Create' a race
// condition. Make the task succeed and then inform the task that
// TA has succeeded. Once Task gets the TA succeeded event at
// KILL_WAIT, then relay the actual kill signal to TA
super.dispatch(new TaskAttemptEvent(taID,
TaskAttemptEventType.TA_DONE));
super.dispatch(new TaskAttemptEvent(taID,
TaskAttemptEventType.TA_CONTAINER_COMPLETED));
super.dispatch(new TaskTAttemptEvent(taID,
TaskEventType.T_ATTEMPT_SUCCEEDED));
this.cachedKillEvent = killEvent;
return;
}
}
} else if (event instanceof TaskEvent) {
TaskEvent taskEvent = (TaskEvent) event;
if (taskEvent.getType() == TaskEventType.T_ATTEMPT_SUCCEEDED
&& this.cachedKillEvent != null) {
// When the TA comes and reports that it is done, send the
// cachedKillEvent
super.dispatch(this.cachedKillEvent);
return;
}
}
super.dispatch(event);
}
};
MRApp app = new MRApp(1, 1, false, this.getClass().getName(), true) {
@Override
public Dispatcher createDispatcher() {
return dispatcher;
}
};
Job job = app.submit(new Configuration());
JobId jobId = app.getJobId();
app.waitForState(job, JobState.RUNNING);
Assert.assertEquals("Num tasks not correct", 2, job.getTasks().size());
Iterator<Task> it = job.getTasks().values().iterator();
Task mapTask = it.next();
Task reduceTask = it.next();
app.waitForState(mapTask, TaskState.RUNNING);
app.waitForState(reduceTask, TaskState.RUNNING);
TaskAttempt mapAttempt = mapTask.getAttempts().values().iterator().next();
app.waitForState(mapAttempt, TaskAttemptState.RUNNING);
TaskAttempt reduceAttempt = reduceTask.getAttempts().values().iterator().next();
app.waitForState(reduceAttempt, TaskAttemptState.RUNNING);
// Finish map
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(
mapAttempt.getID(),
TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask, TaskState.SUCCEEDED);
// Now kill the job
app.getContext().getEventHandler()
.handle(new JobEvent(jobId, JobEventType.JOB_KILL));
app.waitForInternalState((JobImpl) job, JobStateInternal.KILLED);
}
@Test
public void testKillTaskWaitKillJobAfterTA_DONE() throws Exception {
CountDownLatch latch = new CountDownLatch(1);
final Dispatcher dispatcher = new MyAsyncDispatch(latch, TaskAttemptEventType.TA_DONE);
MRApp app = new MRApp(1, 1, false, this.getClass().getName(), true) {
@Override
public Dispatcher createDispatcher() {
return dispatcher;
}
};
Job job = app.submit(new Configuration());
JobId jobId = app.getJobId();
app.waitForState(job, JobState.RUNNING);
Assert.assertEquals("Num tasks not correct", 2, job.getTasks().size());
Iterator<Task> it = job.getTasks().values().iterator();
Task mapTask = it.next();
Task reduceTask = it.next();
app.waitForState(mapTask, TaskState.RUNNING);
app.waitForState(reduceTask, TaskState.RUNNING);
TaskAttempt mapAttempt = mapTask.getAttempts().values().iterator().next();
app.waitForState(mapAttempt, TaskAttemptState.RUNNING);
TaskAttempt reduceAttempt = reduceTask.getAttempts().values().iterator().next();
app.waitForState(reduceAttempt, TaskAttemptState.RUNNING);
// The order in the dispatch event queue, from first to last
// TA_DONE
// JobEventType.JOB_KILL
// TaskAttemptEventType.TA_CONTAINER_COMPLETED ( from TA_DONE handling )
// TaskEventType.T_KILL ( from JobEventType.JOB_KILL handling )
// TaskEventType.T_ATTEMPT_SUCCEEDED ( from TA_CONTAINER_COMPLETED handling )
// Finish map
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(
mapAttempt.getID(),
TaskAttemptEventType.TA_DONE));
// Now kill the job
app.getContext().getEventHandler()
.handle(new JobEvent(jobId, JobEventType.JOB_KILL));
//unblock
latch.countDown();
app.waitForInternalState((JobImpl)job, JobStateInternal.KILLED);
}
@Test
public void testKillTaskWaitKillJobBeforeTA_DONE() throws Exception {
CountDownLatch latch = new CountDownLatch(1);
final Dispatcher dispatcher = new MyAsyncDispatch(latch, JobEventType.JOB_KILL);
MRApp app = new MRApp(1, 1, false, this.getClass().getName(), true) {
@Override
public Dispatcher createDispatcher() {
return dispatcher;
}
};
Job job = app.submit(new Configuration());
JobId jobId = app.getJobId();
app.waitForState(job, JobState.RUNNING);
Assert.assertEquals("Num tasks not correct", 2, job.getTasks().size());
Iterator<Task> it = job.getTasks().values().iterator();
Task mapTask = it.next();
Task reduceTask = it.next();
app.waitForState(mapTask, TaskState.RUNNING);
app.waitForState(reduceTask, TaskState.RUNNING);
TaskAttempt mapAttempt = mapTask.getAttempts().values().iterator().next();
app.waitForState(mapAttempt, TaskAttemptState.RUNNING);
TaskAttempt reduceAttempt = reduceTask.getAttempts().values().iterator().next();
app.waitForState(reduceAttempt, TaskAttemptState.RUNNING);
// The order in the dispatch event queue, from first to last
// JobEventType.JOB_KILL
// TA_DONE
// TaskEventType.T_KILL ( from JobEventType.JOB_KILL handling )
// TaskAttemptEventType.TA_CONTAINER_COMPLETED ( from TA_DONE handling )
// TaskAttemptEventType.TA_KILL ( from TaskEventType.T_KILL handling )
// TaskEventType.T_ATTEMPT_SUCCEEDED ( from TA_CONTAINER_COMPLETED handling )
// TaskEventType.T_ATTEMPT_KILLED ( from TA_KILL handling )
// Now kill the job
app.getContext().getEventHandler()
.handle(new JobEvent(jobId, JobEventType.JOB_KILL));
// Finish map
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(
mapAttempt.getID(),
TaskAttemptEventType.TA_DONE));
//unblock
latch.countDown();
app.waitForInternalState((JobImpl)job, JobStateInternal.KILLED);
}
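  // Added note (not part of the original source): the two tests above gate
  // different events (TA_DONE vs. JOB_KILL) through MyAsyncDispatch, defined
  // below, to force the two dispatch orderings documented in their comments;
  // in both interleavings the job must still end in JobStateInternal.KILLED.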
static class MyAsyncDispatch extends AsyncDispatcher {
private CountDownLatch latch;
private TaskAttemptEventType attemptEventTypeToWait;
private JobEventType jobEventTypeToWait;
MyAsyncDispatch(CountDownLatch latch, TaskAttemptEventType attemptEventTypeToWait) {
super();
this.latch = latch;
this.attemptEventTypeToWait = attemptEventTypeToWait;
}
MyAsyncDispatch(CountDownLatch latch, JobEventType jobEventTypeToWait) {
super();
this.latch = latch;
this.jobEventTypeToWait = jobEventTypeToWait;
}
@Override
protected void dispatch(Event event) {
if (event instanceof TaskAttemptEvent) {
TaskAttemptEvent attemptEvent = (TaskAttemptEvent) event;
TaskAttemptId attemptID = ((TaskAttemptEvent) event).getTaskAttemptID();
if (attemptEvent.getType() == this.attemptEventTypeToWait
&& attemptID.getTaskId().getId() == 0 && attemptID.getId() == 0 ) {
try {
latch.await();
} catch (InterruptedException e) {
e.printStackTrace();
}
}
} else if ( event instanceof JobEvent) {
JobEvent jobEvent = (JobEvent) event;
if (jobEvent.getType() == this.jobEventTypeToWait) {
try {
latch.await();
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}
super.dispatch(event);
}
}
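  // Added usage note (not part of the original source): MyAsyncDispatch is
  // constructed above in two flavors, depending on which event should be held
  // back until the test has queued the racing events, e.g.
  //
  //   // hold the first map attempt's TA_DONE until the latch opens
  //   new MyAsyncDispatch(latch, TaskAttemptEventType.TA_DONE);
  //   // hold the JOB_KILL until the latch opens
  //   new MyAsyncDispatch(latch, JobEventType.JOB_KILL);
  //
  // Every other event is dispatched immediately; the gated event is released
  // once latch.countDown() is called.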
@Test
public void testKillTaskAttempt() throws Exception {
final CountDownLatch latch = new CountDownLatch(1);
MRApp app = new BlockingMRApp(2, 0, latch);
//this will start the job but job won't complete as Task is blocked
Job job = app.submit(new Configuration());
    //wait and validate for Job to become RUNNING
app.waitForState(job, JobState.RUNNING);
Map<TaskId,Task> tasks = job.getTasks();
Assert.assertEquals("No of tasks is not correct", 2,
tasks.size());
Iterator<Task> it = tasks.values().iterator();
Task task1 = it.next();
Task task2 = it.next();
    //wait for tasks to be scheduled
app.waitForState(task1, TaskState.SCHEDULED);
app.waitForState(task2, TaskState.SCHEDULED);
//send the kill signal to the first Task's attempt
TaskAttempt attempt = task1.getAttempts().values().iterator().next();
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(attempt.getID(), TaskAttemptEventType.TA_KILL));
//unblock
latch.countDown();
//wait and validate for Job to become SUCCEEDED
//job will still succeed
app.waitForState(job, JobState.SUCCEEDED);
    //first Task will have two attempts: 1st is killed, 2nd succeeds
    //both Tasks and the Job succeed
Assert.assertEquals("Task state not correct", TaskState.SUCCEEDED,
task1.getReport().getTaskState());
Assert.assertEquals("Task state not correct", TaskState.SUCCEEDED,
task2.getReport().getTaskState());
Map<TaskAttemptId, TaskAttempt> attempts = task1.getAttempts();
Assert.assertEquals("No of attempts is not correct", 2,
attempts.size());
Iterator<TaskAttempt> iter = attempts.values().iterator();
Assert.assertEquals("Attempt state not correct", TaskAttemptState.KILLED,
iter.next().getReport().getTaskAttemptState());
Assert.assertEquals("Attempt state not correct", TaskAttemptState.SUCCEEDED,
iter.next().getReport().getTaskAttemptState());
attempts = task2.getAttempts();
Assert.assertEquals("No of attempts is not correct", 1,
attempts.size());
iter = attempts.values().iterator();
Assert.assertEquals("Attempt state not correct", TaskAttemptState.SUCCEEDED,
iter.next().getReport().getTaskAttemptState());
}
static class BlockingMRApp extends MRApp {
private CountDownLatch latch;
BlockingMRApp(int maps, int reduces, CountDownLatch latch) {
super(maps, reduces, true, "testKill", true);
this.latch = latch;
}
@Override
protected void attemptLaunched(TaskAttemptId attemptID) {
if (attemptID.getTaskId().getId() == 0 && attemptID.getId() == 0) {
//this blocks the first task's first attempt
//the subsequent ones are completed
try {
latch.await();
} catch (InterruptedException e) {
e.printStackTrace();
}
} else {
getContext().getEventHandler().handle(
new TaskAttemptEvent(attemptID,
TaskAttemptEventType.TA_DONE));
}
}
}
public static void main(String[] args) throws Exception {
TestKill t = new TestKill();
t.testKillJob();
t.testKillTask();
t.testKillTaskAttempt();
}
}
| 17,483
| 38.201794
| 91
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestAMInfos.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app;
import java.util.Iterator;
import java.util.List;
import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
import org.apache.hadoop.mapreduce.v2.app.TestRecovery.MRAppWithHistory;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.junit.Test;
public class TestAMInfos {
@Test
public void testAMInfosWithoutRecoveryEnabled() throws Exception {
int runCount = 0;
MRApp app =
new MRAppWithHistory(1, 0, false, this.getClass().getName(), true,
++runCount);
Configuration conf = new Configuration();
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
Job job = app.submit(conf);
app.waitForState(job, JobState.RUNNING);
long am1StartTime = app.getAllAMInfos().get(0).getStartTime();
Assert.assertEquals("No of tasks not correct", 1, job.getTasks().size());
Iterator<Task> it = job.getTasks().values().iterator();
Task mapTask = it.next();
app.waitForState(mapTask, TaskState.RUNNING);
TaskAttempt taskAttempt = mapTask.getAttempts().values().iterator().next();
app.waitForState(taskAttempt, TaskAttemptState.RUNNING);
// stop the app
app.stop();
// rerun
app =
new MRAppWithHistory(1, 0, false, this.getClass().getName(), false,
++runCount);
conf = new Configuration();
// in rerun the AMInfo will be recovered from previous run even if recovery
// is not enabled.
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, false);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
job = app.submit(conf);
app.waitForState(job, JobState.RUNNING);
Assert.assertEquals("No of tasks not correct", 1, job.getTasks().size());
it = job.getTasks().values().iterator();
mapTask = it.next();
// There should be two AMInfos
List<AMInfo> amInfos = app.getAllAMInfos();
Assert.assertEquals(2, amInfos.size());
AMInfo amInfoOne = amInfos.get(0);
Assert.assertEquals(am1StartTime, amInfoOne.getStartTime());
app.stop();
}
}
| 3,263
| 37.4
| 79
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.TaskCompletionEvent;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.Phase;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
import org.apache.hadoop.mapreduce.v2.app.speculate.DefaultSpeculator;
import org.apache.hadoop.mapreduce.v2.app.speculate.ExponentiallySmoothedTaskRuntimeEstimator;
import org.apache.hadoop.mapreduce.v2.app.speculate.LegacyTaskRuntimeEstimator;
import org.apache.hadoop.mapreduce.v2.app.speculate.Speculator;
import org.apache.hadoop.mapreduce.v2.app.speculate.SpeculatorEvent;
import org.apache.hadoop.mapreduce.v2.app.speculate.TaskRuntimeEstimator;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.service.CompositeService;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.security.client.ClientToAMTokenSecretManager;
import org.apache.hadoop.yarn.util.Clock;
import org.apache.hadoop.yarn.util.ControlledClock;
import org.apache.hadoop.yarn.util.SystemClock;
import org.junit.Assert;
import org.junit.Test;
@SuppressWarnings({"unchecked", "rawtypes"})
public class TestRuntimeEstimators {
private static int INITIAL_NUMBER_FREE_SLOTS = 600;
private static int MAP_SLOT_REQUIREMENT = 3;
// this has to be at least as much as map slot requirement
private static int REDUCE_SLOT_REQUIREMENT = 4;
private static int MAP_TASKS = 200;
private static int REDUCE_TASKS = 150;
ControlledClock clock;
Job myJob;
AppContext myAppContext;
private static final Log LOG = LogFactory.getLog(TestRuntimeEstimators.class);
private final AtomicInteger slotsInUse = new AtomicInteger(0);
AsyncDispatcher dispatcher;
DefaultSpeculator speculator;
TaskRuntimeEstimator estimator;
// This is a huge kluge. The real implementations have a decent approach
private final AtomicInteger completedMaps = new AtomicInteger(0);
private final AtomicInteger completedReduces = new AtomicInteger(0);
private final AtomicInteger successfulSpeculations
= new AtomicInteger(0);
private final AtomicLong taskTimeSavedBySpeculation
= new AtomicLong(0L);
private final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
private void coreTestEstimator
(TaskRuntimeEstimator testedEstimator, int expectedSpeculations) {
estimator = testedEstimator;
clock = new ControlledClock();
dispatcher = new AsyncDispatcher();
myJob = null;
slotsInUse.set(0);
completedMaps.set(0);
completedReduces.set(0);
successfulSpeculations.set(0);
taskTimeSavedBySpeculation.set(0);
clock.tickMsec(1000);
Configuration conf = new Configuration();
myAppContext = new MyAppContext(MAP_TASKS, REDUCE_TASKS);
myJob = myAppContext.getAllJobs().values().iterator().next();
estimator.contextualize(conf, myAppContext);
conf.setLong(MRJobConfig.SPECULATIVE_RETRY_AFTER_NO_SPECULATE, 500L);
conf.setLong(MRJobConfig.SPECULATIVE_RETRY_AFTER_SPECULATE, 5000L);
conf.setDouble(MRJobConfig.SPECULATIVECAP_RUNNING_TASKS, 0.1);
conf.setDouble(MRJobConfig.SPECULATIVECAP_TOTAL_TASKS, 0.001);
conf.setInt(MRJobConfig.SPECULATIVE_MINIMUM_ALLOWED_TASKS, 5);
speculator = new DefaultSpeculator(conf, myAppContext, estimator, clock);
Assert.assertEquals("wrong SPECULATIVE_RETRY_AFTER_NO_SPECULATE value",
500L, speculator.getSoonestRetryAfterNoSpeculate());
Assert.assertEquals("wrong SPECULATIVE_RETRY_AFTER_SPECULATE value",
5000L, speculator.getSoonestRetryAfterSpeculate());
Assert.assertEquals(speculator.getProportionRunningTasksSpeculatable(),
0.1, 0.00001);
Assert.assertEquals(speculator.getProportionTotalTasksSpeculatable(),
0.001, 0.00001);
Assert.assertEquals("wrong SPECULATIVE_MINIMUM_ALLOWED_TASKS value",
5, speculator.getMinimumAllowedSpeculativeTasks());
dispatcher.register(Speculator.EventType.class, speculator);
dispatcher.register(TaskEventType.class, new SpeculationRequestEventHandler());
dispatcher.init(conf);
dispatcher.start();
speculator.init(conf);
speculator.start();
// Now that the plumbing is hooked up, we do the following:
// do until all tasks are finished, ...
// 1: If we have spare capacity, assign as many map tasks as we can, then
// assign as many reduce tasks as we can. Note that an odd reduce
// task might be started while there are still map tasks, because
    //      map tasks take 3 slots and reduce tasks 4 slots.
// 2: Send a speculation event for every task attempt that's running
// note that new attempts might get started by the speculator
// discover undone tasks
int undoneMaps = MAP_TASKS;
int undoneReduces = REDUCE_TASKS;
// build a task sequence where all the maps precede any of the reduces
List<Task> allTasksSequence = new LinkedList<Task>();
allTasksSequence.addAll(myJob.getTasks(TaskType.MAP).values());
allTasksSequence.addAll(myJob.getTasks(TaskType.REDUCE).values());
while (undoneMaps + undoneReduces > 0) {
undoneMaps = 0; undoneReduces = 0;
      // start all attempts which are new but for which there are enough slots
for (Task task : allTasksSequence) {
if (!task.isFinished()) {
if (task.getType() == TaskType.MAP) {
++undoneMaps;
} else {
++undoneReduces;
}
}
for (TaskAttempt attempt : task.getAttempts().values()) {
if (attempt.getState() == TaskAttemptState.NEW
&& INITIAL_NUMBER_FREE_SLOTS - slotsInUse.get()
>= taskTypeSlots(task.getType())) {
MyTaskAttemptImpl attemptImpl = (MyTaskAttemptImpl)attempt;
SpeculatorEvent event
= new SpeculatorEvent(attempt.getID(), false, clock.getTime());
speculator.handle(event);
attemptImpl.startUp();
} else {
// If a task attempt is in progress we should send the news to
// the Speculator.
TaskAttemptStatus status = new TaskAttemptStatus();
status.id = attempt.getID();
status.progress = attempt.getProgress();
status.stateString = attempt.getState().name();
status.taskState = attempt.getState();
SpeculatorEvent event = new SpeculatorEvent(status, clock.getTime());
speculator.handle(event);
}
}
}
long startTime = System.currentTimeMillis();
// drain the speculator event queue
while (!speculator.eventQueueEmpty()) {
Thread.yield();
if (System.currentTimeMillis() > startTime + 130000) {
return;
}
}
clock.tickMsec(1000L);
if (clock.getTime() % 10000L == 0L) {
speculator.scanForSpeculations();
}
}
Assert.assertEquals("We got the wrong number of successful speculations.",
expectedSpeculations, successfulSpeculations.get());
}
@Test
public void testLegacyEstimator() throws Exception {
TaskRuntimeEstimator specificEstimator = new LegacyTaskRuntimeEstimator();
coreTestEstimator(specificEstimator, 3);
}
@Test
public void testExponentialEstimator() throws Exception {
TaskRuntimeEstimator specificEstimator
= new ExponentiallySmoothedTaskRuntimeEstimator();
coreTestEstimator(specificEstimator, 3);
}
int taskTypeSlots(TaskType type) {
return type == TaskType.MAP ? MAP_SLOT_REQUIREMENT : REDUCE_SLOT_REQUIREMENT;
}
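  // Added worked example (editor's note, not part of the original source):
  // with INITIAL_NUMBER_FREE_SLOTS = 600, MAP_SLOT_REQUIREMENT = 3 and
  // REDUCE_SLOT_REQUIREMENT = 4, the simulated cluster can run at most
  // 600 / 3 = 200 map attempts or 600 / 4 = 150 reduce attempts at a time;
  // for comparison, the job above declares MAP_TASKS = 200 and
  // REDUCE_TASKS = 150.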
class SpeculationRequestEventHandler implements EventHandler<TaskEvent> {
@Override
public void handle(TaskEvent event) {
TaskId taskID = event.getTaskID();
Task task = myJob.getTask(taskID);
Assert.assertEquals
("Wrong type event", TaskEventType.T_ADD_SPEC_ATTEMPT, event.getType());
System.out.println("SpeculationRequestEventHandler.handle adds a speculation task for " + taskID);
addAttempt(task);
}
}
void addAttempt(Task task) {
MyTaskImpl myTask = (MyTaskImpl) task;
myTask.addAttempt();
}
class MyTaskImpl implements Task {
private final TaskId taskID;
private final Map<TaskAttemptId, TaskAttempt> attempts
= new ConcurrentHashMap<TaskAttemptId, TaskAttempt>(4);
MyTaskImpl(JobId jobID, int index, TaskType type) {
taskID = recordFactory.newRecordInstance(TaskId.class);
taskID.setId(index);
taskID.setTaskType(type);
taskID.setJobId(jobID);
}
void addAttempt() {
TaskAttempt taskAttempt
= new MyTaskAttemptImpl(taskID, attempts.size(), clock);
TaskAttemptId taskAttemptID = taskAttempt.getID();
attempts.put(taskAttemptID, taskAttempt);
System.out.println("TLTRE.MyTaskImpl.addAttempt " + getID());
SpeculatorEvent event = new SpeculatorEvent(taskID, +1);
dispatcher.getEventHandler().handle(event);
}
@Override
public TaskId getID() {
return taskID;
}
@Override
public TaskReport getReport() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public Counters getCounters() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public float getProgress() {
float result = 0.0F;
for (TaskAttempt attempt : attempts.values()) {
result = Math.max(result, attempt.getProgress());
}
return result;
}
@Override
public TaskType getType() {
return taskID.getTaskType();
}
@Override
public Map<TaskAttemptId, TaskAttempt> getAttempts() {
Map<TaskAttemptId, TaskAttempt> result
= new HashMap<TaskAttemptId, TaskAttempt>(attempts.size());
result.putAll(attempts);
return result;
}
@Override
public TaskAttempt getAttempt(TaskAttemptId attemptID) {
return attempts.get(attemptID);
}
@Override
public boolean isFinished() {
for (TaskAttempt attempt : attempts.values()) {
if (attempt.getState() == TaskAttemptState.SUCCEEDED) {
return true;
}
}
return false;
}
@Override
public boolean canCommit(TaskAttemptId taskAttemptID) {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public TaskState getState() {
throw new UnsupportedOperationException("Not supported yet.");
}
}
class MyJobImpl implements Job {
private final JobId jobID;
private final Map<TaskId, Task> allTasks = new HashMap<TaskId, Task>();
private final Map<TaskId, Task> mapTasks = new HashMap<TaskId, Task>();
private final Map<TaskId, Task> reduceTasks = new HashMap<TaskId, Task>();
MyJobImpl(JobId jobID, int numMaps, int numReduces) {
this.jobID = jobID;
for (int i = 0; i < numMaps; ++i) {
Task newTask = new MyTaskImpl(jobID, i, TaskType.MAP);
mapTasks.put(newTask.getID(), newTask);
allTasks.put(newTask.getID(), newTask);
}
for (int i = 0; i < numReduces; ++i) {
Task newTask = new MyTaskImpl(jobID, i, TaskType.REDUCE);
reduceTasks.put(newTask.getID(), newTask);
allTasks.put(newTask.getID(), newTask);
}
// give every task an attempt
for (Task task : allTasks.values()) {
MyTaskImpl myTaskImpl = (MyTaskImpl) task;
myTaskImpl.addAttempt();
}
}
@Override
public JobId getID() {
return jobID;
}
@Override
public JobState getState() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public JobReport getReport() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public float getProgress() {
return 0;
}
@Override
public Counters getAllCounters() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public Map<TaskId, Task> getTasks() {
return allTasks;
}
@Override
public Map<TaskId, Task> getTasks(TaskType taskType) {
return taskType == TaskType.MAP ? mapTasks : reduceTasks;
}
@Override
public Task getTask(TaskId taskID) {
return allTasks.get(taskID);
}
@Override
public List<String> getDiagnostics() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public int getCompletedMaps() {
return completedMaps.get();
}
@Override
public int getCompletedReduces() {
return completedReduces.get();
}
@Override
public TaskAttemptCompletionEvent[]
getTaskAttemptCompletionEvents(int fromEventId, int maxEvents) {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public TaskCompletionEvent[]
getMapAttemptCompletionEvents(int startIndex, int maxEvents) {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public String getName() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public String getQueueName() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public int getTotalMaps() {
return mapTasks.size();
}
@Override
public int getTotalReduces() {
return reduceTasks.size();
}
@Override
public boolean isUber() {
return false;
}
@Override
public boolean checkAccess(UserGroupInformation callerUGI,
JobACL jobOperation) {
return true;
}
@Override
public String getUserName() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public Path getConfFile() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public Map<JobACL, AccessControlList> getJobACLs() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public List<AMInfo> getAMInfos() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public Configuration loadConfFile() {
throw new UnsupportedOperationException();
}
@Override
public void setQueueName(String queueName) {
// do nothing
}
}
/*
   * We follow the pattern of the real XxxImpl. We create a job and initialize
   * it with a full suite of tasks which in turn have one attempt each in the
   * NEW state. Attempts transition only from NEW to RUNNING to SUCCEEDED.
*/
class MyTaskAttemptImpl implements TaskAttempt {
private final TaskAttemptId myAttemptID;
long startMockTime = Long.MIN_VALUE;
long shuffleCompletedTime = Long.MAX_VALUE;
TaskAttemptState overridingState = TaskAttemptState.NEW;
MyTaskAttemptImpl(TaskId taskID, int index, Clock clock) {
myAttemptID = recordFactory.newRecordInstance(TaskAttemptId.class);
myAttemptID.setId(index);
myAttemptID.setTaskId(taskID);
}
void startUp() {
startMockTime = clock.getTime();
overridingState = null;
slotsInUse.addAndGet(taskTypeSlots(myAttemptID.getTaskId().getTaskType()));
System.out.println("TLTRE.MyTaskAttemptImpl.startUp starting " + getID());
SpeculatorEvent event = new SpeculatorEvent(getID().getTaskId(), -1);
dispatcher.getEventHandler().handle(event);
}
@Override
public NodeId getNodeId() throws UnsupportedOperationException{
throw new UnsupportedOperationException();
}
@Override
public TaskAttemptId getID() {
return myAttemptID;
}
@Override
public TaskAttemptReport getReport() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public List<String> getDiagnostics() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public Counters getCounters() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public int getShufflePort() {
throw new UnsupportedOperationException("Not supported yet.");
}
private float getCodeRuntime() {
int taskIndex = myAttemptID.getTaskId().getId();
int attemptIndex = myAttemptID.getId();
float result = 200.0F;
switch (taskIndex % 4) {
case 0:
if (taskIndex % 40 == 0 && attemptIndex == 0) {
result = 600.0F;
break;
}
break;
case 2:
break;
case 1:
result = 150.0F;
break;
case 3:
result = 250.0F;
break;
}
return result;
}
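    // Added summary (editor's note, not part of the original source) of the
    // simulated runtimes produced by getCodeRuntime() above:
    //   taskIndex % 4 == 0 : 200s, except every 40th task's first attempt,
    //                        which is a 600s straggler for speculation to catch
    //   taskIndex % 4 == 1 : 150s
    //   taskIndex % 4 == 2 : 200s
    //   taskIndex % 4 == 3 : 250s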
private float getMapProgress() {
float runtime = getCodeRuntime();
return Math.min
((float) (clock.getTime() - startMockTime) / (runtime * 1000.0F), 1.0F);
}
private float getReduceProgress() {
Job job = myAppContext.getJob(myAttemptID.getTaskId().getJobId());
float runtime = getCodeRuntime();
Collection<Task> allMapTasks = job.getTasks(TaskType.MAP).values();
int numberMaps = allMapTasks.size();
int numberDoneMaps = 0;
for (Task mapTask : allMapTasks) {
if (mapTask.isFinished()) {
++numberDoneMaps;
}
}
if (numberMaps == numberDoneMaps) {
shuffleCompletedTime = Math.min(shuffleCompletedTime, clock.getTime());
return Math.min
((float) (clock.getTime() - shuffleCompletedTime)
/ (runtime * 2000.0F) + 0.5F,
1.0F);
} else {
return ((float) numberDoneMaps) / numberMaps * 0.5F;
}
}
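    // Added worked example (editor's note, not part of the original source):
    // getMapProgress() above is elapsed-time / (runtime * 1000 ms), clamped
    // at 1.0, so a 200s map attempt is at 0.25 progress 50,000 ms of
    // simulated time after startUp(). getReduceProgress() spends its first
    // half tracking map completion (doneMaps / numberMaps * 0.5) and its
    // second half climbing from 0.5 to 1.0 over twice the code runtime once
    // every map has finished.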
// we compute progress from time and an algorithm now
@Override
public float getProgress() {
if (overridingState == TaskAttemptState.NEW) {
return 0.0F;
}
return myAttemptID.getTaskId().getTaskType() == TaskType.MAP ? getMapProgress() : getReduceProgress();
}
@Override
public Phase getPhase() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public TaskAttemptState getState() {
if (overridingState != null) {
return overridingState;
}
TaskAttemptState result
= getProgress() < 1.0F ? TaskAttemptState.RUNNING : TaskAttemptState.SUCCEEDED;
if (result == TaskAttemptState.SUCCEEDED) {
overridingState = TaskAttemptState.SUCCEEDED;
System.out.println("MyTaskAttemptImpl.getState() -- attempt " + myAttemptID + " finished.");
slotsInUse.addAndGet(- taskTypeSlots(myAttemptID.getTaskId().getTaskType()));
(myAttemptID.getTaskId().getTaskType() == TaskType.MAP
? completedMaps : completedReduces).getAndIncrement();
// check for a spectacularly successful speculation
TaskId taskID = myAttemptID.getTaskId();
Task task = myJob.getTask(taskID);
for (TaskAttempt otherAttempt : task.getAttempts().values()) {
if (otherAttempt != this
&& otherAttempt.getState() == TaskAttemptState.RUNNING) {
// we had two instances running. Try to determine how much
// we might have saved by speculation
if (getID().getId() > otherAttempt.getID().getId()) {
// the speculation won
successfulSpeculations.getAndIncrement();
float hisProgress = otherAttempt.getProgress();
long hisStartTime = ((MyTaskAttemptImpl)otherAttempt).startMockTime;
System.out.println("TLTRE:A speculation finished at time "
+ clock.getTime()
+ ". The stalled attempt is at " + (hisProgress * 100.0)
+ "% progress, and it started at "
+ hisStartTime + ", which is "
+ (clock.getTime() - hisStartTime) + " ago.");
long originalTaskEndEstimate
= (hisStartTime
+ estimator.estimatedRuntime(otherAttempt.getID()));
System.out.println(
"TLTRE: We would have expected the original attempt to take "
+ estimator.estimatedRuntime(otherAttempt.getID())
+ ", finishing at " + originalTaskEndEstimate);
long estimatedSavings = originalTaskEndEstimate - clock.getTime();
taskTimeSavedBySpeculation.addAndGet(estimatedSavings);
System.out.println("TLTRE: The task is " + task.getID());
slotsInUse.addAndGet(- taskTypeSlots(myAttemptID.getTaskId().getTaskType()));
((MyTaskAttemptImpl)otherAttempt).overridingState
= TaskAttemptState.KILLED;
} else {
System.out.println(
"TLTRE: The normal attempt beat the speculation in "
+ task.getID());
}
}
}
}
return result;
}
@Override
public boolean isFinished() {
return getProgress() == 1.0F;
}
@Override
public ContainerId getAssignedContainerID() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public String getNodeHttpAddress() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public String getNodeRackName() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public long getLaunchTime() {
return startMockTime;
}
@Override
public long getFinishTime() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public long getShuffleFinishTime() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public long getSortFinishTime() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public String getAssignedContainerMgrAddress() {
throw new UnsupportedOperationException("Not supported yet.");
}
}
class MyAppMaster extends CompositeService {
final Clock clock;
public MyAppMaster(Clock clock) {
super(MyAppMaster.class.getName());
if (clock == null) {
clock = new SystemClock();
}
this.clock = clock;
LOG.info("Created MyAppMaster");
}
}
class MyAppContext implements AppContext {
private final ApplicationAttemptId myAppAttemptID;
private final ApplicationId myApplicationID;
private final JobId myJobID;
private final Map<JobId, Job> allJobs;
MyAppContext(int numberMaps, int numberReduces) {
myApplicationID = ApplicationId.newInstance(clock.getTime(), 1);
myAppAttemptID = ApplicationAttemptId.newInstance(myApplicationID, 0);
myJobID = recordFactory.newRecordInstance(JobId.class);
myJobID.setAppId(myApplicationID);
Job myJob
= new MyJobImpl(myJobID, numberMaps, numberReduces);
allJobs = Collections.singletonMap(myJobID, myJob);
}
@Override
public ApplicationAttemptId getApplicationAttemptId() {
return myAppAttemptID;
}
@Override
public ApplicationId getApplicationID() {
return myApplicationID;
}
@Override
public Job getJob(JobId jobID) {
return allJobs.get(jobID);
}
@Override
public Map<JobId, Job> getAllJobs() {
return allJobs;
}
@Override
public EventHandler getEventHandler() {
return dispatcher.getEventHandler();
}
@Override
public CharSequence getUser() {
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public Clock getClock() {
return clock;
}
@Override
public String getApplicationName() {
return null;
}
@Override
public long getStartTime() {
return 0;
}
@Override
public ClusterInfo getClusterInfo() {
return new ClusterInfo();
}
@Override
public Set<String> getBlacklistedNodes() {
return null;
}
@Override
public ClientToAMTokenSecretManager getClientToAMTokenSecretManager() {
return null;
}
@Override
public boolean isLastAMRetry() {
return false;
}
@Override
public boolean hasSuccessfullyUnregistered() {
// bogus - Not Required
return true;
}
@Override
public String getNMHostname() {
// bogus - Not Required
return null;
}
@Override
public TaskAttemptFinishingMonitor getTaskAttemptFinishingMonitor() {
return null;
}
}
}
| 27,694
| 29.978747
| 108
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.JobStateInternal;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobUpdatedNodesEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl;
import org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl;
import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncher;
import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherEvent;
import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerRemoteLaunchEvent;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.junit.Test;
/**
* Tests the state machine of MR App.
*/
@SuppressWarnings("unchecked")
public class TestMRApp {
@Test
public void testMapReduce() throws Exception {
MRApp app = new MRApp(2, 2, true, this.getClass().getName(), true);
Job job = app.submit(new Configuration());
app.waitForState(job, JobState.SUCCEEDED);
app.verifyCompleted();
Assert.assertEquals(System.getProperty("user.name"),job.getUserName());
}
@Test
public void testZeroMaps() throws Exception {
MRApp app = new MRApp(0, 1, true, this.getClass().getName(), true);
Job job = app.submit(new Configuration());
app.waitForState(job, JobState.SUCCEEDED);
app.verifyCompleted();
}
@Test
public void testZeroMapReduces() throws Exception{
MRApp app = new MRApp(0, 0, true, this.getClass().getName(), true);
Job job = app.submit(new Configuration());
app.waitForState(job, JobState.SUCCEEDED);
}
@Test
public void testCommitPending() throws Exception {
MRApp app = new MRApp(1, 0, false, this.getClass().getName(), true);
Job job = app.submit(new Configuration());
app.waitForState(job, JobState.RUNNING);
Assert.assertEquals("Num tasks not correct", 1, job.getTasks().size());
Iterator<Task> it = job.getTasks().values().iterator();
Task task = it.next();
app.waitForState(task, TaskState.RUNNING);
TaskAttempt attempt = task.getAttempts().values().iterator().next();
app.waitForState(attempt, TaskAttemptState.RUNNING);
//send the commit pending signal to the task
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(
attempt.getID(),
TaskAttemptEventType.TA_COMMIT_PENDING));
//wait for first attempt to commit pending
app.waitForState(attempt, TaskAttemptState.COMMIT_PENDING);
//re-send the commit pending signal to the task
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(
attempt.getID(),
TaskAttemptEventType.TA_COMMIT_PENDING));
//the task attempt should be still at COMMIT_PENDING
app.waitForState(attempt, TaskAttemptState.COMMIT_PENDING);
//send the done signal to the task
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(
task.getAttempts().values().iterator().next().getID(),
TaskAttemptEventType.TA_DONE));
app.waitForState(job, JobState.SUCCEEDED);
}
//@Test
public void testCompletedMapsForReduceSlowstart() throws Exception {
MRApp app = new MRApp(2, 1, false, this.getClass().getName(), true);
Configuration conf = new Configuration();
//after half of the map completion, reduce will start
conf.setFloat(MRJobConfig.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART, 0.5f);
//uberization forces full slowstart (1.0), so disable that
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
Job job = app.submit(conf);
app.waitForState(job, JobState.RUNNING);
//all maps would be running
Assert.assertEquals("Num tasks not correct", 3, job.getTasks().size());
Iterator<Task> it = job.getTasks().values().iterator();
Task mapTask1 = it.next();
Task mapTask2 = it.next();
Task reduceTask = it.next();
// all maps must be running
app.waitForState(mapTask1, TaskState.RUNNING);
app.waitForState(mapTask2, TaskState.RUNNING);
TaskAttempt task1Attempt = mapTask1.getAttempts().values().iterator().next();
TaskAttempt task2Attempt = mapTask2.getAttempts().values().iterator().next();
    //before sending the TA_DONE event, make sure attempt has come to
//RUNNING state
app.waitForState(task1Attempt, TaskAttemptState.RUNNING);
app.waitForState(task2Attempt, TaskAttemptState.RUNNING);
// reduces must be in NEW state
Assert.assertEquals("Reduce Task state not correct",
TaskState.NEW, reduceTask.getReport().getTaskState());
//send the done signal to the 1st map task
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(
mapTask1.getAttempts().values().iterator().next().getID(),
TaskAttemptEventType.TA_DONE));
//wait for first map task to complete
app.waitForState(mapTask1, TaskState.SUCCEEDED);
//Once the first map completes, it will schedule the reduces
//now reduce must be running
app.waitForState(reduceTask, TaskState.RUNNING);
//send the done signal to 2nd map and the reduce to complete the job
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(
mapTask2.getAttempts().values().iterator().next().getID(),
TaskAttemptEventType.TA_DONE));
app.getContext().getEventHandler().handle(
new TaskAttemptEvent(
reduceTask.getAttempts().values().iterator().next().getID(),
TaskAttemptEventType.TA_DONE));
app.waitForState(job, JobState.SUCCEEDED);
}
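  // Added note (editor's note, not part of the original source): with 2 maps
  // and COMPLETED_MAPS_FOR_REDUCE_SLOWSTART = 0.5f as configured above, the
  // reduce is scheduled once 2 * 0.5 = 1 map has succeeded, which is what the
  // waitForState(reduceTask, TaskState.RUNNING) check after the first TA_DONE
  // verifies.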
/**
* The test verifies that the AM re-runs maps that have run on bad nodes. It
* also verifies that the AM records all success/killed events so that reduces
* are notified about map output status changes. It also verifies that the
* re-run information is preserved across AM restart
*/
@Test
public void testUpdatedNodes() throws Exception {
int runCount = 0;
MRApp app = new MRAppWithHistory(2, 2, false, this.getClass().getName(),
true, ++runCount);
Configuration conf = new Configuration();
// after half of the map completion, reduce will start
conf.setFloat(MRJobConfig.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART, 0.5f);
// uberization forces full slowstart (1.0), so disable that
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
Job job = app.submit(conf);
app.waitForState(job, JobState.RUNNING);
Assert.assertEquals("Num tasks not correct", 4, job.getTasks().size());
Iterator<Task> it = job.getTasks().values().iterator();
Task mapTask1 = it.next();
Task mapTask2 = it.next();
// all maps must be running
app.waitForState(mapTask1, TaskState.RUNNING);
app.waitForState(mapTask2, TaskState.RUNNING);
TaskAttempt task1Attempt = mapTask1.getAttempts().values().iterator()
.next();
TaskAttempt task2Attempt = mapTask2.getAttempts().values().iterator()
.next();
NodeId node1 = task1Attempt.getNodeId();
NodeId node2 = task2Attempt.getNodeId();
Assert.assertEquals(node1, node2);
// send the done signal to the task
app.getContext()
.getEventHandler()
.handle(
new TaskAttemptEvent(task1Attempt.getID(),
TaskAttemptEventType.TA_DONE));
app.getContext()
.getEventHandler()
.handle(
new TaskAttemptEvent(task2Attempt.getID(),
TaskAttemptEventType.TA_DONE));
// all maps must be succeeded
app.waitForState(mapTask1, TaskState.SUCCEEDED);
app.waitForState(mapTask2, TaskState.SUCCEEDED);
TaskAttemptCompletionEvent[] events = job.getTaskAttemptCompletionEvents(0,
100);
Assert.assertEquals("Expecting 2 completion events for success", 2,
events.length);
// send updated nodes info
ArrayList<NodeReport> updatedNodes = new ArrayList<NodeReport>();
NodeReport nr = RecordFactoryProvider.getRecordFactory(null)
.newRecordInstance(NodeReport.class);
nr.setNodeId(node1);
nr.setNodeState(NodeState.UNHEALTHY);
updatedNodes.add(nr);
app.getContext().getEventHandler()
.handle(new JobUpdatedNodesEvent(job.getID(), updatedNodes));
app.waitForState(task1Attempt, TaskAttemptState.KILLED);
app.waitForState(task2Attempt, TaskAttemptState.KILLED);
events = job.getTaskAttemptCompletionEvents(0, 100);
Assert.assertEquals("Expecting 2 more completion events for killed", 4,
events.length);
// all maps must be back to running
app.waitForState(mapTask1, TaskState.RUNNING);
app.waitForState(mapTask2, TaskState.RUNNING);
Iterator<TaskAttempt> itr = mapTask1.getAttempts().values().iterator();
itr.next();
task1Attempt = itr.next();
// send the done signal to the task
app.getContext()
.getEventHandler()
.handle(
new TaskAttemptEvent(task1Attempt.getID(),
TaskAttemptEventType.TA_DONE));
// map1 must be succeeded. map2 must be running
app.waitForState(mapTask1, TaskState.SUCCEEDED);
app.waitForState(mapTask2, TaskState.RUNNING);
events = job.getTaskAttemptCompletionEvents(0, 100);
Assert.assertEquals("Expecting 1 more completion events for success", 5,
events.length);
// Crash the app again.
app.stop();
// rerun
// in rerun the 1st map will be recovered from previous run
app = new MRAppWithHistory(2, 2, false, this.getClass().getName(), false,
++runCount);
conf = new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
job = app.submit(conf);
app.waitForState(job, JobState.RUNNING);
Assert.assertEquals("No of tasks not correct", 4, job.getTasks().size());
it = job.getTasks().values().iterator();
mapTask1 = it.next();
mapTask2 = it.next();
Task reduceTask1 = it.next();
Task reduceTask2 = it.next();
// map 1 will be recovered, no need to send done
app.waitForState(mapTask1, TaskState.SUCCEEDED);
app.waitForState(mapTask2, TaskState.RUNNING);
events = job.getTaskAttemptCompletionEvents(0, 100);
Assert.assertEquals(
"Expecting 2 completion events for killed & success of map1", 2,
events.length);
task2Attempt = mapTask2.getAttempts().values().iterator().next();
app.getContext()
.getEventHandler()
.handle(
new TaskAttemptEvent(task2Attempt.getID(),
TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask2, TaskState.SUCCEEDED);
events = job.getTaskAttemptCompletionEvents(0, 100);
Assert.assertEquals("Expecting 1 more completion events for success", 3,
events.length);
app.waitForState(reduceTask1, TaskState.RUNNING);
app.waitForState(reduceTask2, TaskState.RUNNING);
TaskAttempt task3Attempt = reduceTask1.getAttempts().values().iterator()
.next();
app.getContext()
.getEventHandler()
.handle(
new TaskAttemptEvent(task3Attempt.getID(),
TaskAttemptEventType.TA_DONE));
app.waitForState(reduceTask1, TaskState.SUCCEEDED);
app.getContext()
.getEventHandler()
.handle(
new TaskAttemptEvent(task3Attempt.getID(),
TaskAttemptEventType.TA_KILL));
app.waitForState(reduceTask1, TaskState.SUCCEEDED);
TaskAttempt task4Attempt = reduceTask2.getAttempts().values().iterator()
.next();
app.getContext()
.getEventHandler()
.handle(
new TaskAttemptEvent(task4Attempt.getID(),
TaskAttemptEventType.TA_DONE));
app.waitForState(reduceTask2, TaskState.SUCCEEDED);
events = job.getTaskAttemptCompletionEvents(0, 100);
Assert.assertEquals("Expecting 2 more completion events for reduce success",
5, events.length);
// job succeeds
app.waitForState(job, JobState.SUCCEEDED);
}
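  // Added bookkeeping summary for testUpdatedNodes above (editor's note, not
  // part of the original source): the completion-event count grows
  // 2 (both maps succeed) -> 4 (both attempts killed on the unhealthy node)
  // -> 5 (map1 re-run succeeds); after the AM restart only map1's
  // killed/success pair is recovered (2), then map2 succeeds (3) and the two
  // reduces succeed (5).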
@Test
public void testJobError() throws Exception {
MRApp app = new MRApp(1, 0, false, this.getClass().getName(), true);
Job job = app.submit(new Configuration());
app.waitForState(job, JobState.RUNNING);
Assert.assertEquals("Num tasks not correct", 1, job.getTasks().size());
Iterator<Task> it = job.getTasks().values().iterator();
Task task = it.next();
app.waitForState(task, TaskState.RUNNING);
//send an invalid event on task at current state
app.getContext().getEventHandler().handle(
new TaskEvent(
task.getID(), TaskEventType.T_SCHEDULE));
//this must lead to job error
app.waitForState(job, JobState.ERROR);
}
@SuppressWarnings("resource")
@Test
public void testJobSuccess() throws Exception {
MRApp app = new MRApp(2, 2, true, this.getClass().getName(), true, false);
JobImpl job = (JobImpl) app.submit(new Configuration());
app.waitForInternalState(job, JobStateInternal.SUCCEEDED);
// AM is not unregistered
Assert.assertEquals(JobState.RUNNING, job.getState());
// imitate that AM is unregistered
app.successfullyUnregistered.set(true);
app.waitForState(job, JobState.SUCCEEDED);
}
@Test
public void testJobRebootNotLastRetryOnUnregistrationFailure()
throws Exception {
MRApp app = new MRApp(1, 0, false, this.getClass().getName(), true);
Job job = app.submit(new Configuration());
app.waitForState(job, JobState.RUNNING);
Assert.assertEquals("Num tasks not correct", 1, job.getTasks().size());
Iterator<Task> it = job.getTasks().values().iterator();
Task task = it.next();
app.waitForState(task, TaskState.RUNNING);
    //send a reboot event
app.getContext().getEventHandler().handle(new JobEvent(job.getID(),
JobEventType.JOB_AM_REBOOT));
    // return external state as RUNNING since otherwise the JobClient will
// prematurely exit.
app.waitForState(job, JobState.RUNNING);
}
@Test
public void testJobRebootOnLastRetryOnUnregistrationFailure()
throws Exception {
    // set startCount to 2 since this is the last retry, which equals
// DEFAULT_MAX_AM_RETRY
// The last param mocks the unregistration failure
MRApp app = new MRApp(1, 0, false, this.getClass().getName(), true, 2, false);
Configuration conf = new Configuration();
Job job = app.submit(conf);
app.waitForState(job, JobState.RUNNING);
Assert.assertEquals("Num tasks not correct", 1, job.getTasks().size());
Iterator<Task> it = job.getTasks().values().iterator();
Task task = it.next();
app.waitForState(task, TaskState.RUNNING);
    //send a reboot event
app.getContext().getEventHandler().handle(new JobEvent(job.getID(),
JobEventType.JOB_AM_REBOOT));
app.waitForInternalState((JobImpl) job, JobStateInternal.REBOOT);
    // return external state as RUNNING if this is the last retry while
// unregistration fails
app.waitForState(job, JobState.RUNNING);
}
private final class MRAppWithSpiedJob extends MRApp {
private JobImpl spiedJob;
private MRAppWithSpiedJob(int maps, int reduces, boolean autoComplete,
String testName, boolean cleanOnStart) {
super(maps, reduces, autoComplete, testName, cleanOnStart);
}
@Override
protected Job createJob(Configuration conf, JobStateInternal forcedState,
String diagnostic) {
spiedJob = spy((JobImpl) super.createJob(conf, forcedState, diagnostic));
((AppContext) getContext()).getAllJobs().put(spiedJob.getID(), spiedJob);
return spiedJob;
}
}
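/**
 * getAllCounters() may be called repeatedly after the job has finished, but
 * the final counters should be constructed exactly once.
 */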
@Test
public void testCountersOnJobFinish() throws Exception {
MRAppWithSpiedJob app =
new MRAppWithSpiedJob(1, 1, true, this.getClass().getName(), true);
JobImpl job = (JobImpl)app.submit(new Configuration());
app.waitForState(job, JobState.SUCCEEDED);
app.verifyCompleted();
System.out.println(job.getAllCounters());
// Just call getCounters
job.getAllCounters();
job.getAllCounters();
// Should be called only once
verify(job, times(1)).constructFinalFullcounters();
}
@Test
public void checkJobStateTypeConversion() {
//verify that all states can be converted without
// throwing an exception
for (JobState state : JobState.values()) {
TypeConverter.fromYarn(state);
}
}
@Test
public void checkTaskStateTypeConversion() {
//verify that all states can be converted without
// throwing an exception
for (TaskState state : TaskState.values()) {
TypeConverter.fromYarn(state);
}
}
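// Captures the container handed to the ContainerLauncher so that
// testContainerPassThrough can assert it is the very same instance the
// task attempt was assigned.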
private Container containerObtainedByContainerLauncher;
@Test
public void testContainerPassThrough() throws Exception {
MRApp app = new MRApp(0, 1, true, this.getClass().getName(), true) {
@Override
protected ContainerLauncher createContainerLauncher(AppContext context) {
return new MockContainerLauncher() {
@Override
public void handle(ContainerLauncherEvent event) {
if (event instanceof ContainerRemoteLaunchEvent) {
containerObtainedByContainerLauncher =
((ContainerRemoteLaunchEvent) event).getAllocatedContainer();
}
super.handle(event);
}
};
};
};
Job job = app.submit(new Configuration());
app.waitForState(job, JobState.SUCCEEDED);
app.verifyCompleted();
Collection<Task> tasks = job.getTasks().values();
Collection<TaskAttempt> taskAttempts =
tasks.iterator().next().getAttempts().values();
TaskAttemptImpl taskAttempt =
(TaskAttemptImpl) taskAttempts.iterator().next();
// Container from RM should pass through to the launcher. Container object
// should be the same.
Assert.assertTrue(taskAttempt.container
== containerObtainedByContainerLauncher);
}
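/**
 * MRApp variant that uses a real JobHistoryEventHandler for job history
 * events.
 */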
private final class MRAppWithHistory extends MRApp {
public MRAppWithHistory(int maps, int reduces, boolean autoComplete,
String testName, boolean cleanOnStart, int startCount) {
super(maps, reduces, autoComplete, testName, cleanOnStart, startCount);
}
@Override
protected EventHandler<JobHistoryEvent> createJobHistoryHandler(
AppContext context) {
JobHistoryEventHandler eventHandler = new JobHistoryEventHandler(context,
getStartCount());
return eventHandler;
}
}
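/**
 * Allows a subset of the tests to be run directly, without a JUnit runner.
 */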
public static void main(String[] args) throws Exception {
TestMRApp t = new TestMRApp();
t.testMapReduce();
t.testZeroMapReduces();
t.testCommitPending();
t.testCompletedMapsForReduceSlowstart();
t.testJobError();
t.testCountersOnJobFinish();
}
}
| 21,033 | 37.313297 | 82 | java |
hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppForTest.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.webapp;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
/**
 * Class AppForTest exposes the App methods setJob and setTask for testing
*/
public class AppForTest extends App {
public AppForTest(AppContext ctx) {
super(ctx);
}
public void setJob(Job job) {
super.setJob(job);
}
@Override
public void setTask(Task task) {
super.setTask(task);
}
}
| 1,296 | 29.880952 | 74 | java |
hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAppController.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.webapp;
import static org.mockito.Mockito.*;
import java.io.IOException;
import java.util.Iterator;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.webapp.Controller.RequestContext;
import org.apache.hadoop.yarn.webapp.MimeType;
import org.apache.hadoop.yarn.webapp.ResponseInfo;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestAppController {
private AppControllerForTest appController;
private RequestContext ctx;
private Job job;
private static final String taskId = "task_01_01_m_01";
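/**
 * Builds an AppControllerForTest backed by a mocked AppContext, Job and Task,
 * with the job id "job_01_01" and the task id above preset as request
 * properties.
 */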
@Before
public void setUp() throws IOException {
AppContext context = mock(AppContext.class);
when(context.getApplicationID()).thenReturn(
ApplicationId.newInstance(0, 0));
when(context.getApplicationName()).thenReturn("AppName");
when(context.getUser()).thenReturn("User");
when(context.getStartTime()).thenReturn(System.currentTimeMillis());
job = mock(Job.class);
Task task = mock(Task.class);
when(job.getTask(any(TaskId.class))).thenReturn(task);
JobId jobID = MRApps.toJobID("job_01_01");
when(context.getJob(jobID)).thenReturn(job);
when(job.checkAccess(any(UserGroupInformation.class), any(JobACL.class)))
.thenReturn(true);
App app = new App(context);
Configuration configuration = new Configuration();
ctx = mock(RequestContext.class);
appController = new AppControllerForTest(app, configuration, ctx);
appController.getProperty().put(AMParams.JOB_ID, "job_01_01");
appController.getProperty().put(AMParams.TASK_ID, taskId);
}
/**
 * Test that a bad request sets HTTP status 400.
*/
@Test
public void testBadRequest() {
String message = "test string";
appController.badRequest(message);
verifyExpectations(message);
}
@Test
public void testBadRequestWithNullMessage() {
// It should not throw NullPointerException
appController.badRequest(null);
verifyExpectations(StringUtils.EMPTY);
}
private void verifyExpectations(String message) {
verify(ctx).setStatus(400);
assertEquals("application_0_0000", appController.getProperty()
.get("app.id"));
assertNotNull(appController.getProperty().get("rm.web"));
assertEquals("Bad request: " + message,
appController.getProperty().get("title"));
}
/**
* Test the method 'info'.
*/
@Test
public void testInfo() {
appController.info();
Iterator<ResponseInfo.Item> iterator = appController.getResponseInfo()
.iterator();
ResponseInfo.Item item = iterator.next();
assertEquals("Application ID:", item.key);
assertEquals("application_0_0000", item.value);
item = iterator.next();
assertEquals("Application Name:", item.key);
assertEquals("AppName", item.value);
item = iterator.next();
assertEquals("User:", item.key);
assertEquals("User", item.value);
item = iterator.next();
assertEquals("Started on:", item.key);
item = iterator.next();
assertEquals("Elasped: ", item.key);
}
/**
 * Test method 'job'. Should print an error message or set the JobPage class for rendering
*/
@Test
public void testGetJob() {
when(job.checkAccess(any(UserGroupInformation.class), any(JobACL.class)))
.thenReturn(false);
appController.job();
verify(appController.response()).setContentType(MimeType.TEXT);
assertEquals(
"Access denied: User user does not have permission to view job job_01_01",
appController.getData());
when(job.checkAccess(any(UserGroupInformation.class), any(JobACL.class)))
.thenReturn(true);
appController.getProperty().remove(AMParams.JOB_ID);
appController.job();
assertEquals(
"Access denied: User user does not have permission to view job job_01_01Bad Request: Missing job ID",
appController.getData());
appController.getProperty().put(AMParams.JOB_ID, "job_01_01");
appController.job();
assertEquals(JobPage.class, appController.getClazz());
}
/**
 * Test method 'jobCounters'. Should print an error message or set the CountersPage class for rendering
*/
@Test
public void testGetJobCounters() {
when(job.checkAccess(any(UserGroupInformation.class), any(JobACL.class)))
.thenReturn(false);
appController.jobCounters();
verify(appController.response()).setContentType(MimeType.TEXT);
assertEquals(
"Access denied: User user does not have permission to view job job_01_01",
appController.getData());
when(job.checkAccess(any(UserGroupInformation.class), any(JobACL.class)))
.thenReturn(true);
appController.getProperty().remove(AMParams.JOB_ID);
appController.jobCounters();
assertEquals(
"Access denied: User user does not have permission to view job job_01_01Bad Request: Missing job ID",
appController.getData());
appController.getProperty().put(AMParams.JOB_ID, "job_01_01");
appController.jobCounters();
assertEquals(CountersPage.class, appController.getClazz());
}
/**
 * Test method 'taskCounters'. Should print an error message or set the CountersPage class for rendering
*/
@Test
public void testGetTaskCounters() {
when(job.checkAccess(any(UserGroupInformation.class), any(JobACL.class)))
.thenReturn(false);
appController.taskCounters();
verify(appController.response()).setContentType(MimeType.TEXT);
assertEquals(
"Access denied: User user does not have permission to view job job_01_01",
appController.getData());
when(job.checkAccess(any(UserGroupInformation.class), any(JobACL.class)))
.thenReturn(true);
appController.getProperty().remove(AMParams.TASK_ID);
appController.taskCounters();
assertEquals(
"Access denied: User user does not have permission to view job job_01_01missing task ID",
appController.getData());
appController.getProperty().put(AMParams.TASK_ID, taskId);
appController.taskCounters();
assertEquals(CountersPage.class, appController.getClazz());
}
/**
* Test method 'singleJobCounter'. Should set SingleCounterPage class for rendering
*/
@Test
public void testGetSingleJobCounter() throws IOException {
appController.singleJobCounter();
assertEquals(SingleCounterPage.class, appController.getClazz());
}
/**
* Test method 'singleTaskCounter'. Should set SingleCounterPage class for rendering
*/
@Test
public void testGetSingleTaskCounter() throws IOException {
appController.singleTaskCounter();
assertEquals(SingleCounterPage.class, appController.getClazz());
assertNotNull(appController.getProperty().get(AppController.COUNTER_GROUP));
assertNotNull(appController.getProperty().get(AppController.COUNTER_NAME));
}
/**
* Test method 'tasks'. Should set TasksPage class for rendering
*/
@Test
public void testTasks() {
appController.tasks();
assertEquals(TasksPage.class, appController.getClazz());
}
/**
 * Test method 'task'. Should set the TaskPage class for rendering and the title information
*/
@Test
public void testTask() {
appController.task();
assertEquals("Attempts for " + taskId ,
appController.getProperty().get("title"));
assertEquals(TaskPage.class, appController.getClazz());
}
/**
* Test method 'conf'. Should set JobConfPage class for rendering
*/
@Test
public void testConfiguration() {
appController.conf();
assertEquals(JobConfPage.class, appController.getClazz());
}
/**
 * Test method 'attempts'. Should set the AttemptsPage class for rendering or print an error message
*/
@Test
public void testAttempts() {
appController.getProperty().remove(AMParams.TASK_TYPE);
when(job.checkAccess(any(UserGroupInformation.class), any(JobACL.class)))
.thenReturn(false);
appController.attempts();
verify(appController.response()).setContentType(MimeType.TEXT);
assertEquals(
"Access denied: User user does not have permission to view job job_01_01",
appController.getData());
when(job.checkAccess(any(UserGroupInformation.class), any(JobACL.class)))
.thenReturn(true);
appController.getProperty().remove(AMParams.TASK_ID);
appController.attempts();
assertEquals(
"Access denied: User user does not have permission to view job job_01_01",
appController.getData());
appController.getProperty().put(AMParams.TASK_ID, taskId);
appController.attempts();
assertEquals("Bad request: missing task-type.", appController.getProperty()
.get("title"));
appController.getProperty().put(AMParams.TASK_TYPE, "m");
appController.attempts();
assertEquals("Bad request: missing attempt-state.", appController
.getProperty().get("title"));
appController.getProperty().put(AMParams.ATTEMPT_STATE, "State");
appController.attempts();
assertEquals(AttemptsPage.class, appController.getClazz());
}
}
| 10,428 | 32.533762 | 109 | java |
hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.webapp;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.StringReader;
import java.util.List;
import java.util.Map;
import javax.ws.rs.core.MediaType;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.MockAppContext;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
import org.codehaus.jettison.json.JSONArray;
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;
import org.junit.Before;
import org.junit.Test;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.NodeList;
import org.xml.sax.InputSource;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.servlet.GuiceServletContextListener;
import com.google.inject.servlet.ServletModule;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.UniformInterfaceException;
import com.sun.jersey.api.client.WebResource;
import com.sun.jersey.api.client.ClientResponse.Status;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
import com.sun.jersey.test.framework.JerseyTest;
import com.sun.jersey.test.framework.WebAppDescriptor;
/**
 * Test the app master web service REST API for getting task attempts, a
* specific task attempt, and task attempt counters
*
* /ws/v1/mapreduce/jobs/{jobid}/tasks/{taskid}/attempts
* /ws/v1/mapreduce/jobs/{jobid}/tasks/{taskid}/attempts/{attemptid}
* /ws/v1/mapreduce/jobs/{jobid}/tasks/{taskid}/attempts/{attemptid}/counters
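 *
 * A successful JSON response for the attempts list is expected to have the
 * shape (inferred from the assertions in this class):
 * {"taskAttempts": {"taskAttempt": [ {...}, ... ]}}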
*/
public class TestAMWebServicesAttempts extends JerseyTest {
private static Configuration conf = new Configuration();
private static AppContext appContext;
private Injector injector = Guice.createInjector(new ServletModule() {
@Override
protected void configureServlets() {
appContext = new MockAppContext(0, 1, 2, 1);
bind(JAXBContextResolver.class);
bind(AMWebServices.class);
bind(GenericExceptionHandler.class);
bind(AppContext.class).toInstance(appContext);
bind(Configuration.class).toInstance(conf);
serve("/*").with(GuiceContainer.class);
}
});
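/**
 * Hands the Guice injector above to the Jersey/Guice servlet container used
 * by this JerseyTest.
 */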
public class GuiceServletConfig extends GuiceServletContextListener {
@Override
protected Injector getInjector() {
return injector;
}
}
@Before
@Override
public void setUp() throws Exception {
super.setUp();
}
public TestAMWebServicesAttempts() {
super(new WebAppDescriptor.Builder(
"org.apache.hadoop.mapreduce.v2.app.webapp")
.contextListenerClass(GuiceServletConfig.class)
.filterClass(com.google.inject.servlet.GuiceFilter.class)
.contextPath("jersey-guice-filter").servletPath("/").build());
}
@Test
public void testTaskAttempts() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
for (Task task : jobsMap.get(id).getTasks().values()) {
String tid = MRApps.toString(task.getID());
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").path(jobId).path("tasks").path(tid).path("attempts")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
verifyAMTaskAttempts(json, task);
}
}
}
@Test
public void testTaskAttemptsSlash() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
for (Task task : jobsMap.get(id).getTasks().values()) {
String tid = MRApps.toString(task.getID());
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").path(jobId).path("tasks").path(tid).path("attempts/")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
verifyAMTaskAttempts(json, task);
}
}
}
@Test
public void testTaskAttemptsDefault() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
for (Task task : jobsMap.get(id).getTasks().values()) {
String tid = MRApps.toString(task.getID());
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").path(jobId).path("tasks").path(tid).path("attempts")
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
verifyAMTaskAttempts(json, task);
}
}
}
@Test
public void testTaskAttemptsXML() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
for (Task task : jobsMap.get(id).getTasks().values()) {
String tid = MRApps.toString(task.getID());
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").path(jobId).path("tasks").path(tid).path("attempts")
.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
String xml = response.getEntity(String.class);
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
InputSource is = new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom = db.parse(is);
NodeList attempts = dom.getElementsByTagName("taskAttempts");
assertEquals("incorrect number of elements", 1, attempts.getLength());
NodeList nodes = dom.getElementsByTagName("taskAttempt");
verifyAMTaskAttemptsXML(nodes, task);
}
}
}
@Test
public void testTaskAttemptId() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
for (Task task : jobsMap.get(id).getTasks().values()) {
String tid = MRApps.toString(task.getID());
for (TaskAttempt att : task.getAttempts().values()) {
TaskAttemptId attemptid = att.getID();
String attid = MRApps.toString(attemptid);
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").path(jobId).path("tasks").path(tid)
.path("attempts").path(attid).accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("taskAttempt");
verifyAMTaskAttempt(info, att, task.getType());
}
}
}
}
@Test
public void testTaskAttemptIdSlash() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
for (Task task : jobsMap.get(id).getTasks().values()) {
String tid = MRApps.toString(task.getID());
for (TaskAttempt att : task.getAttempts().values()) {
TaskAttemptId attemptid = att.getID();
String attid = MRApps.toString(attemptid);
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").path(jobId).path("tasks").path(tid)
.path("attempts").path(attid + "/")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("taskAttempt");
verifyAMTaskAttempt(info, att, task.getType());
}
}
}
}
@Test
public void testTaskAttemptIdDefault() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
for (Task task : jobsMap.get(id).getTasks().values()) {
String tid = MRApps.toString(task.getID());
for (TaskAttempt att : task.getAttempts().values()) {
TaskAttemptId attemptid = att.getID();
String attid = MRApps.toString(attemptid);
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").path(jobId).path("tasks").path(tid)
.path("attempts").path(attid).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("taskAttempt");
verifyAMTaskAttempt(info, att, task.getType());
}
}
}
}
@Test
public void testTaskAttemptIdXML() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
for (Task task : jobsMap.get(id).getTasks().values()) {
String tid = MRApps.toString(task.getID());
for (TaskAttempt att : task.getAttempts().values()) {
TaskAttemptId attemptid = att.getID();
String attid = MRApps.toString(attemptid);
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").path(jobId).path("tasks").path(tid)
.path("attempts").path(attid).accept(MediaType.APPLICATION_XML)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
String xml = response.getEntity(String.class);
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
InputSource is = new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom = db.parse(is);
NodeList nodes = dom.getElementsByTagName("taskAttempt");
for (int i = 0; i < nodes.getLength(); i++) {
Element element = (Element) nodes.item(i);
verifyAMTaskAttemptXML(element, att, task.getType());
}
}
}
}
}
@Test
public void testTaskAttemptIdBogus() throws JSONException, Exception {
testTaskAttemptIdErrorGeneric("bogusid",
"java.lang.Exception: TaskAttemptId string : bogusid is not properly formed");
}
@Test
public void testTaskAttemptIdNonExist() throws JSONException, Exception {
testTaskAttemptIdErrorGeneric(
"attempt_0_12345_m_000000_0",
"java.lang.Exception: Error getting info on task attempt id attempt_0_12345_m_000000_0");
}
@Test
public void testTaskAttemptIdInvalid() throws JSONException, Exception {
testTaskAttemptIdErrorGeneric("attempt_0_12345_d_000000_0",
"java.lang.Exception: Bad TaskType identifier. TaskAttemptId string : attempt_0_12345_d_000000_0 is not properly formed.");
}
@Test
public void testTaskAttemptIdInvalid2() throws JSONException, Exception {
testTaskAttemptIdErrorGeneric("attempt_12345_m_000000_0",
"java.lang.Exception: TaskAttemptId string : attempt_12345_m_000000_0 is not properly formed");
}
@Test
public void testTaskAttemptIdInvalid3() throws JSONException, Exception {
testTaskAttemptIdErrorGeneric("attempt_0_12345_m_000000",
"java.lang.Exception: TaskAttemptId string : attempt_0_12345_m_000000 is not properly formed");
}
private void testTaskAttemptIdErrorGeneric(String attid, String error)
throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
for (Task task : jobsMap.get(id).getTasks().values()) {
String tid = MRApps.toString(task.getID());
try {
r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId)
.path("tasks").path(tid).path("attempts").path(attid)
.accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
fail("should have thrown exception on invalid uri");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject msg = response.getEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements", 3, exception.length());
String message = exception.getString("message");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
WebServicesTestUtils.checkStringMatch("exception message", error,
message);
WebServicesTestUtils.checkStringMatch("exception type",
"NotFoundException", type);
WebServicesTestUtils.checkStringMatch("exception classname",
"org.apache.hadoop.yarn.webapp.NotFoundException", classname);
}
}
}
}
public void verifyAMTaskAttemptXML(Element element, TaskAttempt att,
TaskType ttype) {
verifyTaskAttemptGeneric(att, ttype,
WebServicesTestUtils.getXmlString(element, "id"),
WebServicesTestUtils.getXmlString(element, "state"),
WebServicesTestUtils.getXmlString(element, "type"),
WebServicesTestUtils.getXmlString(element, "rack"),
WebServicesTestUtils.getXmlString(element, "nodeHttpAddress"),
WebServicesTestUtils.getXmlString(element, "diagnostics"),
WebServicesTestUtils.getXmlString(element, "assignedContainerId"),
WebServicesTestUtils.getXmlLong(element, "startTime"),
WebServicesTestUtils.getXmlLong(element, "finishTime"),
WebServicesTestUtils.getXmlLong(element, "elapsedTime"),
WebServicesTestUtils.getXmlFloat(element, "progress"));
if (ttype == TaskType.REDUCE) {
verifyReduceTaskAttemptGeneric(att,
WebServicesTestUtils.getXmlLong(element, "shuffleFinishTime"),
WebServicesTestUtils.getXmlLong(element, "mergeFinishTime"),
WebServicesTestUtils.getXmlLong(element, "elapsedShuffleTime"),
WebServicesTestUtils.getXmlLong(element, "elapsedMergeTime"),
WebServicesTestUtils.getXmlLong(element, "elapsedReduceTime"));
}
}
public void verifyAMTaskAttempt(JSONObject info, TaskAttempt att,
TaskType ttype) throws JSONException {
if (ttype == TaskType.REDUCE) {
assertEquals("incorrect number of elements", 17, info.length());
} else {
assertEquals("incorrect number of elements", 12, info.length());
}
verifyTaskAttemptGeneric(att, ttype, info.getString("id"),
info.getString("state"), info.getString("type"),
info.getString("rack"), info.getString("nodeHttpAddress"),
info.getString("diagnostics"), info.getString("assignedContainerId"),
info.getLong("startTime"), info.getLong("finishTime"),
info.getLong("elapsedTime"), (float) info.getDouble("progress"));
if (ttype == TaskType.REDUCE) {
verifyReduceTaskAttemptGeneric(att, info.getLong("shuffleFinishTime"),
info.getLong("mergeFinishTime"), info.getLong("elapsedShuffleTime"),
info.getLong("elapsedMergeTime"), info.getLong("elapsedReduceTime"));
}
}
public void verifyAMTaskAttempts(JSONObject json, Task task)
throws JSONException {
assertEquals("incorrect number of elements", 1, json.length());
JSONObject attempts = json.getJSONObject("taskAttempts");
assertEquals("incorrect number of elements", 1, json.length());
JSONArray arr = attempts.getJSONArray("taskAttempt");
for (TaskAttempt att : task.getAttempts().values()) {
TaskAttemptId id = att.getID();
String attid = MRApps.toString(id);
Boolean found = false;
for (int i = 0; i < arr.length(); i++) {
JSONObject info = arr.getJSONObject(i);
if (attid.matches(info.getString("id"))) {
found = true;
verifyAMTaskAttempt(info, att, task.getType());
}
}
assertTrue("task attempt with id: " + attid
+ " not in web service output", found);
}
}
public void verifyAMTaskAttemptsXML(NodeList nodes, Task task) {
assertEquals("incorrect number of elements", 1, nodes.getLength());
for (TaskAttempt att : task.getAttempts().values()) {
TaskAttemptId id = att.getID();
String attid = MRApps.toString(id);
Boolean found = false;
for (int i = 0; i < nodes.getLength(); i++) {
Element element = (Element) nodes.item(i);
if (attid.matches(WebServicesTestUtils.getXmlString(element, "id"))) {
found = true;
verifyAMTaskAttemptXML(element, att, task.getType());
}
}
assertTrue("task with id: " + attid + " not in web service output", found);
}
}
public void verifyTaskAttemptGeneric(TaskAttempt ta, TaskType ttype,
String id, String state, String type, String rack,
String nodeHttpAddress, String diagnostics, String assignedContainerId,
long startTime, long finishTime, long elapsedTime, float progress) {
TaskAttemptId attid = ta.getID();
String attemptId = MRApps.toString(attid);
WebServicesTestUtils.checkStringMatch("id", attemptId, id);
WebServicesTestUtils.checkStringMatch("type", ttype.toString(), type);
WebServicesTestUtils.checkStringMatch("state", ta.getState().toString(),
state);
WebServicesTestUtils.checkStringMatch("rack", ta.getNodeRackName(), rack);
WebServicesTestUtils.checkStringMatch("nodeHttpAddress",
ta.getNodeHttpAddress(), nodeHttpAddress);
String expectDiag = "";
List<String> diagnosticsList = ta.getDiagnostics();
if (diagnosticsList != null && !diagnostics.isEmpty()) {
StringBuffer b = new StringBuffer();
for (String diag : diagnosticsList) {
b.append(diag);
}
expectDiag = b.toString();
}
WebServicesTestUtils.checkStringMatch("diagnostics", expectDiag,
diagnostics);
WebServicesTestUtils.checkStringMatch("assignedContainerId",
ConverterUtils.toString(ta.getAssignedContainerID()),
assignedContainerId);
assertEquals("startTime wrong", ta.getLaunchTime(), startTime);
assertEquals("finishTime wrong", ta.getFinishTime(), finishTime);
assertEquals("elapsedTime wrong", finishTime - startTime, elapsedTime);
assertEquals("progress wrong", ta.getProgress() * 100, progress, 1e-3f);
}
public void verifyReduceTaskAttemptGeneric(TaskAttempt ta,
long shuffleFinishTime, long mergeFinishTime, long elapsedShuffleTime,
long elapsedMergeTime, long elapsedReduceTime) {
assertEquals("shuffleFinishTime wrong", ta.getShuffleFinishTime(),
shuffleFinishTime);
assertEquals("mergeFinishTime wrong", ta.getSortFinishTime(),
mergeFinishTime);
assertEquals("elapsedShuffleTime wrong",
ta.getShuffleFinishTime() - ta.getLaunchTime(), elapsedShuffleTime);
assertEquals("elapsedMergeTime wrong",
ta.getSortFinishTime() - ta.getShuffleFinishTime(), elapsedMergeTime);
assertEquals("elapsedReduceTime wrong",
ta.getFinishTime() - ta.getSortFinishTime(), elapsedReduceTime);
}
@Test
public void testTaskAttemptIdCounters() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
for (Task task : jobsMap.get(id).getTasks().values()) {
String tid = MRApps.toString(task.getID());
for (TaskAttempt att : task.getAttempts().values()) {
TaskAttemptId attemptid = att.getID();
String attid = MRApps.toString(attemptid);
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").path(jobId).path("tasks").path(tid)
.path("attempts").path(attid).path("counters")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("jobTaskAttemptCounters");
verifyAMJobTaskAttemptCounters(info, att);
}
}
}
}
@Test
public void testTaskAttemptIdXMLCounters() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
for (Task task : jobsMap.get(id).getTasks().values()) {
String tid = MRApps.toString(task.getID());
for (TaskAttempt att : task.getAttempts().values()) {
TaskAttemptId attemptid = att.getID();
String attid = MRApps.toString(attemptid);
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").path(jobId).path("tasks").path(tid)
.path("attempts").path(attid).path("counters")
.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
String xml = response.getEntity(String.class);
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
InputSource is = new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom = db.parse(is);
NodeList nodes = dom.getElementsByTagName("jobTaskAttemptCounters");
verifyAMTaskCountersXML(nodes, att);
}
}
}
}
public void verifyAMJobTaskAttemptCounters(JSONObject info, TaskAttempt att)
throws JSONException {
assertEquals("incorrect number of elements", 2, info.length());
WebServicesTestUtils.checkStringMatch("id", MRApps.toString(att.getID()),
info.getString("id"));
// just do a simple verification that the fields are present - not that the
// data in them is correct
JSONArray counterGroups = info.getJSONArray("taskAttemptCounterGroup");
for (int i = 0; i < counterGroups.length(); i++) {
JSONObject counterGroup = counterGroups.getJSONObject(i);
String name = counterGroup.getString("counterGroupName");
assertTrue("name not set", (name != null && !name.isEmpty()));
JSONArray counters = counterGroup.getJSONArray("counter");
for (int j = 0; j < counters.length(); j++) {
JSONObject counter = counters.getJSONObject(j);
String counterName = counter.getString("name");
assertTrue("name not set",
(counterName != null && !counterName.isEmpty()));
long value = counter.getLong("value");
assertTrue("value >= 0", value >= 0);
}
}
}
public void verifyAMTaskCountersXML(NodeList nodes, TaskAttempt att) {
for (int i = 0; i < nodes.getLength(); i++) {
Element element = (Element) nodes.item(i);
WebServicesTestUtils.checkStringMatch("id", MRApps.toString(att.getID()),
WebServicesTestUtils.getXmlString(element, "id"));
// just do a simple verification that the fields are present - not that the
// data in them is correct
NodeList groups = element.getElementsByTagName("taskAttemptCounterGroup");
for (int j = 0; j < groups.getLength(); j++) {
Element counters = (Element) groups.item(j);
assertNotNull("should have counters in the web service info", counters);
String name = WebServicesTestUtils.getXmlString(counters,
"counterGroupName");
assertTrue("name not set", (name != null && !name.isEmpty()));
NodeList counterArr = counters.getElementsByTagName("counter");
for (int z = 0; z < counterArr.getLength(); z++) {
Element counter = (Element) counterArr.item(z);
String counterName = WebServicesTestUtils.getXmlString(counter,
"name");
assertTrue("counter name not set",
(counterName != null && !counterName.isEmpty()));
long value = WebServicesTestUtils.getXmlLong(counter, "value");
assertTrue("value not >= 0", value >= 0);
}
}
}
}
}
| 27,273 | 39.951952 | 131 | java |
hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesJobs.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.webapp;
import static org.apache.hadoop.yarn.util.StringHelper.ujoin;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.StringReader;
import java.util.List;
import java.util.Map;
import javax.ws.rs.core.MediaType;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.MockAppContext;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.util.Times;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
import org.codehaus.jettison.json.JSONArray;
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;
import org.junit.Before;
import org.junit.Test;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.NodeList;
import org.xml.sax.InputSource;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.servlet.GuiceServletContextListener;
import com.google.inject.servlet.ServletModule;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.ClientResponse.Status;
import com.sun.jersey.api.client.UniformInterfaceException;
import com.sun.jersey.api.client.WebResource;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
import com.sun.jersey.test.framework.JerseyTest;
import com.sun.jersey.test.framework.WebAppDescriptor;
/**
 * Test the app master web service REST API for getting jobs, a specific job,
* and job counters.
*
* /ws/v1/mapreduce/jobs
* /ws/v1/mapreduce/jobs/{jobid}
* /ws/v1/mapreduce/jobs/{jobid}/counters
* /ws/v1/mapreduce/jobs/{jobid}/jobattempts
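 *
 * A successful JSON response for the job list is expected to have the shape
 * (inferred from the assertions in this class):
 * {"jobs": {"job": [ {...}, ... ]}}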
*/
public class TestAMWebServicesJobs extends JerseyTest {
private static Configuration conf = new Configuration();
private static AppContext appContext;
private Injector injector = Guice.createInjector(new ServletModule() {
@Override
protected void configureServlets() {
appContext = new MockAppContext(0, 1, 2, 1);
bind(JAXBContextResolver.class);
bind(AMWebServices.class);
bind(GenericExceptionHandler.class);
bind(AppContext.class).toInstance(appContext);
bind(Configuration.class).toInstance(conf);
serve("/*").with(GuiceContainer.class);
}
});
public class GuiceServletConfig extends GuiceServletContextListener {
@Override
protected Injector getInjector() {
return injector;
}
}
@Before
@Override
public void setUp() throws Exception {
super.setUp();
}
public TestAMWebServicesJobs() {
super(new WebAppDescriptor.Builder(
"org.apache.hadoop.mapreduce.v2.app.webapp")
.contextListenerClass(GuiceServletConfig.class)
.filterClass(com.google.inject.servlet.GuiceFilter.class)
.contextPath("jersey-guice-filter").servletPath("/").build());
}
@Test
public void testJobs() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject jobs = json.getJSONObject("jobs");
JSONArray arr = jobs.getJSONArray("job");
JSONObject info = arr.getJSONObject(0);
Job job = appContext.getJob(MRApps.toJobID(info.getString("id")));
verifyAMJob(info, job);
}
@Test
public void testJobsSlash() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs/").accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject jobs = json.getJSONObject("jobs");
JSONArray arr = jobs.getJSONArray("job");
JSONObject info = arr.getJSONObject(0);
Job job = appContext.getJob(MRApps.toJobID(info.getString("id")));
verifyAMJob(info, job);
}
@Test
public void testJobsDefault() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject jobs = json.getJSONObject("jobs");
JSONArray arr = jobs.getJSONArray("job");
JSONObject info = arr.getJSONObject(0);
Job job = appContext.getJob(MRApps.toJobID(info.getString("id")));
verifyAMJob(info, job);
}
@Test
public void testJobsXML() throws Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").accept(MediaType.APPLICATION_XML)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
String xml = response.getEntity(String.class);
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
InputSource is = new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom = db.parse(is);
NodeList jobs = dom.getElementsByTagName("jobs");
assertEquals("incorrect number of elements", 1, jobs.getLength());
NodeList job = dom.getElementsByTagName("job");
assertEquals("incorrect number of elements", 1, job.getLength());
verifyAMJobXML(job, appContext);
}
@Test
public void testJobId() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").path(jobId).accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("job");
verifyAMJob(info, jobsMap.get(id));
}
}
@Test
public void testJobIdSlash() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").path(jobId + "/").accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("job");
verifyAMJob(info, jobsMap.get(id));
}
}
@Test
public void testJobIdDefault() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").path(jobId).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("job");
verifyAMJob(info, jobsMap.get(id));
}
}
@Test
public void testJobIdNonExist() throws JSONException, Exception {
WebResource r = resource();
try {
r.path("ws").path("v1").path("mapreduce").path("jobs")
.path("job_0_1234").get(JSONObject.class);
fail("should have thrown exception on invalid uri");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject msg = response.getEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements", 3, exception.length());
String message = exception.getString("message");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
WebServicesTestUtils.checkStringMatch("exception message",
"java.lang.Exception: job, job_0_1234, is not found", message);
WebServicesTestUtils.checkStringMatch("exception type",
"NotFoundException", type);
WebServicesTestUtils.checkStringMatch("exception classname",
"org.apache.hadoop.yarn.webapp.NotFoundException", classname);
}
}
@Test
public void testJobIdInvalid() throws JSONException, Exception {
WebResource r = resource();
try {
r.path("ws").path("v1").path("mapreduce").path("jobs").path("job_foo")
.accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
fail("should have thrown exception on invalid uri");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject msg = response.getEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements", 3, exception.length());
String message = exception.getString("message");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
verifyJobIdInvalid(message, type, classname);
}
}
// verify the exception output default is JSON
@Test
public void testJobIdInvalidDefault() throws JSONException, Exception {
WebResource r = resource();
try {
r.path("ws").path("v1").path("mapreduce").path("jobs").path("job_foo")
.get(JSONObject.class);
fail("should have thrown exception on invalid uri");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject msg = response.getEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements", 3, exception.length());
String message = exception.getString("message");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
verifyJobIdInvalid(message, type, classname);
}
}
// test that the exception output works in XML
@Test
public void testJobIdInvalidXML() throws JSONException, Exception {
WebResource r = resource();
try {
r.path("ws").path("v1").path("mapreduce").path("jobs").path("job_foo")
.accept(MediaType.APPLICATION_XML).get(JSONObject.class);
fail("should have thrown exception on invalid uri");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
String msg = response.getEntity(String.class);
System.out.println(msg);
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
InputSource is = new InputSource();
is.setCharacterStream(new StringReader(msg));
Document dom = db.parse(is);
NodeList nodes = dom.getElementsByTagName("RemoteException");
Element element = (Element) nodes.item(0);
String message = WebServicesTestUtils.getXmlString(element, "message");
String type = WebServicesTestUtils.getXmlString(element, "exception");
String classname = WebServicesTestUtils.getXmlString(element,
"javaClassName");
verifyJobIdInvalid(message, type, classname);
}
}
private void verifyJobIdInvalid(String message, String type, String classname) {
WebServicesTestUtils.checkStringMatch("exception message",
"java.lang.Exception: JobId string : job_foo is not properly formed",
message);
WebServicesTestUtils.checkStringMatch("exception type",
"NotFoundException", type);
WebServicesTestUtils.checkStringMatch("exception classname",
"org.apache.hadoop.yarn.webapp.NotFoundException", classname);
}
@Test
public void testJobIdInvalidBogus() throws JSONException, Exception {
WebResource r = resource();
try {
r.path("ws").path("v1").path("mapreduce").path("jobs").path("bogusfoo")
.get(JSONObject.class);
fail("should have thrown exception on invalid uri");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject msg = response.getEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements", 3, exception.length());
String message = exception.getString("message");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
WebServicesTestUtils
.checkStringMatch(
"exception message",
"java.lang.Exception: JobId string : bogusfoo is not properly formed",
message);
WebServicesTestUtils.checkStringMatch("exception type",
"NotFoundException", type);
WebServicesTestUtils.checkStringMatch("exception classname",
"org.apache.hadoop.yarn.webapp.NotFoundException", classname);
}
}
@Test
public void testJobIdXML() throws Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").path(jobId).accept(MediaType.APPLICATION_XML)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
String xml = response.getEntity(String.class);
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
InputSource is = new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom = db.parse(is);
NodeList job = dom.getElementsByTagName("job");
verifyAMJobXML(job, appContext);
}
}
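/**
 * Verifies the fields of a single job JSON object against the Job it
 * represents, including the restricted-access fields and the job ACLs.
 */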
public void verifyAMJob(JSONObject info, Job job) throws JSONException {
assertEquals("incorrect number of elements", 31, info.length());
// fields everyone can access
verifyAMJobGeneric(job, info.getString("id"), info.getString("user"),
info.getString("name"), info.getString("state"),
info.getLong("startTime"), info.getLong("finishTime"),
info.getLong("elapsedTime"), info.getInt("mapsTotal"),
info.getInt("mapsCompleted"), info.getInt("reducesTotal"),
info.getInt("reducesCompleted"),
(float) info.getDouble("reduceProgress"),
(float) info.getDouble("mapProgress"));
String diagnostics = "";
if (info.has("diagnostics")) {
diagnostics = info.getString("diagnostics");
}
// restricted access fields - if security and acls set
verifyAMJobGenericSecure(job, info.getInt("mapsPending"),
info.getInt("mapsRunning"), info.getInt("reducesPending"),
info.getInt("reducesRunning"), info.getBoolean("uberized"),
diagnostics, info.getInt("newReduceAttempts"),
info.getInt("runningReduceAttempts"),
info.getInt("failedReduceAttempts"),
info.getInt("killedReduceAttempts"),
info.getInt("successfulReduceAttempts"), info.getInt("newMapAttempts"),
info.getInt("runningMapAttempts"), info.getInt("failedMapAttempts"),
info.getInt("killedMapAttempts"), info.getInt("successfulMapAttempts"));
Map<JobACL, AccessControlList> allacls = job.getJobACLs();
if (allacls != null) {
for (Map.Entry<JobACL, AccessControlList> entry : allacls.entrySet()) {
String expectName = entry.getKey().getAclName();
String expectValue = entry.getValue().getAclString();
Boolean found = false;
// make sure ws includes it
if (info.has("acls")) {
JSONArray arr = info.getJSONArray("acls");
for (int i = 0; i < arr.length(); i++) {
JSONObject aclInfo = arr.getJSONObject(i);
if (expectName.matches(aclInfo.getString("name"))) {
found = true;
WebServicesTestUtils.checkStringMatch("value", expectValue,
aclInfo.getString("value"));
}
}
} else {
fail("should have acls in the web service info");
}
assertTrue("acl: " + expectName + " not found in webservice output",
found);
}
}
}
public void verifyAMJobXML(NodeList nodes, AppContext appContext) {
assertEquals("incorrect number of elements", 1, nodes.getLength());
for (int i = 0; i < nodes.getLength(); i++) {
Element element = (Element) nodes.item(i);
Job job = appContext.getJob(MRApps.toJobID(WebServicesTestUtils
.getXmlString(element, "id")));
assertNotNull("Job not found - output incorrect", job);
verifyAMJobGeneric(job, WebServicesTestUtils.getXmlString(element, "id"),
WebServicesTestUtils.getXmlString(element, "user"),
WebServicesTestUtils.getXmlString(element, "name"),
WebServicesTestUtils.getXmlString(element, "state"),
WebServicesTestUtils.getXmlLong(element, "startTime"),
WebServicesTestUtils.getXmlLong(element, "finishTime"),
WebServicesTestUtils.getXmlLong(element, "elapsedTime"),
WebServicesTestUtils.getXmlInt(element, "mapsTotal"),
WebServicesTestUtils.getXmlInt(element, "mapsCompleted"),
WebServicesTestUtils.getXmlInt(element, "reducesTotal"),
WebServicesTestUtils.getXmlInt(element, "reducesCompleted"),
WebServicesTestUtils.getXmlFloat(element, "reduceProgress"),
WebServicesTestUtils.getXmlFloat(element, "mapProgress"));
// restricted access fields - if security and acls set
verifyAMJobGenericSecure(job,
WebServicesTestUtils.getXmlInt(element, "mapsPending"),
WebServicesTestUtils.getXmlInt(element, "mapsRunning"),
WebServicesTestUtils.getXmlInt(element, "reducesPending"),
WebServicesTestUtils.getXmlInt(element, "reducesRunning"),
WebServicesTestUtils.getXmlBoolean(element, "uberized"),
WebServicesTestUtils.getXmlString(element, "diagnostics"),
WebServicesTestUtils.getXmlInt(element, "newReduceAttempts"),
WebServicesTestUtils.getXmlInt(element, "runningReduceAttempts"),
WebServicesTestUtils.getXmlInt(element, "failedReduceAttempts"),
WebServicesTestUtils.getXmlInt(element, "killedReduceAttempts"),
WebServicesTestUtils.getXmlInt(element, "successfulReduceAttempts"),
WebServicesTestUtils.getXmlInt(element, "newMapAttempts"),
WebServicesTestUtils.getXmlInt(element, "runningMapAttempts"),
WebServicesTestUtils.getXmlInt(element, "failedMapAttempts"),
WebServicesTestUtils.getXmlInt(element, "killedMapAttempts"),
WebServicesTestUtils.getXmlInt(element, "successfulMapAttempts"));
Map<JobACL, AccessControlList> allacls = job.getJobACLs();
if (allacls != null) {
for (Map.Entry<JobACL, AccessControlList> entry : allacls.entrySet()) {
String expectName = entry.getKey().getAclName();
String expectValue = entry.getValue().getAclString();
Boolean found = false;
// make sure ws includes it
NodeList id = element.getElementsByTagName("acls");
if (id != null) {
for (int j = 0; j < id.getLength(); j++) {
Element aclElem = (Element) id.item(j);
if (aclElem == null) {
fail("should have acls in the web service info");
}
if (expectName.matches(WebServicesTestUtils.getXmlString(aclElem,
"name"))) {
found = true;
WebServicesTestUtils.checkStringMatch("value", expectValue,
WebServicesTestUtils.getXmlString(aclElem, "value"));
}
}
} else {
fail("should have acls in the web service info");
}
assertTrue("acl: " + expectName + " not found in webservice output",
found);
}
}
}
}
public void verifyAMJobGeneric(Job job, String id, String user, String name,
String state, long startTime, long finishTime, long elapsedTime,
int mapsTotal, int mapsCompleted, int reducesTotal, int reducesCompleted,
float reduceProgress, float mapProgress) {
JobReport report = job.getReport();
WebServicesTestUtils.checkStringMatch("id", MRApps.toString(job.getID()),
id);
WebServicesTestUtils.checkStringMatch("user", job.getUserName().toString(),
user);
WebServicesTestUtils.checkStringMatch("name", job.getName(), name);
WebServicesTestUtils.checkStringMatch("state", job.getState().toString(),
state);
assertEquals("startTime incorrect", report.getStartTime(), startTime);
assertEquals("finishTime incorrect", report.getFinishTime(), finishTime);
assertEquals("elapsedTime incorrect",
Times.elapsed(report.getStartTime(), report.getFinishTime()),
elapsedTime);
assertEquals("mapsTotal incorrect", job.getTotalMaps(), mapsTotal);
assertEquals("mapsCompleted incorrect", job.getCompletedMaps(),
mapsCompleted);
assertEquals("reducesTotal incorrect", job.getTotalReduces(), reducesTotal);
assertEquals("reducesCompleted incorrect", job.getCompletedReduces(),
reducesCompleted);
assertEquals("mapProgress incorrect", report.getMapProgress() * 100,
mapProgress, 0);
assertEquals("reduceProgress incorrect", report.getReduceProgress() * 100,
reduceProgress, 0);
}
public void verifyAMJobGenericSecure(Job job, int mapsPending,
int mapsRunning, int reducesPending, int reducesRunning,
Boolean uberized, String diagnostics, int newReduceAttempts,
int runningReduceAttempts, int failedReduceAttempts,
int killedReduceAttempts, int successfulReduceAttempts,
int newMapAttempts, int runningMapAttempts, int failedMapAttempts,
int killedMapAttempts, int successfulMapAttempts) {
String diagString = "";
List<String> diagList = job.getDiagnostics();
if (diagList != null && !diagList.isEmpty()) {
StringBuffer b = new StringBuffer();
for (String diag : diagList) {
b.append(diag);
}
diagString = b.toString();
}
WebServicesTestUtils.checkStringMatch("diagnostics", diagString,
diagnostics);
assertEquals("isUber incorrect", job.isUber(), uberized);
// unfortunately the following fields are all calculated in JobInfo,
// so they are not easily accessible here without redoing the calculations.
// For now just make sure they are present and non-negative.
assertTrue("mapsPending not >= 0", mapsPending >= 0);
assertTrue("mapsRunning not >= 0", mapsRunning >= 0);
assertTrue("reducesPending not >= 0", reducesPending >= 0);
assertTrue("reducesRunning not >= 0", reducesRunning >= 0);
assertTrue("newReduceAttempts not >= 0", newReduceAttempts >= 0);
assertTrue("runningReduceAttempts not >= 0", runningReduceAttempts >= 0);
assertTrue("failedReduceAttempts not >= 0", failedReduceAttempts >= 0);
assertTrue("killedReduceAttempts not >= 0", killedReduceAttempts >= 0);
assertTrue("successfulReduceAttempts not >= 0",
successfulReduceAttempts >= 0);
assertTrue("newMapAttempts not >= 0", newMapAttempts >= 0);
assertTrue("runningMapAttempts not >= 0", runningMapAttempts >= 0);
assertTrue("failedMapAttempts not >= 0", failedMapAttempts >= 0);
assertTrue("killedMapAttempts not >= 0", killedMapAttempts >= 0);
assertTrue("successfulMapAttempts not >= 0", successfulMapAttempts >= 0);
}
@Test
public void testJobCounters() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").path(jobId).path("counters")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("jobCounters");
verifyAMJobCounters(info, jobsMap.get(id));
}
}
@Test
public void testJobCountersSlash() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").path(jobId).path("counters/")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("jobCounters");
verifyAMJobCounters(info, jobsMap.get(id));
}
}
@Test
public void testJobCountersDefault() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").path(jobId).path("counters/").get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("jobCounters");
verifyAMJobCounters(info, jobsMap.get(id));
}
}
@Test
public void testJobCountersXML() throws Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").path(jobId).path("counters")
.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
String xml = response.getEntity(String.class);
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
InputSource is = new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom = db.parse(is);
NodeList info = dom.getElementsByTagName("jobCounters");
verifyAMJobCountersXML(info, jobsMap.get(id));
}
}
public void verifyAMJobCounters(JSONObject info, Job job)
throws JSONException {
assertEquals("incorrect number of elements", 2, info.length());
WebServicesTestUtils.checkStringMatch("id", MRApps.toString(job.getID()),
info.getString("id"));
// just do simple verification that the fields are present - not that the
// data in them is correct
JSONArray counterGroups = info.getJSONArray("counterGroup");
for (int i = 0; i < counterGroups.length(); i++) {
JSONObject counterGroup = counterGroups.getJSONObject(i);
String name = counterGroup.getString("counterGroupName");
assertTrue("name not set", (name != null && !name.isEmpty()));
JSONArray counters = counterGroup.getJSONArray("counter");
for (int j = 0; j < counters.length(); j++) {
JSONObject counter = counters.getJSONObject(j);
String counterName = counter.getString("name");
assertTrue("counter name not set",
(counterName != null && !counterName.isEmpty()));
long mapValue = counter.getLong("mapCounterValue");
assertTrue("mapCounterValue >= 0", mapValue >= 0);
long reduceValue = counter.getLong("reduceCounterValue");
assertTrue("reduceCounterValue >= 0", reduceValue >= 0);
long totalValue = counter.getLong("totalCounterValue");
assertTrue("totalCounterValue >= 0", totalValue >= 0);
}
}
}
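// Illustrative sketch only (field names from the checks above, numeric values
// are placeholders, not from a real run): a jobCounters reply has the shape
//   {"jobCounters": {"id": "job_...",
//     "counterGroup": [{"counterGroupName": "...",
//       "counter": [{"name": "...", "mapCounterValue": 0,
//         "reduceCounterValue": 0, "totalCounterValue": 0}]}]}}
// and the tests only verify that the counter values are non-negative.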
public void verifyAMJobCountersXML(NodeList nodes, Job job) {
for (int i = 0; i < nodes.getLength(); i++) {
Element element = (Element) nodes.item(i);
assertNotNull("Job not found - output incorrect", job);
WebServicesTestUtils.checkStringMatch("id", MRApps.toString(job.getID()),
WebServicesTestUtils.getXmlString(element, "id"));
// just do simple verification that the fields are present - not that the
// data in them is correct
NodeList groups = element.getElementsByTagName("counterGroup");
for (int j = 0; j < groups.getLength(); j++) {
Element counters = (Element) groups.item(j);
assertNotNull("should have counters in the web service info", counters);
String name = WebServicesTestUtils.getXmlString(counters,
"counterGroupName");
assertTrue("name not set", (name != null && !name.isEmpty()));
NodeList counterArr = counters.getElementsByTagName("counter");
for (int z = 0; z < counterArr.getLength(); z++) {
Element counter = (Element) counterArr.item(z);
String counterName = WebServicesTestUtils.getXmlString(counter,
"name");
assertTrue("counter name not set",
(counterName != null && !counterName.isEmpty()));
long mapValue = WebServicesTestUtils.getXmlLong(counter,
"mapCounterValue");
assertTrue("mapCounterValue not >= 0", mapValue >= 0);
long reduceValue = WebServicesTestUtils.getXmlLong(counter,
"reduceCounterValue");
assertTrue("reduceCounterValue >= 0", reduceValue >= 0);
long totalValue = WebServicesTestUtils.getXmlLong(counter,
"totalCounterValue");
assertTrue("totalCounterValue >= 0", totalValue >= 0);
}
}
}
}
@Test
public void testJobAttempts() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
ClientResponse response = r.path("ws").path("v1")
.path("mapreduce").path("jobs").path(jobId).path("jobattempts")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("jobAttempts");
verifyJobAttempts(info, jobsMap.get(id));
}
}
@Test
public void testJobAttemptsSlash() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
ClientResponse response = r.path("ws").path("v1")
.path("mapreduce").path("jobs").path(jobId).path("jobattempts/")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("jobAttempts");
verifyJobAttempts(info, jobsMap.get(id));
}
}
@Test
public void testJobAttemptsDefault() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
ClientResponse response = r.path("ws").path("v1")
.path("mapreduce").path("jobs").path(jobId).path("jobattempts")
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("jobAttempts");
verifyJobAttempts(info, jobsMap.get(id));
}
}
@Test
public void testJobAttemptsXML() throws Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
ClientResponse response = r.path("ws").path("v1")
.path("mapreduce").path("jobs").path(jobId).path("jobattempts")
.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
String xml = response.getEntity(String.class);
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
InputSource is = new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom = db.parse(is);
NodeList attempts = dom.getElementsByTagName("jobAttempts");
assertEquals("incorrect number of elements", 1, attempts.getLength());
NodeList info = dom.getElementsByTagName("jobAttempt");
verifyJobAttemptsXML(info, jobsMap.get(id));
}
}
public void verifyJobAttempts(JSONObject info, Job job)
throws JSONException {
JSONArray attempts = info.getJSONArray("jobAttempt");
assertEquals("incorrect number of elements", 2, attempts.length());
for (int i = 0; i < attempts.length(); i++) {
JSONObject attempt = attempts.getJSONObject(i);
verifyJobAttemptsGeneric(job, attempt.getString("nodeHttpAddress"),
attempt.getString("nodeId"), attempt.getInt("id"),
attempt.getLong("startTime"), attempt.getString("containerId"),
attempt.getString("logsLink"));
}
}
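// Illustrative sketch only (field names taken from the reads above, values
// are placeholders): each entry of the "jobAttempt" array looks like
//   {"nodeHttpAddress": "host:httpPort", "nodeId": "host:port", "id": 1,
//    "startTime": 1234, "containerId": "container_...",
//    "logsLink": ".../node/containerlogs/container_.../<user>"}
// verifyJobAttemptsGeneric below matches these against the job's AMInfos.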
public void verifyJobAttemptsXML(NodeList nodes, Job job) {
assertEquals("incorrect number of elements", 2, nodes.getLength());
for (int i = 0; i < nodes.getLength(); i++) {
Element element = (Element) nodes.item(i);
verifyJobAttemptsGeneric(job,
WebServicesTestUtils.getXmlString(element, "nodeHttpAddress"),
WebServicesTestUtils.getXmlString(element, "nodeId"),
WebServicesTestUtils.getXmlInt(element, "id"),
WebServicesTestUtils.getXmlLong(element, "startTime"),
WebServicesTestUtils.getXmlString(element, "containerId"),
WebServicesTestUtils.getXmlString(element, "logsLink"));
}
}
public void verifyJobAttemptsGeneric(Job job, String nodeHttpAddress,
String nodeId, int id, long startTime, String containerId, String logsLink) {
boolean attemptFound = false;
for (AMInfo amInfo : job.getAMInfos()) {
if (amInfo.getAppAttemptId().getAttemptId() == id) {
attemptFound = true;
String nmHost = amInfo.getNodeManagerHost();
int nmHttpPort = amInfo.getNodeManagerHttpPort();
int nmPort = amInfo.getNodeManagerPort();
WebServicesTestUtils.checkStringMatch("nodeHttpAddress", nmHost + ":"
+ nmHttpPort, nodeHttpAddress);
WebServicesTestUtils.checkStringMatch("nodeId",
NodeId.newInstance(nmHost, nmPort).toString(), nodeId);
assertTrue("startime not greater than 0", startTime > 0);
WebServicesTestUtils.checkStringMatch("containerId", amInfo
.getContainerId().toString(), containerId);
String localLogsLink = ujoin("node", "containerlogs", containerId,
job.getUserName());
assertTrue("logsLink", logsLink.contains(localLogsLink));
}
}
assertTrue("attempt: " + id + " was not found", attemptFound);
}
}
| 39,030 | 41.844127 | 84 | java |
hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempt.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.webapp;
import static org.junit.Assert.assertEquals;
import java.io.StringReader;
import java.util.Enumeration;
import java.util.Map;
import java.util.Properties;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.ws.rs.core.MediaType;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.MockAppContext;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;
import org.junit.Before;
import org.junit.Test;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.NodeList;
import org.xml.sax.InputSource;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.Singleton;
import com.google.inject.servlet.GuiceServletContextListener;
import com.google.inject.servlet.ServletModule;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.WebResource;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
import com.sun.jersey.test.framework.JerseyTest;
import com.sun.jersey.test.framework.WebAppDescriptor;
/**
* Test the app master web service Rest API for getting and updating the
* state of a task attempt.
*
* /ws/v1/mapreduce/jobs/{jobid}/tasks/{taskid}/attempts/{attemptid}/state
*/
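// Illustrative sketch only, summarizing what the tests below exercise: a JSON
// GET on the state resource returns a body of the form
//   {"state": "<current TaskAttemptState>"}
// and a JSON PUT of {"state":"KILLED"} (or the XML equivalent
// <jobTaskAttemptState><state>KILLED</state></jobTaskAttemptState>) kills the
// attempt and echoes the new state back. Every request also carries a
// user.name query parameter because the auth filter configured below
// disallows anonymous access.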
public class TestAMWebServicesAttempt extends JerseyTest {
private static Configuration conf = new Configuration();
private static AppContext appContext;
private String webserviceUserName = "testuser";
private Injector injector = Guice.createInjector(new ServletModule() {
@Override
protected void configureServlets() {
appContext = new MockAppContext(0, 1, 2, 1);
bind(JAXBContextResolver.class);
bind(AMWebServices.class);
bind(GenericExceptionHandler.class);
bind(AppContext.class).toInstance(appContext);
bind(Configuration.class).toInstance(conf);
serve("/*").with(GuiceContainer.class);
filter("/*").through(TestRMCustomAuthFilter.class);
}
});
@Singleton
public static class TestRMCustomAuthFilter extends AuthenticationFilter {
@Override
protected Properties getConfiguration(String configPrefix,
FilterConfig filterConfig) throws ServletException {
Properties props = new Properties();
Enumeration<?> names = filterConfig.getInitParameterNames();
while (names.hasMoreElements()) {
String name = (String) names.nextElement();
if (name.startsWith(configPrefix)) {
String value = filterConfig.getInitParameter(name);
props.put(name.substring(configPrefix.length()), value);
}
}
props.put(AuthenticationFilter.AUTH_TYPE, "simple");
props.put(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED, "false");
return props;
}
}
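// Note on the filter above: with ANONYMOUS_ALLOWED set to "false", requests
// without an identity are rejected, which is why each test below appends a
// user.name query parameter (e.g. ?user.name=testuser) via
// queryParam("user.name", webserviceUserName).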
public class GuiceServletConfig extends GuiceServletContextListener {
@Override
protected Injector getInjector() {
return injector;
}
}
@Before
@Override
public void setUp() throws Exception {
super.setUp();
}
public TestAMWebServicesAttempt() {
super(new WebAppDescriptor.Builder(
"org.apache.hadoop.mapreduce.v2.app.webapp")
.contextListenerClass(GuiceServletConfig.class)
.filterClass(com.google.inject.servlet.GuiceFilter.class)
.contextPath("jersey-guice-filter").servletPath("/").build());
}
@Test
public void testGetTaskAttemptIdState() throws Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
for (Task task : jobsMap.get(id).getTasks().values()) {
String tid = MRApps.toString(task.getID());
for (TaskAttempt att : task.getAttempts().values()) {
TaskAttemptId attemptid = att.getID();
String attid = MRApps.toString(attemptid);
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").path(jobId).path("tasks").path(tid)
.path("attempts").path(attid).path("state")
.queryParam("user.name", webserviceUserName)
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
assertEquals(att.getState().toString(), json.get("state"));
}
}
}
}
@Test
public void testGetTaskAttemptIdXMLState() throws Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
for (Task task : jobsMap.get(id).getTasks().values()) {
String tid = MRApps.toString(task.getID());
for (TaskAttempt att : task.getAttempts().values()) {
TaskAttemptId attemptid = att.getID();
String attid = MRApps.toString(attemptid);
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").path(jobId).path("tasks").path(tid)
.path("attempts").path(attid).path("state")
.queryParam("user.name", webserviceUserName)
.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
String xml = response.getEntity(String.class);
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
InputSource is = new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom = db.parse(is);
NodeList nodes = dom.getElementsByTagName("jobTaskAttemptState");
assertEquals(1, nodes.getLength());
String state = WebServicesTestUtils.getXmlString(
(Element) nodes.item(0), "state");
assertEquals(att.getState().toString(), state);
}
}
}
}
@Test
public void testPutTaskAttemptIdState() throws Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
for (Task task : jobsMap.get(id).getTasks().values()) {
String tid = MRApps.toString(task.getID());
for (TaskAttempt att : task.getAttempts().values()) {
TaskAttemptId attemptid = att.getID();
String attid = MRApps.toString(attemptid);
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").path(jobId).path("tasks").path(tid)
.path("attempts").path(attid).path("state")
.queryParam("user.name", webserviceUserName)
.accept(MediaType.APPLICATION_JSON)
.type(MediaType.APPLICATION_JSON)
.put(ClientResponse.class, "{\"state\":\"KILLED\"}");
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
assertEquals(TaskAttemptState.KILLED.toString(), json.get("state"));
}
}
}
}
@Test
public void testPutTaskAttemptIdXMLState() throws Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
for (Task task : jobsMap.get(id).getTasks().values()) {
String tid = MRApps.toString(task.getID());
for (TaskAttempt att : task.getAttempts().values()) {
TaskAttemptId attemptid = att.getID();
String attid = MRApps.toString(attemptid);
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").path(jobId).path("tasks").path(tid)
.path("attempts").path(attid).path("state")
.queryParam("user.name", webserviceUserName)
.accept(MediaType.APPLICATION_XML_TYPE)
.type(MediaType.APPLICATION_XML_TYPE)
.put(ClientResponse.class,
"<jobTaskAttemptState><state>KILLED" +
"</state></jobTaskAttemptState>");
assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
String xml = response.getEntity(String.class);
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
InputSource is = new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom = db.parse(is);
NodeList nodes = dom.getElementsByTagName("jobTaskAttemptState");
assertEquals(1, nodes.getLength());
String state = WebServicesTestUtils.getXmlString(
(Element) nodes.item(0), "state");
assertEquals(TaskAttemptState.KILLED.toString(), state);
}
}
}
}
}
| 10,903 | 38.79562 | 84 | java |
hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/ViewForTest.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.webapp;
import org.apache.hadoop.yarn.webapp.View;
/**
* Overrides render() for testing.
*/
public class ViewForTest extends View {
@Override
public void render() {
}
}
| 1,038 | 30.484848 | 75 | java |
hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesJobConf.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.webapp;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.io.StringReader;
import java.util.Map;
import javax.ws.rs.core.MediaType;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.ClusterInfo;
import org.apache.hadoop.mapreduce.v2.app.MockAppContext;
import org.apache.hadoop.mapreduce.v2.app.MockJobs;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.util.Clock;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
import org.codehaus.jettison.json.JSONArray;
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.Test;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.NodeList;
import org.xml.sax.InputSource;
import com.google.common.collect.Maps;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.servlet.GuiceServletContextListener;
import com.google.inject.servlet.ServletModule;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.WebResource;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
import com.sun.jersey.test.framework.JerseyTest;
import com.sun.jersey.test.framework.WebAppDescriptor;
/**
* Test the app master web service Rest API for getting the job conf. This
* requires creating a temporary configuration file.
*
* /ws/v1/mapreduce/jobs/{jobid}/conf
*/
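// Illustrative sketch only (field names from verifyAMJobConf below, values
// are placeholders): the conf resource returns
//   {"conf": {"path": "<qualified path to the job conf file>",
//     "property": [{"name": "<key>", "value": "<value>"}, ...]}}
// and the tests only check that every property name and value is non-empty.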
public class TestAMWebServicesJobConf extends JerseyTest {
private static Configuration conf = new Configuration();
private static AppContext appContext;
private static File testConfDir = new File("target",
TestAMWebServicesJobConf.class.getSimpleName() + "confDir");
private Injector injector = Guice.createInjector(new ServletModule() {
@Override
protected void configureServlets() {
Path confPath = new Path(testConfDir.toString(),
MRJobConfig.JOB_CONF_FILE);
Configuration config = new Configuration();
FileSystem localFs;
try {
localFs = FileSystem.getLocal(config);
confPath = localFs.makeQualified(confPath);
OutputStream out = localFs.create(confPath);
try {
conf.writeXml(out);
} finally {
out.close();
}
if (!localFs.exists(confPath)) {
fail("error creating config file: " + confPath);
}
} catch (IOException e) {
fail("error creating config file: " + e.getMessage());
}
appContext = new MockAppContext(0, 2, 1, confPath);
bind(JAXBContextResolver.class);
bind(AMWebServices.class);
bind(GenericExceptionHandler.class);
bind(AppContext.class).toInstance(appContext);
bind(Configuration.class).toInstance(conf);
serve("/*").with(GuiceContainer.class);
}
});
public class GuiceServletConfig extends GuiceServletContextListener {
@Override
protected Injector getInjector() {
return injector;
}
}
@Before
@Override
public void setUp() throws Exception {
super.setUp();
testConfDir.mkdir();
}
@AfterClass
static public void stop() {
FileUtil.fullyDelete(testConfDir);
}
public TestAMWebServicesJobConf() {
super(new WebAppDescriptor.Builder(
"org.apache.hadoop.mapreduce.v2.app.webapp")
.contextListenerClass(GuiceServletConfig.class)
.filterClass(com.google.inject.servlet.GuiceFilter.class)
.contextPath("jersey-guice-filter").servletPath("/").build());
}
@Test
public void testJobConf() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").path(jobId).path("conf")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("conf");
verifyAMJobConf(info, jobsMap.get(id));
}
}
@Test
public void testJobConfSlash() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").path(jobId).path("conf/")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("conf");
verifyAMJobConf(info, jobsMap.get(id));
}
}
@Test
public void testJobConfDefault() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").path(jobId).path("conf").get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("conf");
verifyAMJobConf(info, jobsMap.get(id));
}
}
@Test
public void testJobConfXML() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").path(jobId).path("conf")
.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
String xml = response.getEntity(String.class);
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
InputSource is = new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom = db.parse(is);
NodeList info = dom.getElementsByTagName("conf");
verifyAMJobConfXML(info, jobsMap.get(id));
}
}
public void verifyAMJobConf(JSONObject info, Job job) throws JSONException {
assertEquals("incorrect number of elements", 2, info.length());
WebServicesTestUtils.checkStringMatch("path", job.getConfFile().toString(),
info.getString("path"));
// just do simple verification that the fields are present - not that the
// data in them is correct
JSONArray properties = info.getJSONArray("property");
for (int i = 0; i < properties.length(); i++) {
JSONObject prop = properties.getJSONObject(i);
String name = prop.getString("name");
String value = prop.getString("value");
assertTrue("name not set", (name != null && !name.isEmpty()));
assertTrue("value not set", (value != null && !value.isEmpty()));
}
}
public void verifyAMJobConfXML(NodeList nodes, Job job) {
assertEquals("incorrect number of elements", 1, nodes.getLength());
for (int i = 0; i < nodes.getLength(); i++) {
Element element = (Element) nodes.item(i);
WebServicesTestUtils.checkStringMatch("path", job.getConfFile()
.toString(), WebServicesTestUtils.getXmlString(element, "path"));
// just do simple verification that the fields are present - not that the
// data in them is correct
NodeList properties = element.getElementsByTagName("property");
for (int j = 0; j < properties.getLength(); j++) {
Element property = (Element) properties.item(j);
assertNotNull("should have counters in the web service info", property);
String name = WebServicesTestUtils.getXmlString(property, "name");
String value = WebServicesTestUtils.getXmlString(property, "value");
assertTrue("name not set", (name != null && !name.isEmpty()));
assertTrue("name not set", (value != null && !value.isEmpty()));
}
}
}
}
| 10,248 | 36.134058 | 80 | java |
hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebApp.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.webapp;
import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.APP_ID;
import static org.junit.Assert.assertEquals;
import java.io.ByteArrayOutputStream;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import javax.net.ssl.SSLException;
import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpConfig.Policy;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.MRApp;
import org.apache.hadoop.mapreduce.v2.app.MockAppContext;
import org.apache.hadoop.mapreduce.v2.app.MockJobs;
import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
import org.apache.hadoop.mapreduce.v2.app.client.MRClientService;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.webproxy.ProxyUriUtils;
import org.apache.hadoop.yarn.server.webproxy.amfilter.AmFilterInitializer;
import org.apache.hadoop.yarn.webapp.WebApps;
import org.apache.hadoop.yarn.webapp.test.WebAppTests;
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
import org.apache.http.HttpStatus;
import org.junit.Test;
import com.google.common.net.HttpHeaders;
import com.google.inject.Injector;
public class TestAMWebApp {
@Test public void testAppControllerIndex() {
AppContext ctx = new MockAppContext(0, 1, 1, 1);
Injector injector = WebAppTests.createMockInjector(AppContext.class, ctx);
AppController controller = injector.getInstance(AppController.class);
controller.index();
assertEquals(ctx.getApplicationID().toString(), controller.get(APP_ID,""));
}
@Test public void testAppView() {
WebAppTests.testPage(AppView.class, AppContext.class, new MockAppContext(0, 1, 1, 1));
}
@Test public void testJobView() {
AppContext appContext = new MockAppContext(0, 1, 1, 1);
Map<String, String> params = getJobParams(appContext);
WebAppTests.testPage(JobPage.class, AppContext.class, appContext, params);
}
@Test public void testTasksView() {
AppContext appContext = new MockAppContext(0, 1, 1, 1);
Map<String, String> params = getTaskParams(appContext);
WebAppTests.testPage(TasksPage.class, AppContext.class, appContext, params);
}
@Test public void testTaskView() {
AppContext appContext = new MockAppContext(0, 1, 1, 1);
Map<String, String> params = getTaskParams(appContext);
App app = new App(appContext);
app.setJob(appContext.getAllJobs().values().iterator().next());
app.setTask(app.getJob().getTasks().values().iterator().next());
WebAppTests.testPage(TaskPage.class, App.class, app, params);
}
public static Map<String, String> getJobParams(AppContext appContext) {
JobId jobId = appContext.getAllJobs().entrySet().iterator().next().getKey();
Map<String, String> params = new HashMap<String, String>();
params.put(AMParams.JOB_ID, MRApps.toString(jobId));
return params;
}
public static Map<String, String> getTaskParams(AppContext appContext) {
JobId jobId = appContext.getAllJobs().entrySet().iterator().next().getKey();
Entry<TaskId, Task> e = appContext.getJob(jobId).getTasks().entrySet().iterator().next();
e.getValue().getType();
Map<String, String> params = new HashMap<String, String>();
params.put(AMParams.JOB_ID, MRApps.toString(jobId));
params.put(AMParams.TASK_ID, MRApps.toString(e.getKey()));
params.put(AMParams.TASK_TYPE, MRApps.taskSymbol(e.getValue().getType()));
return params;
}
@Test public void testConfView() {
WebAppTests.testPage(JobConfPage.class, AppContext.class,
new MockAppContext(0, 1, 1, 1));
}
@Test public void testCountersView() {
AppContext appContext = new MockAppContext(0, 1, 1, 1);
Map<String, String> params = getJobParams(appContext);
WebAppTests.testPage(CountersPage.class, AppContext.class,
appContext, params);
}
@Test public void testSingleCounterView() {
AppContext appContext = new MockAppContext(0, 1, 1, 1);
Job job = appContext.getAllJobs().values().iterator().next();
// add a failed task to the job without any counters
Task failedTask = MockJobs.newTask(job.getID(), 2, 1, true);
Map<TaskId,Task> tasks = job.getTasks();
tasks.put(failedTask.getID(), failedTask);
Map<String, String> params = getJobParams(appContext);
params.put(AMParams.COUNTER_GROUP,
"org.apache.hadoop.mapreduce.FileSystemCounter");
params.put(AMParams.COUNTER_NAME, "HDFS_WRITE_OPS");
WebAppTests.testPage(SingleCounterPage.class, AppContext.class,
appContext, params);
}
@Test public void testTaskCountersView() {
AppContext appContext = new MockAppContext(0, 1, 1, 1);
Map<String, String> params = getTaskParams(appContext);
WebAppTests.testPage(CountersPage.class, AppContext.class,
appContext, params);
}
@Test public void testSingleTaskCounterView() {
AppContext appContext = new MockAppContext(0, 1, 1, 2);
Map<String, String> params = getTaskParams(appContext);
params.put(AMParams.COUNTER_GROUP,
"org.apache.hadoop.mapreduce.FileSystemCounter");
params.put(AMParams.COUNTER_NAME, "HDFS_WRITE_OPS");
// remove counters from one task attempt
// to test handling of missing counters
TaskId taskID = MRApps.toTaskID(params.get(AMParams.TASK_ID));
Job job = appContext.getJob(taskID.getJobId());
Task task = job.getTask(taskID);
TaskAttempt attempt = task.getAttempts().values().iterator().next();
attempt.getReport().setCounters(null);
WebAppTests.testPage(SingleCounterPage.class, AppContext.class,
appContext, params);
}
@Test
public void testMRWebAppSSLDisabled() throws Exception {
MRApp app = new MRApp(2, 2, true, this.getClass().getName(), true) {
@Override
protected ClientService createClientService(AppContext context) {
return new MRClientService(context);
}
};
Configuration conf = new Configuration();
// MR explicitly disables SSL, even though the policy is set to HTTPS_ONLY
conf.set(YarnConfiguration.YARN_HTTP_POLICY_KEY, Policy.HTTPS_ONLY.name());
Job job = app.submit(conf);
String hostPort =
NetUtils.getHostPortString(((MRClientService) app.getClientService())
.getWebApp().getListenerAddress());
// http:// should be accessible
URL httpUrl = new URL("http://" + hostPort);
HttpURLConnection conn = (HttpURLConnection) httpUrl.openConnection();
InputStream in = conn.getInputStream();
ByteArrayOutputStream out = new ByteArrayOutputStream();
IOUtils.copyBytes(in, out, 1024);
Assert.assertTrue(out.toString().contains("MapReduce Application"));
// https:// is not accessible.
URL httpsUrl = new URL("https://" + hostPort);
try {
HttpURLConnection httpsConn =
(HttpURLConnection) httpsUrl.openConnection();
httpsConn.getInputStream();
Assert.fail("https:// is not accessible, expected to fail");
} catch (Exception e) {
Assert.assertTrue(e instanceof SSLException);
}
app.waitForState(job, JobState.SUCCEEDED);
app.verifyCompleted();
}
static String webProxyBase = null;
public static class TestAMFilterInitializer extends AmFilterInitializer {
@Override
protected String getApplicationWebProxyBase() {
return webProxyBase;
}
}
@Test
public void testMRWebAppRedirection() throws Exception {
String[] schemePrefix =
{ WebAppUtils.HTTP_PREFIX, WebAppUtils.HTTPS_PREFIX };
for (String scheme : schemePrefix) {
MRApp app = new MRApp(2, 2, true, this.getClass().getName(), true) {
@Override
protected ClientService createClientService(AppContext context) {
return new MRClientService(context);
}
};
Configuration conf = new Configuration();
conf.set(YarnConfiguration.PROXY_ADDRESS, "9.9.9.9");
conf.set(YarnConfiguration.YARN_HTTP_POLICY_KEY, scheme
.equals(WebAppUtils.HTTPS_PREFIX) ? Policy.HTTPS_ONLY.name()
: Policy.HTTP_ONLY.name());
webProxyBase = "/proxy/" + app.getAppID();
conf.set("hadoop.http.filter.initializers",
TestAMFilterInitializer.class.getName());
Job job = app.submit(conf);
String hostPort =
NetUtils.getHostPortString(((MRClientService) app.getClientService())
.getWebApp().getListenerAddress());
URL httpUrl = new URL("http://" + hostPort + "/mapreduce");
HttpURLConnection conn = (HttpURLConnection) httpUrl.openConnection();
conn.setInstanceFollowRedirects(false);
conn.connect();
String expectedURL =
scheme + conf.get(YarnConfiguration.PROXY_ADDRESS)
+ ProxyUriUtils.getPath(app.getAppID(), "/mapreduce");
Assert.assertEquals(expectedURL,
conn.getHeaderField(HttpHeaders.LOCATION));
Assert.assertEquals(HttpStatus.SC_MOVED_TEMPORARILY,
conn.getResponseCode());
app.waitForState(job, JobState.SUCCEEDED);
app.verifyCompleted();
}
}
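// Standalone entry point for manual inspection: starts the AM web app in dev
// mode on port 58888 against a MockAppContext so the pages exercised above
// can be viewed in a browser.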
public static void main(String[] args) {
WebApps.$for("yarn", AppContext.class, new MockAppContext(0, 8, 88, 4)).
at(58888).inDevMode().start(new AMWebApp()).joinThread();
}
}
| 10,732 | 39.198502 | 93 | java |
hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesTasks.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.webapp;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.StringReader;
import java.util.Map;
import javax.ws.rs.core.MediaType;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.MockAppContext;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
import org.codehaus.jettison.json.JSONArray;
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;
import org.junit.Before;
import org.junit.Test;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.NodeList;
import org.xml.sax.InputSource;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.servlet.GuiceServletContextListener;
import com.google.inject.servlet.ServletModule;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.ClientResponse.Status;
import com.sun.jersey.api.client.UniformInterfaceException;
import com.sun.jersey.api.client.WebResource;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
import com.sun.jersey.test.framework.JerseyTest;
import com.sun.jersey.test.framework.WebAppDescriptor;
/**
* Test the app master web service Rest API for getting tasks, a specific task,
* and task counters.
*
* /ws/v1/mapreduce/jobs/{jobid}/tasks
* /ws/v1/mapreduce/jobs/{jobid}/tasks/{taskid}
* /ws/v1/mapreduce/jobs/{jobid}/tasks/{taskid}/counters
*/
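// Illustrative note on the query parameter exercised below: the tasks list
// accepts an optional "type" parameter, e.g. .../tasks?type=m for map tasks
// or .../tasks?type=r for reduce tasks; any other value is rejected with a
// 400 BadRequestException ("tasktype must be either m or r"), as covered by
// testTasksQueryInvalid.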
public class TestAMWebServicesTasks extends JerseyTest {
private static Configuration conf = new Configuration();
private static AppContext appContext;
private Injector injector = Guice.createInjector(new ServletModule() {
@Override
protected void configureServlets() {
appContext = new MockAppContext(0, 1, 2, 1);
bind(JAXBContextResolver.class);
bind(AMWebServices.class);
bind(GenericExceptionHandler.class);
bind(AppContext.class).toInstance(appContext);
bind(Configuration.class).toInstance(conf);
serve("/*").with(GuiceContainer.class);
}
});
public class GuiceServletConfig extends GuiceServletContextListener {
@Override
protected Injector getInjector() {
return injector;
}
}
@Before
@Override
public void setUp() throws Exception {
super.setUp();
}
public TestAMWebServicesTasks() {
super(new WebAppDescriptor.Builder(
"org.apache.hadoop.mapreduce.v2.app.webapp")
.contextListenerClass(GuiceServletConfig.class)
.filterClass(com.google.inject.servlet.GuiceFilter.class)
.contextPath("jersey-guice-filter").servletPath("/").build());
}
@Test
public void testTasks() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").path(jobId).path("tasks")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject tasks = json.getJSONObject("tasks");
JSONArray arr = tasks.getJSONArray("task");
assertEquals("incorrect number of elements", 2, arr.length());
verifyAMTask(arr, jobsMap.get(id), null);
}
}
@Test
public void testTasksDefault() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").path(jobId).path("tasks").get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject tasks = json.getJSONObject("tasks");
JSONArray arr = tasks.getJSONArray("task");
assertEquals("incorrect number of elements", 2, arr.length());
verifyAMTask(arr, jobsMap.get(id), null);
}
}
@Test
public void testTasksSlash() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").path(jobId).path("tasks/")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject tasks = json.getJSONObject("tasks");
JSONArray arr = tasks.getJSONArray("task");
assertEquals("incorrect number of elements", 2, arr.length());
verifyAMTask(arr, jobsMap.get(id), null);
}
}
@Test
public void testTasksXML() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").path(jobId).path("tasks")
.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
String xml = response.getEntity(String.class);
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
InputSource is = new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom = db.parse(is);
NodeList tasks = dom.getElementsByTagName("tasks");
assertEquals("incorrect number of elements", 1, tasks.getLength());
NodeList task = dom.getElementsByTagName("task");
verifyAMTaskXML(task, jobsMap.get(id));
}
}
@Test
public void testTasksQueryMap() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
String type = "m";
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").path(jobId).path("tasks").queryParam("type", type)
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject tasks = json.getJSONObject("tasks");
JSONArray arr = tasks.getJSONArray("task");
assertEquals("incorrect number of elements", 1, arr.length());
verifyAMTask(arr, jobsMap.get(id), type);
}
}
@Test
public void testTasksQueryReduce() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
String type = "r";
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").path(jobId).path("tasks").queryParam("type", type)
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject tasks = json.getJSONObject("tasks");
JSONArray arr = tasks.getJSONArray("task");
assertEquals("incorrect number of elements", 1, arr.length());
verifyAMTask(arr, jobsMap.get(id), type);
}
}
@Test
public void testTasksQueryInvalid() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
// tasktype must be exactly either "m" or "r"
String tasktype = "reduce";
try {
r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId)
.path("tasks").queryParam("type", tasktype)
.accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
fail("should have thrown exception on invalid uri");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject msg = response.getEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements", 3, exception.length());
String message = exception.getString("message");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
WebServicesTestUtils.checkStringMatch("exception message",
"java.lang.Exception: tasktype must be either m or r", message);
WebServicesTestUtils.checkStringMatch("exception type",
"BadRequestException", type);
WebServicesTestUtils.checkStringMatch("exception classname",
"org.apache.hadoop.yarn.webapp.BadRequestException", classname);
}
}
}
@Test
public void testTaskId() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
for (Task task : jobsMap.get(id).getTasks().values()) {
String tid = MRApps.toString(task.getID());
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").path(jobId).path("tasks").path(tid)
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("task");
verifyAMSingleTask(info, task);
}
}
}
@Test
public void testTaskIdSlash() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
for (Task task : jobsMap.get(id).getTasks().values()) {
String tid = MRApps.toString(task.getID());
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").path(jobId).path("tasks").path(tid + "/")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("task");
verifyAMSingleTask(info, task);
}
}
}
@Test
public void testTaskIdDefault() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
for (Task task : jobsMap.get(id).getTasks().values()) {
String tid = MRApps.toString(task.getID());
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").path(jobId).path("tasks").path(tid)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("task");
verifyAMSingleTask(info, task);
}
}
}
@Test
public void testTaskIdBogus() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
String tid = "bogustaskid";
try {
r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId)
.path("tasks").path(tid).get(JSONObject.class);
fail("should have thrown exception on invalid uri");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject msg = response.getEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements", 3, exception.length());
String message = exception.getString("message");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
WebServicesTestUtils.checkStringEqual("exception message",
"java.lang.Exception: TaskId string : "
+ "bogustaskid is not properly formed"
+ "\nReason: java.util.regex.Matcher[pattern=" +
TaskID.TASK_ID_REGEX + " region=0,11 lastmatch=]", message);
WebServicesTestUtils.checkStringMatch("exception type",
"NotFoundException", type);
WebServicesTestUtils.checkStringMatch("exception classname",
"org.apache.hadoop.yarn.webapp.NotFoundException", classname);
}
}
}
@Test
public void testTaskIdNonExist() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
String tid = "task_0_0000_m_000000";
try {
r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId)
.path("tasks").path(tid).get(JSONObject.class);
fail("should have thrown exception on invalid uri");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject msg = response.getEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements", 3, exception.length());
String message = exception.getString("message");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
WebServicesTestUtils.checkStringMatch("exception message",
"java.lang.Exception: task not found with id task_0_0000_m_000000",
message);
WebServicesTestUtils.checkStringMatch("exception type",
"NotFoundException", type);
WebServicesTestUtils.checkStringMatch("exception classname",
"org.apache.hadoop.yarn.webapp.NotFoundException", classname);
}
}
}
@Test
public void testTaskIdInvalid() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
String tid = "task_0_0000_d_000000";
try {
r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId)
.path("tasks").path(tid).get(JSONObject.class);
fail("should have thrown exception on invalid uri");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject msg = response.getEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements", 3, exception.length());
String message = exception.getString("message");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
WebServicesTestUtils.checkStringEqual("exception message",
"java.lang.Exception: TaskId string : "
+ "task_0_0000_d_000000 is not properly formed"
+ "\nReason: java.util.regex.Matcher[pattern=" +
TaskID.TASK_ID_REGEX + " region=0,20 lastmatch=]", message);
WebServicesTestUtils.checkStringMatch("exception type",
"NotFoundException", type);
WebServicesTestUtils.checkStringMatch("exception classname",
"org.apache.hadoop.yarn.webapp.NotFoundException", classname);
}
}
}
@Test
public void testTaskIdInvalid2() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
String tid = "task_0_m_000000";
try {
r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId)
.path("tasks").path(tid).get(JSONObject.class);
fail("should have thrown exception on invalid uri");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject msg = response.getEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements", 3, exception.length());
String message = exception.getString("message");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
WebServicesTestUtils.checkStringEqual("exception message",
"java.lang.Exception: TaskId string : "
+ "task_0_m_000000 is not properly formed"
+ "\nReason: java.util.regex.Matcher[pattern=" +
TaskID.TASK_ID_REGEX + " region=0,15 lastmatch=]", message);
WebServicesTestUtils.checkStringMatch("exception type",
"NotFoundException", type);
WebServicesTestUtils.checkStringMatch("exception classname",
"org.apache.hadoop.yarn.webapp.NotFoundException", classname);
}
}
}
@Test
public void testTaskIdInvalid3() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
String tid = "task_0_0000_m";
try {
r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId)
.path("tasks").path(tid).get(JSONObject.class);
fail("should have thrown exception on invalid uri");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject msg = response.getEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements", 3, exception.length());
String message = exception.getString("message");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
WebServicesTestUtils.checkStringEqual("exception message",
"java.lang.Exception: TaskId string : "
+ "task_0_0000_m is not properly formed"
+ "\nReason: java.util.regex.Matcher[pattern=" +
TaskID.TASK_ID_REGEX + " region=0,13 lastmatch=]", message);
WebServicesTestUtils.checkStringMatch("exception type",
"NotFoundException", type);
WebServicesTestUtils.checkStringMatch("exception classname",
"org.apache.hadoop.yarn.webapp.NotFoundException", classname);
}
}
}
@Test
public void testTaskIdXML() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
for (Task task : jobsMap.get(id).getTasks().values()) {
String tid = MRApps.toString(task.getID());
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").path(jobId).path("tasks").path(tid)
.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
String xml = response.getEntity(String.class);
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
InputSource is = new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom = db.parse(is);
NodeList nodes = dom.getElementsByTagName("task");
for (int i = 0; i < nodes.getLength(); i++) {
Element element = (Element) nodes.item(i);
verifyAMSingleTaskXML(element, task);
}
}
}
}
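  // Verifies a single task JSON object: exactly nine fields are expected and
  // each is checked against the corresponding value in the Task record.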
public void verifyAMSingleTask(JSONObject info, Task task)
throws JSONException {
assertEquals("incorrect number of elements", 9, info.length());
verifyTaskGeneric(task, info.getString("id"), info.getString("state"),
info.getString("type"), info.getString("successfulAttempt"),
info.getLong("startTime"), info.getLong("finishTime"),
info.getLong("elapsedTime"), (float) info.getDouble("progress"),
info.getString("status"));
}
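  // Verifies that every task of the requested type in the job appears in the
  // JSON array returned by the tasks query.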
public void verifyAMTask(JSONArray arr, Job job, String type)
throws JSONException {
for (Task task : job.getTasks().values()) {
TaskId id = task.getID();
String tid = MRApps.toString(id);
      boolean found = false;
if (type != null && task.getType() == MRApps.taskType(type)) {
for (int i = 0; i < arr.length(); i++) {
JSONObject info = arr.getJSONObject(i);
if (tid.matches(info.getString("id"))) {
found = true;
verifyAMSingleTask(info, task);
}
}
assertTrue("task with id: " + tid + " not in web service output", found);
}
}
}
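  // Compares the individual task fields from the web service response with
  // the values held in the task's report.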
public void verifyTaskGeneric(Task task, String id, String state,
String type, String successfulAttempt, long startTime, long finishTime,
long elapsedTime, float progress, String status) {
TaskId taskid = task.getID();
String tid = MRApps.toString(taskid);
TaskReport report = task.getReport();
WebServicesTestUtils.checkStringMatch("id", tid, id);
WebServicesTestUtils.checkStringMatch("type", task.getType().toString(),
type);
WebServicesTestUtils.checkStringMatch("state", report.getTaskState()
.toString(), state);
    // not easily checked without duplicating logic, just make sure it's here
assertNotNull("successfulAttempt null", successfulAttempt);
assertEquals("startTime wrong", report.getStartTime(), startTime);
assertEquals("finishTime wrong", report.getFinishTime(), finishTime);
assertEquals("elapsedTime wrong", finishTime - startTime, elapsedTime);
assertEquals("progress wrong", report.getProgress() * 100, progress, 1e-3f);
assertEquals("status wrong", report.getStatus(), status);
}
public void verifyAMSingleTaskXML(Element element, Task task) {
verifyTaskGeneric(task, WebServicesTestUtils.getXmlString(element, "id"),
WebServicesTestUtils.getXmlString(element, "state"),
WebServicesTestUtils.getXmlString(element, "type"),
WebServicesTestUtils.getXmlString(element, "successfulAttempt"),
WebServicesTestUtils.getXmlLong(element, "startTime"),
WebServicesTestUtils.getXmlLong(element, "finishTime"),
WebServicesTestUtils.getXmlLong(element, "elapsedTime"),
WebServicesTestUtils.getXmlFloat(element, "progress"),
WebServicesTestUtils.getXmlString(element, "status"));
}
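  // Verifies the XML task list: the job is expected to expose exactly two
  // tasks and each of them must be present in the returned node list.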
public void verifyAMTaskXML(NodeList nodes, Job job) {
assertEquals("incorrect number of elements", 2, nodes.getLength());
for (Task task : job.getTasks().values()) {
TaskId id = task.getID();
String tid = MRApps.toString(id);
      boolean found = false;
for (int i = 0; i < nodes.getLength(); i++) {
Element element = (Element) nodes.item(i);
if (tid.matches(WebServicesTestUtils.getXmlString(element, "id"))) {
found = true;
verifyAMSingleTaskXML(element, task);
}
}
assertTrue("task with id: " + tid + " not in web service output", found);
}
}
@Test
public void testTaskIdCounters() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
for (Task task : jobsMap.get(id).getTasks().values()) {
String tid = MRApps.toString(task.getID());
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").path(jobId).path("tasks").path(tid).path("counters")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("jobTaskCounters");
verifyAMJobTaskCounters(info, task);
}
}
}
@Test
public void testTaskIdCountersSlash() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
for (Task task : jobsMap.get(id).getTasks().values()) {
String tid = MRApps.toString(task.getID());
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").path(jobId).path("tasks").path(tid).path("counters/")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("jobTaskCounters");
verifyAMJobTaskCounters(info, task);
}
}
}
@Test
public void testTaskIdCountersDefault() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
for (Task task : jobsMap.get(id).getTasks().values()) {
String tid = MRApps.toString(task.getID());
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").path(jobId).path("tasks").path(tid).path("counters")
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("jobTaskCounters");
verifyAMJobTaskCounters(info, task);
}
}
}
@Test
public void testJobTaskCountersXML() throws Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
for (Task task : jobsMap.get(id).getTasks().values()) {
String tid = MRApps.toString(task.getID());
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("jobs").path(jobId).path("tasks").path(tid).path("counters")
.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
String xml = response.getEntity(String.class);
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
InputSource is = new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom = db.parse(is);
NodeList info = dom.getElementsByTagName("jobTaskCounters");
verifyAMTaskCountersXML(info, task);
}
}
}
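  // Verifies the JSON task counters: checks the task id and walks each
  // counter group, requiring non-empty names and non-negative values.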
public void verifyAMJobTaskCounters(JSONObject info, Task task)
throws JSONException {
assertEquals("incorrect number of elements", 2, info.length());
WebServicesTestUtils.checkStringMatch("id", MRApps.toString(task.getID()),
info.getString("id"));
    // just do a simple verification of the fields - not that the data in
    // the fields is correct
JSONArray counterGroups = info.getJSONArray("taskCounterGroup");
for (int i = 0; i < counterGroups.length(); i++) {
JSONObject counterGroup = counterGroups.getJSONObject(i);
String name = counterGroup.getString("counterGroupName");
assertTrue("name not set", (name != null && !name.isEmpty()));
JSONArray counters = counterGroup.getJSONArray("counter");
for (int j = 0; j < counters.length(); j++) {
JSONObject counter = counters.getJSONObject(j);
String counterName = counter.getString("name");
assertTrue("name not set",
(counterName != null && !counterName.isEmpty()));
long value = counter.getLong("value");
assertTrue("value >= 0", value >= 0);
}
}
}
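  // XML variant of the task counter verification above.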
public void verifyAMTaskCountersXML(NodeList nodes, Task task) {
for (int i = 0; i < nodes.getLength(); i++) {
Element element = (Element) nodes.item(i);
WebServicesTestUtils.checkStringMatch("id",
MRApps.toString(task.getID()),
WebServicesTestUtils.getXmlString(element, "id"));
      // just do a simple verification of the fields - not that the data in
      // the fields is correct
NodeList groups = element.getElementsByTagName("taskCounterGroup");
for (int j = 0; j < groups.getLength(); j++) {
Element counters = (Element) groups.item(j);
assertNotNull("should have counters in the web service info", counters);
String name = WebServicesTestUtils.getXmlString(counters,
"counterGroupName");
assertTrue("name not set", (name != null && !name.isEmpty()));
NodeList counterArr = counters.getElementsByTagName("counter");
for (int z = 0; z < counterArr.getLength(); z++) {
Element counter = (Element) counterArr.item(z);
String counterName = WebServicesTestUtils.getXmlString(counter,
"name");
assertTrue("counter name not set",
(counterName != null && !counterName.isEmpty()));
long value = WebServicesTestUtils.getXmlLong(counter, "value");
assertTrue("value not >= 0", value >= 0);
}
}
}
}
}
| 33,555
| 42.635891
| 81
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksBlockForTest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.webapp;
import java.util.HashMap;
import java.util.Map;
/**
 * Class TasksBlockForTest overrides some methods of TasksBlock for testing.
*/
public class TasksBlockForTest extends TasksBlock {
private final Map<String, String> params = new HashMap<String, String>();
public TasksBlockForTest(App app) {
super(app);
}
public void addParameter(String name, String value) {
params.put(name, value);
}
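  // Returns the test-supplied parameter value instead of reading it from the
  // real request context.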
@Override
public String $(String key, String defaultValue) {
String value = params.get(key);
return value == null ? defaultValue : value;
}
  @Override
  public String url(String... parts) {
String result = "url://";
for (String string : parts) {
      result += string + ":";
}
return result;
}
}
| 1,557
| 30.16
| 75
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServices.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.webapp;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.StringReader;
import java.util.Set;
import javax.ws.rs.core.MediaType;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.MockAppContext;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
import org.codehaus.jettison.json.JSONArray;
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;
import org.junit.Before;
import org.junit.Test;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.NodeList;
import org.xml.sax.InputSource;
import com.google.common.collect.Sets;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.servlet.GuiceServletContextListener;
import com.google.inject.servlet.ServletModule;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.ClientResponse.Status;
import com.sun.jersey.api.client.UniformInterfaceException;
import com.sun.jersey.api.client.WebResource;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
import com.sun.jersey.test.framework.JerseyTest;
import com.sun.jersey.test.framework.WebAppDescriptor;
/**
 * Test the MapReduce Application Master info web services APIs. Also test
 * non-existent URLs.
*
* /ws/v1/mapreduce
* /ws/v1/mapreduce/info
*/
public class TestAMWebServices extends JerseyTest {
private static Configuration conf = new Configuration();
private static MockAppContext appContext;
private Injector injector = Guice.createInjector(new ServletModule() {
@Override
protected void configureServlets() {
appContext = new MockAppContext(0, 1, 1, 1);
appContext.setBlacklistedNodes(Sets.newHashSet("badnode1", "badnode2"));
bind(JAXBContextResolver.class);
bind(AMWebServices.class);
bind(GenericExceptionHandler.class);
bind(AppContext.class).toInstance(appContext);
bind(Configuration.class).toInstance(conf);
serve("/*").with(GuiceContainer.class);
}
});
public class GuiceServletConfig extends GuiceServletContextListener {
@Override
protected Injector getInjector() {
return injector;
}
}
@Before
@Override
public void setUp() throws Exception {
super.setUp();
}
public TestAMWebServices() {
super(new WebAppDescriptor.Builder(
"org.apache.hadoop.mapreduce.v2.app.webapp")
.contextListenerClass(GuiceServletConfig.class)
.filterClass(com.google.inject.servlet.GuiceFilter.class)
.contextPath("jersey-guice-filter").servletPath("/").build());
}
@Test
public void testAM() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
verifyAMInfo(json.getJSONObject("info"), appContext);
}
@Test
public void testAMSlash() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("mapreduce/")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
verifyAMInfo(json.getJSONObject("info"), appContext);
}
@Test
public void testAMDefault() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("mapreduce/")
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
verifyAMInfo(json.getJSONObject("info"), appContext);
}
@Test
public void testAMXML() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
String xml = response.getEntity(String.class);
verifyAMInfoXML(xml, appContext);
}
@Test
public void testInfo() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("info").accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
verifyAMInfo(json.getJSONObject("info"), appContext);
}
@Test
public void testInfoSlash() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("info/").accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
verifyAMInfo(json.getJSONObject("info"), appContext);
}
@Test
public void testInfoDefault() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("info/").get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
verifyAMInfo(json.getJSONObject("info"), appContext);
}
@Test
public void testInfoXML() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("info/").accept(MediaType.APPLICATION_XML)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
String xml = response.getEntity(String.class);
verifyAMInfoXML(xml, appContext);
}
@Test
public void testInvalidUri() throws JSONException, Exception {
WebResource r = resource();
String responseStr = "";
try {
responseStr = r.path("ws").path("v1").path("mapreduce").path("bogus")
.accept(MediaType.APPLICATION_JSON).get(String.class);
fail("should have thrown exception on invalid uri");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
WebServicesTestUtils.checkStringMatch(
"error string exists and shouldn't", "", responseStr);
}
}
@Test
public void testInvalidUri2() throws JSONException, Exception {
WebResource r = resource();
String responseStr = "";
try {
responseStr = r.path("ws").path("v1").path("invalid")
.accept(MediaType.APPLICATION_JSON).get(String.class);
fail("should have thrown exception on invalid uri");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
WebServicesTestUtils.checkStringMatch(
"error string exists and shouldn't", "", responseStr);
}
}
@Test
public void testInvalidAccept() throws JSONException, Exception {
WebResource r = resource();
String responseStr = "";
try {
responseStr = r.path("ws").path("v1").path("mapreduce")
.accept(MediaType.TEXT_PLAIN).get(String.class);
fail("should have thrown exception on invalid uri");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.INTERNAL_SERVER_ERROR,
response.getClientResponseStatus());
WebServicesTestUtils.checkStringMatch(
"error string exists and shouldn't", "", responseStr);
}
}
@Test
public void testBlacklistedNodes() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("blacklistednodes").accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
verifyBlacklistedNodesInfo(json, appContext);
}
@Test
public void testBlacklistedNodesXML() throws Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.path("blacklistednodes").accept(MediaType.APPLICATION_XML)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
String xml = response.getEntity(String.class);
verifyBlacklistedNodesInfoXML(xml, appContext);
}
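  // Verifies the JSON info object: five fields describing the application
  // master are checked against the AppContext.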
public void verifyAMInfo(JSONObject info, AppContext ctx)
throws JSONException {
assertEquals("incorrect number of elements", 5, info.length());
verifyAMInfoGeneric(ctx, info.getString("appId"), info.getString("user"),
info.getString("name"), info.getLong("startedOn"),
info.getLong("elapsedTime"));
}
public void verifyAMInfoXML(String xml, AppContext ctx)
throws JSONException, Exception {
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
InputSource is = new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom = db.parse(is);
NodeList nodes = dom.getElementsByTagName("info");
assertEquals("incorrect number of elements", 1, nodes.getLength());
for (int i = 0; i < nodes.getLength(); i++) {
Element element = (Element) nodes.item(i);
verifyAMInfoGeneric(ctx,
WebServicesTestUtils.getXmlString(element, "appId"),
WebServicesTestUtils.getXmlString(element, "user"),
WebServicesTestUtils.getXmlString(element, "name"),
WebServicesTestUtils.getXmlLong(element, "startedOn"),
WebServicesTestUtils.getXmlLong(element, "elapsedTime"));
}
}
public void verifyAMInfoGeneric(AppContext ctx, String id, String user,
String name, long startedOn, long elapsedTime) {
WebServicesTestUtils.checkStringMatch("id", ctx.getApplicationID()
.toString(), id);
WebServicesTestUtils.checkStringMatch("user", ctx.getUser().toString(),
user);
WebServicesTestUtils.checkStringMatch("name", ctx.getApplicationName(),
name);
assertEquals("startedOn incorrect", ctx.getStartTime(), startedOn);
assertTrue("elapsedTime not greater then 0", (elapsedTime > 0));
}
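  // Verifies that the blacklisted nodes reported by the web service match
  // those recorded in the AppContext.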
public void verifyBlacklistedNodesInfo(JSONObject blacklist, AppContext ctx)
throws JSONException, Exception{
JSONArray array = blacklist.getJSONArray("blacklistedNodes");
assertEquals(array.length(), ctx.getBlacklistedNodes().size());
for (int i = 0; i < array.length(); i++) {
assertTrue(ctx.getBlacklistedNodes().contains(array.getString(i)));
}
}
public void verifyBlacklistedNodesInfoXML(String xml, AppContext ctx)
throws JSONException, Exception {
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
InputSource is = new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom = db.parse(is);
NodeList infonodes = dom.getElementsByTagName("blacklistednodesinfo");
assertEquals("incorrect number of elements", 1, infonodes.getLength());
NodeList nodes = dom.getElementsByTagName("blacklistedNodes");
Set<String> blacklistedNodes = ctx.getBlacklistedNodes();
assertEquals("incorrect number of elements", blacklistedNodes.size(),
nodes.getLength());
for (int i = 0; i < nodes.getLength(); i++) {
Element element = (Element) nodes.item(i);
assertTrue(
blacklistedNodes.contains(element.getFirstChild().getNodeValue()));
}
}
}
| 13,694
| 38.580925
| 78
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestBlocks.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.webapp;
import java.io.ByteArrayOutputStream;
import java.io.PrintWriter;
import java.util.HashMap;
import java.util.Map;
import org.junit.Test;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.JobIdPBImpl;
import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.TaskAttemptIdPBImpl;
import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.TaskIdPBImpl;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.app.webapp.AttemptsPage.FewAttemptsBlock;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
import org.apache.hadoop.yarn.webapp.view.BlockForTest;
import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
import org.apache.hadoop.yarn.webapp.view.HtmlBlock.Block;
import static org.mockito.Mockito.*;
import static org.junit.Assert.*;
public class TestBlocks {
private ByteArrayOutputStream data = new ByteArrayOutputStream();
/**
* Test rendering for ConfBlock
*/
@Test
public void testConfigurationBlock() throws Exception {
AppContext ctx = mock(AppContext.class);
Job job = mock(Job.class);
Path path = new Path("conf");
Configuration configuration = new Configuration();
configuration.set("Key for test", "Value for test");
when(job.getConfFile()).thenReturn(path);
when(job.loadConfFile()).thenReturn(configuration);
when(ctx.getJob(any(JobId.class))).thenReturn(job);
ConfBlockForTest configurationBlock = new ConfBlockForTest(ctx);
PrintWriter pWriter = new PrintWriter(data);
Block html = new BlockForTest(new HtmlBlockForTest(), pWriter, 0, false);
configurationBlock.render(html);
pWriter.flush();
assertTrue(data.toString().contains(
"Sorry, can't do anything without a JobID"));
configurationBlock.addParameter(AMParams.JOB_ID, "job_01_01");
data.reset();
configurationBlock.render(html);
pWriter.flush();
assertTrue(data.toString().contains("Key for test"));
assertTrue(data.toString().contains("Value for test"));
}
/**
* Test rendering for TasksBlock
*/
@Test
public void testTasksBlock() throws Exception {
ApplicationId appId = ApplicationIdPBImpl.newInstance(0, 1);
JobId jobId = new JobIdPBImpl();
jobId.setId(0);
jobId.setAppId(appId);
TaskId taskId = new TaskIdPBImpl();
taskId.setId(0);
taskId.setTaskType(TaskType.MAP);
taskId.setJobId(jobId);
Task task = mock(Task.class);
when(task.getID()).thenReturn(taskId);
TaskReport report = mock(TaskReport.class);
when(report.getProgress()).thenReturn(0.7f);
when(report.getTaskState()).thenReturn(TaskState.SUCCEEDED);
when(report.getStartTime()).thenReturn(100001L);
when(report.getFinishTime()).thenReturn(100011L);
when(report.getStatus()).thenReturn("Dummy Status \n*");
when(task.getReport()).thenReturn(report);
when(task.getType()).thenReturn(TaskType.MAP);
Map<TaskId, Task> tasks = new HashMap<TaskId, Task>();
tasks.put(taskId, task);
AppContext ctx = mock(AppContext.class);
Job job = mock(Job.class);
when(job.getTasks()).thenReturn(tasks);
App app = new App(ctx);
app.setJob(job);
TasksBlockForTest taskBlock = new TasksBlockForTest(app);
taskBlock.addParameter(AMParams.TASK_TYPE, "m");
PrintWriter pWriter = new PrintWriter(data);
Block html = new BlockForTest(new HtmlBlockForTest(), pWriter, 0, false);
taskBlock.render(html);
pWriter.flush();
assertTrue(data.toString().contains("task_0_0001_m_000000"));
assertTrue(data.toString().contains("70.00"));
assertTrue(data.toString().contains("SUCCEEDED"));
assertTrue(data.toString().contains("100001"));
assertTrue(data.toString().contains("100011"));
assertFalse(data.toString().contains("Dummy Status \n*"));
assertTrue(data.toString().contains("Dummy Status \\n*"));
}
/**
* test AttemptsBlock's rendering.
*/
@Test
public void testAttemptsBlock() {
AppContext ctx = mock(AppContext.class);
AppForTest app = new AppForTest(ctx);
JobId jobId = new JobIdPBImpl();
jobId.setId(0);
    jobId.setAppId(ApplicationIdPBImpl.newInstance(0, 1));
TaskId taskId = new TaskIdPBImpl();
taskId.setId(0);
taskId.setTaskType(TaskType.REDUCE);
taskId.setJobId(jobId);
Task task = mock(Task.class);
when(task.getID()).thenReturn(taskId);
TaskReport report = mock(TaskReport.class);
when(task.getReport()).thenReturn(report);
when(task.getType()).thenReturn(TaskType.REDUCE);
Map<TaskId, Task> tasks =
new HashMap<TaskId, Task>();
Map<TaskAttemptId, TaskAttempt> attempts =
new HashMap<TaskAttemptId, TaskAttempt>();
TaskAttempt attempt = mock(TaskAttempt.class);
TaskAttemptId taId = new TaskAttemptIdPBImpl();
taId.setId(0);
taId.setTaskId(task.getID());
when(attempt.getID()).thenReturn(taId);
final TaskAttemptState taState = TaskAttemptState.SUCCEEDED;
when(attempt.getState()).thenReturn(taState);
TaskAttemptReport taReport = mock(TaskAttemptReport.class);
when(taReport.getTaskAttemptState()).thenReturn(taState);
when(attempt.getReport()).thenReturn(taReport);
attempts.put(taId, attempt);
tasks.put(taskId, task);
when(task.getAttempts()).thenReturn(attempts);
app.setTask(task);
Job job = mock(Job.class);
when(job.getTasks(TaskType.REDUCE)).thenReturn(tasks);
app.setJob(job);
AttemptsBlockForTest block = new AttemptsBlockForTest(app,
new Configuration());
block.addParameter(AMParams.TASK_TYPE, "r");
block.addParameter(AMParams.ATTEMPT_STATE, "SUCCESSFUL");
PrintWriter pWriter = new PrintWriter(data);
Block html = new BlockForTest(new HtmlBlockForTest(), pWriter, 0, false);
block.render(html);
pWriter.flush();
assertTrue(data.toString().contains(
"<a href='" + block.url("task",task.getID().toString()) +"'>"
+"attempt_0_0001_r_000000_0</a>"));
}
private class ConfBlockForTest extends ConfBlock {
private final Map<String, String> params = new HashMap<String, String>();
public void addParameter(String name, String value) {
params.put(name, value);
}
@Override
public String $(String key, String defaultValue) {
String value = params.get(key);
return value == null ? defaultValue : value;
}
ConfBlockForTest(AppContext appCtx) {
super(appCtx);
}
}
private class HtmlBlockForTest extends HtmlBlock {
@Override
protected void render(Block html) {
}
}
private class AttemptsBlockForTest extends FewAttemptsBlock {
private final Map<String, String> params = new HashMap<String, String>();
public void addParameter(String name, String value) {
params.put(name, value);
}
    @Override
    public String $(String key, String defaultValue) {
String value = params.get(key);
return value == null ? defaultValue : value;
}
public AttemptsBlockForTest(App ctx, Configuration conf) {
super(ctx, conf);
}
@Override
public String url(String... parts) {
String result = "url://";
for (String string : parts) {
result += string + ":";
}
return result;
}
}
}
| 8,897
| 32.961832
| 79
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppControllerForTest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.webapp;
import java.io.ByteArrayOutputStream;
import java.io.OutputStream;
import java.io.PrintWriter;
import java.util.HashMap;
import java.util.Map;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.webapp.ResponseInfo;
import org.apache.hadoop.yarn.webapp.View;
import static org.mockito.Mockito.*;
/**
 * Class AppControllerForTest overrides some methods of AppController for testing.
*/
public class AppControllerForTest extends AppController {
  private static final Map<String, String> properties = new HashMap<String, String>();
private ResponseInfo responseInfo = new ResponseInfo();
private View view = new ViewForTest();
private Class<?> clazz;
private HttpServletResponse response;
protected AppControllerForTest(App app, Configuration configuration, RequestContext ctx) {
super(app, configuration, ctx);
}
public Class<?> getClazz() {
return clazz;
}
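  // Records the requested class and returns either the stubbed ResponseInfo
  // or the test view instance.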
@SuppressWarnings("unchecked")
public <T> T getInstance(Class<T> cls) {
clazz = cls;
if (cls.equals(ResponseInfo.class)) {
return (T) responseInfo;
}
return (T) view;
}
public ResponseInfo getResponseInfo() {
return responseInfo;
}
public String get(String key, String defaultValue) {
String result = properties.get(key);
if (result == null) {
result = defaultValue;
}
return result;
}
public void set(String key, String value) {
properties.put(key, value);
}
public HttpServletRequest request() {
HttpServletRequest result = mock(HttpServletRequest.class);
when(result.getRemoteUser()).thenReturn("user");
return result;
}
@Override
public HttpServletResponse response() {
if (response == null) {
response = mock(HttpServletResponse.class);
}
return response;
}
public Map<String, String> getProperty() {
return properties;
}
OutputStream data = new ByteArrayOutputStream();
PrintWriter writer = new PrintWriter(data);
public String getData() {
writer.flush();
return data.toString();
}
protected PrintWriter writer() {
if (writer == null) {
writer = new PrintWriter(data);
}
return writer;
}
}
| 3,127
| 25.965517
| 92
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestResourceCalculatorUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.rm;
import org.apache.hadoop.yarn.api.records.Resource;
import org.junit.Assert;
import org.junit.Test;
import java.util.EnumSet;
import static org.apache.hadoop.yarn.proto.YarnServiceProtos.*;
public class TestResourceCalculatorUtils {
@Test
public void testComputeAvailableContainers() throws Exception {
Resource clusterAvailableResources = Resource.newInstance(81920, 40);
Resource nonZeroResource = Resource.newInstance(1024, 2);
int expectedNumberOfContainersForMemory = 80;
int expectedNumberOfContainersForCPU = 20;
verifyDifferentResourceTypes(clusterAvailableResources, nonZeroResource,
expectedNumberOfContainersForMemory,
expectedNumberOfContainersForCPU);
Resource zeroMemoryResource = Resource.newInstance(0,
nonZeroResource.getVirtualCores());
verifyDifferentResourceTypes(clusterAvailableResources, zeroMemoryResource,
Integer.MAX_VALUE,
expectedNumberOfContainersForCPU);
Resource zeroCpuResource = Resource.newInstance(nonZeroResource.getMemory(),
0);
verifyDifferentResourceTypes(clusterAvailableResources, zeroCpuResource,
expectedNumberOfContainersForMemory,
expectedNumberOfContainersForMemory);
}
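  // Checks the computed container counts both when only memory is considered
  // and when both memory and CPU are considered.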
private void verifyDifferentResourceTypes(Resource clusterAvailableResources,
Resource nonZeroResource, int expectedNumberOfContainersForMemoryOnly,
int expectedNumberOfContainersOverall) {
Assert.assertEquals("Incorrect number of available containers for Memory",
expectedNumberOfContainersForMemoryOnly,
ResourceCalculatorUtils.computeAvailableContainers(
clusterAvailableResources, nonZeroResource,
EnumSet.of(SchedulerResourceTypes.MEMORY)));
Assert.assertEquals("Incorrect number of available containers overall",
expectedNumberOfContainersOverall,
ResourceCalculatorUtils.computeAvailableContainers(
clusterAvailableResources, nonZeroResource,
EnumSet.of(SchedulerResourceTypes.CPU,
SchedulerResourceTypes.MEMORY)));
}
}
| 2,938
| 37.671053
| 80
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.rm;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyFloat;
import static org.mockito.Matchers.anyInt;
import static org.mockito.Matchers.isA;
import static org.mockito.Mockito.doCallRealMethod;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.ClusterInfo;
import org.apache.hadoop.mapreduce.v2.app.MRApp;
import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.JobStateInternal;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttemptStateInternal;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobUpdatedNodesEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptKillEvent;
import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl;
import org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NMToken;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.event.DrainDispatcher;
import org.apache.hadoop.yarn.event.Event;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.SchedulerResourceTypes;
import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
import org.apache.hadoop.yarn.server.api.records.NodeAction;
import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.hadoop.yarn.util.Clock;
import org.apache.hadoop.yarn.util.ControlledClock;
import org.apache.hadoop.yarn.util.SystemClock;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import com.google.common.base.Supplier;
@SuppressWarnings("unchecked")
public class TestRMContainerAllocator {
static final Log LOG = LogFactory
.getLog(TestRMContainerAllocator.class);
static final RecordFactory recordFactory = RecordFactoryProvider
.getRecordFactory(null);
@Before
public void setup() {
MyContainerAllocator.getJobUpdatedNodeEvents().clear();
MyContainerAllocator.getTaskAttemptKillEvents().clear();
// make each test create a fresh user to avoid leaking tokens between tests
UserGroupInformation.setLoginUser(null);
}
@After
public void tearDown() {
DefaultMetricsSystem.shutdown();
}
@Test
public void testSimple() throws Exception {
LOG.info("Running testSimple");
Configuration conf = new Configuration();
MyResourceManager rm = new MyResourceManager(conf);
rm.start();
DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext()
.getDispatcher();
// Submit the application
RMApp app = rm.submitApp(1024);
dispatcher.await();
MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
amNodeManager.nodeHeartbeat(true);
dispatcher.await();
ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt()
.getAppAttemptId();
rm.sendAMLaunched(appAttemptId);
dispatcher.await();
JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
Job mockJob = mock(Job.class);
when(mockJob.getReport()).thenReturn(
MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
appAttemptId, mockJob);
// add resources to scheduler
MockNM nodeManager1 = rm.registerNode("h1:1234", 10240);
MockNM nodeManager2 = rm.registerNode("h2:1234", 10240);
MockNM nodeManager3 = rm.registerNode("h3:1234", 10240);
dispatcher.await();
// create the container request
ContainerRequestEvent event1 = createReq(jobId, 1, 1024,
new String[] { "h1" });
allocator.sendRequest(event1);
// send 1 more request with different resource req
ContainerRequestEvent event2 = createReq(jobId, 2, 1024,
new String[] { "h2" });
allocator.sendRequest(event2);
// this tells the scheduler about the requests
// as nodes are not added, no allocations
List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
dispatcher.await();
Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
Assert.assertEquals(4, rm.getMyFifoScheduler().lastAsk.size());
// send another request with different resource and priority
ContainerRequestEvent event3 = createReq(jobId, 3, 1024,
new String[] { "h3" });
allocator.sendRequest(event3);
// this tells the scheduler about the requests
// as nodes are not added, no allocations
assigned = allocator.schedule();
dispatcher.await();
Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
Assert.assertEquals(3, rm.getMyFifoScheduler().lastAsk.size());
// update resources in scheduler
nodeManager1.nodeHeartbeat(true); // Node heartbeat
nodeManager2.nodeHeartbeat(true); // Node heartbeat
nodeManager3.nodeHeartbeat(true); // Node heartbeat
dispatcher.await();
assigned = allocator.schedule();
dispatcher.await();
Assert.assertEquals(0, rm.getMyFifoScheduler().lastAsk.size());
checkAssignments(new ContainerRequestEvent[] { event1, event2, event3 },
assigned, false);
// check that the assigned container requests are cancelled
assigned = allocator.schedule();
dispatcher.await();
Assert.assertEquals(5, rm.getMyFifoScheduler().lastAsk.size());
}
@Test
public void testMapNodeLocality() throws Exception {
// test checks that ordering of allocated containers list from the RM does
// not affect the map->container assignment done by the AM. If there is a
// node local container available for a map then it should be assigned to
// that container and not a rack-local container that happened to be seen
// earlier in the allocated containers list from the RM.
// Regression test for MAPREDUCE-4893
LOG.info("Running testMapNodeLocality");
Configuration conf = new Configuration();
MyResourceManager rm = new MyResourceManager(conf);
rm.start();
DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext()
.getDispatcher();
// Submit the application
RMApp app = rm.submitApp(1024);
dispatcher.await();
MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
amNodeManager.nodeHeartbeat(true);
dispatcher.await();
ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt()
.getAppAttemptId();
rm.sendAMLaunched(appAttemptId);
dispatcher.await();
JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
Job mockJob = mock(Job.class);
when(mockJob.getReport()).thenReturn(
MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
appAttemptId, mockJob);
// add resources to scheduler
MockNM nodeManager1 = rm.registerNode("h1:1234", 3072); // can assign 2 maps
rm.registerNode("h2:1234", 10240); // wont heartbeat on node local node
MockNM nodeManager3 = rm.registerNode("h3:1234", 1536); // assign 1 map
dispatcher.await();
// create the container requests for maps
ContainerRequestEvent event1 = createReq(jobId, 1, 1024,
new String[] { "h1" });
allocator.sendRequest(event1);
ContainerRequestEvent event2 = createReq(jobId, 2, 1024,
new String[] { "h1" });
allocator.sendRequest(event2);
ContainerRequestEvent event3 = createReq(jobId, 3, 1024,
new String[] { "h2" });
allocator.sendRequest(event3);
// this tells the scheduler about the requests
// as nodes are not added, no allocations
List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
dispatcher.await();
Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
// update resources in scheduler
// Node heartbeat from rack-local first. This makes node h3 the first in the
// list of allocated containers but it should not be assigned to task1.
nodeManager3.nodeHeartbeat(true);
// Node heartbeat from node-local next. This allocates 2 node local
// containers for task1 and task2. These should be matched with those tasks.
nodeManager1.nodeHeartbeat(true);
dispatcher.await();
assigned = allocator.schedule();
dispatcher.await();
checkAssignments(new ContainerRequestEvent[] { event1, event2, event3 },
assigned, false);
// remove the rack-local assignment that should have happened for task3
for(TaskAttemptContainerAssignedEvent event : assigned) {
if(event.getTaskAttemptID().equals(event3.getAttemptID())) {
assigned.remove(event);
Assert.assertTrue(
event.getContainer().getNodeId().getHost().equals("h3"));
break;
}
}
checkAssignments(new ContainerRequestEvent[] { event1, event2},
assigned, true);
}
@Test
public void testResource() throws Exception {
LOG.info("Running testResource");
Configuration conf = new Configuration();
MyResourceManager rm = new MyResourceManager(conf);
rm.start();
DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext()
.getDispatcher();
// Submit the application
RMApp app = rm.submitApp(1024);
dispatcher.await();
MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
amNodeManager.nodeHeartbeat(true);
dispatcher.await();
ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt()
.getAppAttemptId();
rm.sendAMLaunched(appAttemptId);
dispatcher.await();
JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
Job mockJob = mock(Job.class);
when(mockJob.getReport()).thenReturn(
MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
appAttemptId, mockJob);
// add resources to scheduler
MockNM nodeManager1 = rm.registerNode("h1:1234", 10240);
MockNM nodeManager2 = rm.registerNode("h2:1234", 10240);
MockNM nodeManager3 = rm.registerNode("h3:1234", 10240);
dispatcher.await();
// create the container request
ContainerRequestEvent event1 = createReq(jobId, 1, 1024,
new String[] { "h1" });
allocator.sendRequest(event1);
// send 1 more request with different resource req
ContainerRequestEvent event2 = createReq(jobId, 2, 2048,
new String[] { "h2" });
allocator.sendRequest(event2);
// this tells the scheduler about the requests
// as nodes are not added, no allocations
List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
dispatcher.await();
Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
// update resources in scheduler
nodeManager1.nodeHeartbeat(true); // Node heartbeat
nodeManager2.nodeHeartbeat(true); // Node heartbeat
nodeManager3.nodeHeartbeat(true); // Node heartbeat
dispatcher.await();
assigned = allocator.schedule();
dispatcher.await();
checkAssignments(new ContainerRequestEvent[] { event1, event2 },
assigned, false);
}
@Test(timeout = 30000)
public void testReducerRampdownDiagnostics() throws Exception {
LOG.info("Running tesReducerRampdownDiagnostics");
final Configuration conf = new Configuration();
conf.setFloat(MRJobConfig.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART, 0.0f);
final MyResourceManager rm = new MyResourceManager(conf);
rm.start();
final DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext()
.getDispatcher();
final RMApp app = rm.submitApp(1024);
dispatcher.await();
final String host = "host1";
final MockNM nm = rm.registerNode(String.format("%s:1234", host), 2048);
nm.nodeHeartbeat(true);
dispatcher.await();
final ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt()
.getAppAttemptId();
rm.sendAMLaunched(appAttemptId);
dispatcher.await();
final JobId jobId = MRBuilderUtils
.newJobId(appAttemptId.getApplicationId(), 0);
final Job mockJob = mock(Job.class);
when(mockJob.getReport()).thenReturn(
MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
final MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
appAttemptId, mockJob);
// add resources to scheduler
dispatcher.await();
// create the container request
final String[] locations = new String[] { host };
allocator.sendRequest(createReq(jobId, 0, 1024, locations, false, true));
for (int i = 0; i < 1;) {
dispatcher.await();
i += allocator.schedule().size();
nm.nodeHeartbeat(true);
}
allocator.sendRequest(createReq(jobId, 0, 1024, locations, true, false));
while (allocator.getTaskAttemptKillEvents().size() == 0) {
dispatcher.await();
allocator.schedule().size();
nm.nodeHeartbeat(true);
}
final String killEventMessage = allocator.getTaskAttemptKillEvents().get(0)
.getMessage();
Assert.assertTrue("No reducer rampDown preemption message",
killEventMessage.contains(RMContainerAllocator.RAMPDOWN_DIAGNOSTIC));
}
@Test(timeout = 30000)
public void testPreemptReducers() throws Exception {
LOG.info("Running testPreemptReducers");
Configuration conf = new Configuration();
MyResourceManager rm = new MyResourceManager(conf);
rm.start();
DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext()
.getDispatcher();
// Submit the application
RMApp app = rm.submitApp(1024);
dispatcher.await();
MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
amNodeManager.nodeHeartbeat(true);
dispatcher.await();
ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt()
.getAppAttemptId();
rm.sendAMLaunched(appAttemptId);
dispatcher.await();
JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
Job mockJob = mock(Job.class);
when(mockJob.getReport()).thenReturn(
MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
appAttemptId, mockJob, new SystemClock());
allocator.setMapResourceRequest(BuilderUtils.newResource(1024, 1));
allocator.setReduceResourceRequest(BuilderUtils.newResource(1024, 1));
RMContainerAllocator.AssignedRequests assignedRequests =
allocator.getAssignedRequests();
RMContainerAllocator.ScheduledRequests scheduledRequests =
allocator.getScheduledRequests();
ContainerRequestEvent event1 =
createReq(jobId, 1, 2048, new String[] { "h1" }, false, false);
scheduledRequests.maps.put(mock(TaskAttemptId.class),
new RMContainerRequestor.ContainerRequest(event1, null,null));
assignedRequests.reduces.put(mock(TaskAttemptId.class),
mock(Container.class));
allocator.preemptReducesIfNeeded();
Assert.assertEquals("The reducer is not preempted",
1, assignedRequests.preemptionWaitingReduces.size());
}
@Test(timeout = 30000)
public void testNonAggressivelyPreemptReducers() throws Exception {
LOG.info("Running testNonAggressivelyPreemptReducers");
final int preemptThreshold = 2; //sec
Configuration conf = new Configuration();
conf.setInt(
MRJobConfig.MR_JOB_REDUCER_PREEMPT_DELAY_SEC,
preemptThreshold);
MyResourceManager rm = new MyResourceManager(conf);
rm.start();
DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext()
.getDispatcher();
// Submit the application
RMApp app = rm.submitApp(1024);
dispatcher.await();
MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
amNodeManager.nodeHeartbeat(true);
dispatcher.await();
ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt()
.getAppAttemptId();
rm.sendAMLaunched(appAttemptId);
dispatcher.await();
JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
Job mockJob = mock(Job.class);
when(mockJob.getReport()).thenReturn(
MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
ControlledClock clock = new ControlledClock(null);
clock.setTime(1);
MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
appAttemptId, mockJob, clock);
allocator.setMapResourceRequest(BuilderUtils.newResource(1024, 1));
allocator.setReduceResourceRequest(BuilderUtils.newResource(1024, 1));
RMContainerAllocator.AssignedRequests assignedRequests =
allocator.getAssignedRequests();
RMContainerAllocator.ScheduledRequests scheduledRequests =
allocator.getScheduledRequests();
ContainerRequestEvent event1 =
createReq(jobId, 1, 2048, new String[] { "h1" }, false, false);
scheduledRequests.maps.put(mock(TaskAttemptId.class),
new RMContainerRequestor.ContainerRequest(event1, null, clock.getTime()));
assignedRequests.reduces.put(mock(TaskAttemptId.class),
mock(Container.class));
clock.setTime(clock.getTime() + 1);
allocator.preemptReducesIfNeeded();
Assert.assertEquals("The reducer is aggressively preeempted", 0,
assignedRequests.preemptionWaitingReduces.size());
clock.setTime(clock.getTime() + (preemptThreshold) * 1000);
allocator.preemptReducesIfNeeded();
Assert.assertEquals("The reducer is not preeempted", 1,
assignedRequests.preemptionWaitingReduces.size());
}
@Test
public void testMapReduceAllocationWithNodeLabelExpression() throws Exception {
LOG.info("Running testMapReduceAllocationWithNodeLabelExpression");
Configuration conf = new Configuration();
/*
* final int MAP_LIMIT = 3; final int REDUCE_LIMIT = 1;
* conf.setInt(MRJobConfig.JOB_RUNNING_MAP_LIMIT, MAP_LIMIT);
* conf.setInt(MRJobConfig.JOB_RUNNING_REDUCE_LIMIT, REDUCE_LIMIT);
*/
conf.setFloat(MRJobConfig.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART, 1.0f);
conf.set(MRJobConfig.MAP_NODE_LABEL_EXP, "MapNodes");
conf.set(MRJobConfig.REDUCE_NODE_LABEL_EXP, "ReduceNodes");
ApplicationId appId = ApplicationId.newInstance(1, 1);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 1);
JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
Job mockJob = mock(Job.class);
when(mockJob.getReport()).thenReturn(
MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
final MockScheduler mockScheduler = new MockScheduler(appAttemptId);
MyContainerAllocator allocator =
new MyContainerAllocator(null, conf, appAttemptId, mockJob) {
@Override
protected void register() {
}
@Override
protected ApplicationMasterProtocol createSchedulerProxy() {
return mockScheduler;
}
};
// create some map requests
ContainerRequestEvent reqMapEvents;
reqMapEvents = createReq(jobId, 0, 1024, new String[] { "map" });
allocator.sendRequests(Arrays.asList(reqMapEvents));
// create some reduce requests
ContainerRequestEvent reqReduceEvents;
reqReduceEvents =
createReq(jobId, 0, 2048, new String[] { "reduce" }, false, true);
allocator.sendRequests(Arrays.asList(reqReduceEvents));
allocator.schedule();
// verify all of the host-specific asks were sent plus one for the
// default rack and one for the ANY request
Assert.assertEquals(3, mockScheduler.lastAsk.size());
// verify ResourceRequest sent for MAP have appropriate node
// label expression as per the configuration
validateLabelsRequests(mockScheduler.lastAsk.get(0), false);
validateLabelsRequests(mockScheduler.lastAsk.get(1), false);
validateLabelsRequests(mockScheduler.lastAsk.get(2), false);
// assign a map task and verify we do not ask for any more maps
ContainerId cid0 = mockScheduler.assignContainer("map", false);
allocator.schedule();
// the remaining asks now correspond to the reduce request: one
// host-specific ask plus the default rack and the ANY request
Assert.assertEquals(3, mockScheduler.lastAsk.size());
validateLabelsRequests(mockScheduler.lastAsk.get(0), true);
validateLabelsRequests(mockScheduler.lastAsk.get(1), true);
validateLabelsRequests(mockScheduler.lastAsk.get(2), true);
// done with the label-expression checks; shut down the allocator
allocator.close();
}
private void validateLabelsRequests(ResourceRequest resourceRequest,
boolean isReduce) {
switch (resourceRequest.getResourceName()) {
case "map":
case "reduce":
case NetworkTopology.DEFAULT_RACK:
Assert.assertNull(resourceRequest.getNodeLabelExpression());
break;
case "*":
Assert.assertEquals(isReduce ? "ReduceNodes" : "MapNodes",
resourceRequest.getNodeLabelExpression());
break;
default:
Assert.fail("Invalid resource location "
+ resourceRequest.getResourceName());
}
}
@Test
public void testMapReduceScheduling() throws Exception {
LOG.info("Running testMapReduceScheduling");
Configuration conf = new Configuration();
MyResourceManager rm = new MyResourceManager(conf);
rm.start();
DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext()
.getDispatcher();
// Submit the application
RMApp app = rm.submitApp(1024);
dispatcher.await();
MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
amNodeManager.nodeHeartbeat(true);
dispatcher.await();
ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt()
.getAppAttemptId();
rm.sendAMLaunched(appAttemptId);
dispatcher.await();
JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
Job mockJob = mock(Job.class);
when(mockJob.getReport()).thenReturn(
MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
appAttemptId, mockJob);
// add resources to scheduler
MockNM nodeManager1 = rm.registerNode("h1:1234", 1024);
MockNM nodeManager2 = rm.registerNode("h2:1234", 10240);
MockNM nodeManager3 = rm.registerNode("h3:1234", 10240);
dispatcher.await();
// create the container request
// send MAP request
ContainerRequestEvent event1 = createReq(jobId, 1, 2048, new String[] {
"h1", "h2" }, true, false);
allocator.sendRequest(event1);
// send REDUCE request
ContainerRequestEvent event2 = createReq(jobId, 2, 3000,
new String[] { "h1" }, false, true);
allocator.sendRequest(event2);
// send MAP request
ContainerRequestEvent event3 = createReq(jobId, 3, 2048,
new String[] { "h3" }, false, false);
allocator.sendRequest(event3);
// this tells the scheduler about the requests
// as nodes are not added, no allocations
List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
dispatcher.await();
Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
// update resources in scheduler
nodeManager1.nodeHeartbeat(true); // Node heartbeat
nodeManager2.nodeHeartbeat(true); // Node heartbeat
nodeManager3.nodeHeartbeat(true); // Node heartbeat
dispatcher.await();
assigned = allocator.schedule();
dispatcher.await();
checkAssignments(new ContainerRequestEvent[] { event1, event3 },
assigned, false);
// validate that no container is assigned to h1 as it doesn't have 2048
for (TaskAttemptContainerAssignedEvent assig : assigned) {
Assert.assertFalse("Assigned count not correct", "h1".equals(assig
.getContainer().getNodeId().getHost()));
}
}
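// MockRM subclass wired with a DrainDispatcher, an inline scheduler event
// handler and a MyFifoScheduler so tests can inspect the last ask, release
// and blacklist lists sent by the allocator.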
private static class MyResourceManager extends MockRM {
private static long fakeClusterTimeStamp = System.currentTimeMillis();
public MyResourceManager(Configuration conf) {
super(conf);
}
public MyResourceManager(Configuration conf, RMStateStore store) {
super(conf, store);
}
@Override
public void serviceStart() throws Exception {
super.serviceStart();
// Ensure that the application attempt IDs for all the tests are the same
// The application attempt IDs will be used as the login user names
MyResourceManager.setClusterTimeStamp(fakeClusterTimeStamp);
}
@Override
protected Dispatcher createDispatcher() {
return new DrainDispatcher();
}
@Override
protected EventHandler<SchedulerEvent> createSchedulerEventDispatcher() {
// Dispatch inline for test sanity
return new EventHandler<SchedulerEvent>() {
@Override
public void handle(SchedulerEvent event) {
scheduler.handle(event);
}
};
}
@Override
protected ResourceScheduler createScheduler() {
return new MyFifoScheduler(this.getRMContext());
}
MyFifoScheduler getMyFifoScheduler() {
return (MyFifoScheduler) scheduler;
}
}
@Test
public void testReportedAppProgress() throws Exception {
LOG.info("Running testReportedAppProgress");
Configuration conf = new Configuration();
final MyResourceManager rm = new MyResourceManager(conf);
rm.start();
DrainDispatcher rmDispatcher = (DrainDispatcher) rm.getRMContext()
.getDispatcher();
// Submit the application
RMApp rmApp = rm.submitApp(1024);
rmDispatcher.await();
MockNM amNodeManager = rm.registerNode("amNM:1234", 21504);
amNodeManager.nodeHeartbeat(true);
rmDispatcher.await();
final ApplicationAttemptId appAttemptId = rmApp.getCurrentAppAttempt()
.getAppAttemptId();
rm.sendAMLaunched(appAttemptId);
rmDispatcher.await();
MRApp mrApp = new MRApp(appAttemptId, ContainerId.newContainerId(
appAttemptId, 0), 10, 10, false, this.getClass().getName(), true, 1) {
@Override
protected Dispatcher createDispatcher() {
return new DrainDispatcher();
}
protected ContainerAllocator createContainerAllocator(
ClientService clientService, AppContext context) {
return new MyContainerAllocator(rm, appAttemptId, context);
};
};
Assert.assertEquals(0.0, rmApp.getProgress(), 0.0);
mrApp.submit(conf);
Job job = mrApp.getContext().getAllJobs().entrySet().iterator().next()
.getValue();
DrainDispatcher amDispatcher = (DrainDispatcher) mrApp.getDispatcher();
MyContainerAllocator allocator = (MyContainerAllocator) mrApp
.getContainerAllocator();
mrApp.waitForInternalState((JobImpl) job, JobStateInternal.RUNNING);
amDispatcher.await();
// Wait till all map-attempts request for containers
for (Task t : job.getTasks().values()) {
if (t.getType() == TaskType.MAP) {
mrApp.waitForInternalState((TaskAttemptImpl) t.getAttempts().values()
.iterator().next(), TaskAttemptStateInternal.UNASSIGNED);
}
}
amDispatcher.await();
allocator.schedule();
rmDispatcher.await();
amNodeManager.nodeHeartbeat(true);
rmDispatcher.await();
allocator.schedule();
rmDispatcher.await();
// Wait for all map-tasks to be running
for (Task t : job.getTasks().values()) {
if (t.getType() == TaskType.MAP) {
mrApp.waitForState(t, TaskState.RUNNING);
}
}
allocator.schedule(); // Send heartbeat
rmDispatcher.await();
Assert.assertEquals(0.05f, job.getProgress(), 0.001f);
Assert.assertEquals(0.05f, rmApp.getProgress(), 0.001f);
// Finish off 1 map.
Iterator<Task> it = job.getTasks().values().iterator();
finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 1);
allocator.schedule();
rmDispatcher.await();
Assert.assertEquals(0.095f, job.getProgress(), 0.001f);
Assert.assertEquals(0.095f, rmApp.getProgress(), 0.001f);
// Finish off 7 more so that map-progress is 80%
finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 7);
allocator.schedule();
rmDispatcher.await();
Assert.assertEquals(0.41f, job.getProgress(), 0.001f);
Assert.assertEquals(0.41f, rmApp.getProgress(), 0.001f);
// Finish off the 2 remaining maps
finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 2);
allocator.schedule();
rmDispatcher.await();
amNodeManager.nodeHeartbeat(true);
rmDispatcher.await();
allocator.schedule();
rmDispatcher.await();
// Wait for all reduce-tasks to be running
for (Task t : job.getTasks().values()) {
if (t.getType() == TaskType.REDUCE) {
mrApp.waitForState(t, TaskState.RUNNING);
}
}
// Finish off 2 reduces
finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 2);
allocator.schedule();
rmDispatcher.await();
Assert.assertEquals(0.59f, job.getProgress(), 0.001f);
Assert.assertEquals(0.59f, rmApp.getProgress(), 0.001f);
// Finish off the remaining 8 reduces.
finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 8);
allocator.schedule();
rmDispatcher.await();
// Remaining is JobCleanup
Assert.assertEquals(0.95f, job.getProgress(), 0.001f);
Assert.assertEquals(0.95f, rmApp.getProgress(), 0.001f);
}
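// Completes the next N tasks from the iterator by simulating container
// completion via a node heartbeat followed by a TA_DONE event.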
private void finishNextNTasks(DrainDispatcher rmDispatcher, MockNM node,
MRApp mrApp, Iterator<Task> it, int nextN) throws Exception {
Task task;
for (int i=0; i<nextN; i++) {
task = it.next();
finishTask(rmDispatcher, node, mrApp, task);
}
}
private void finishTask(DrainDispatcher rmDispatcher, MockNM node,
MRApp mrApp, Task task) throws Exception {
TaskAttempt attempt = task.getAttempts().values().iterator().next();
List<ContainerStatus> contStatus = new ArrayList<ContainerStatus>(1);
contStatus.add(ContainerStatus.newInstance(attempt.getAssignedContainerID(),
ContainerState.COMPLETE, "", 0));
Map<ApplicationId,List<ContainerStatus>> statusUpdate =
new HashMap<ApplicationId,List<ContainerStatus>>(1);
statusUpdate.put(mrApp.getAppID(), contStatus);
node.nodeHeartbeat(statusUpdate, true);
rmDispatcher.await();
mrApp.getContext().getEventHandler().handle(
new TaskAttemptEvent(attempt.getID(), TaskAttemptEventType.TA_DONE));
mrApp.waitForState(task, TaskState.SUCCEEDED);
}
@Test
public void testReportedAppProgressWithOnlyMaps() throws Exception {
LOG.info("Running testReportedAppProgressWithOnlyMaps");
Configuration conf = new Configuration();
final MyResourceManager rm = new MyResourceManager(conf);
rm.start();
DrainDispatcher rmDispatcher = (DrainDispatcher) rm.getRMContext()
.getDispatcher();
// Submit the application
RMApp rmApp = rm.submitApp(1024);
rmDispatcher.await();
MockNM amNodeManager = rm.registerNode("amNM:1234", 11264);
amNodeManager.nodeHeartbeat(true);
rmDispatcher.await();
final ApplicationAttemptId appAttemptId = rmApp.getCurrentAppAttempt()
.getAppAttemptId();
rm.sendAMLaunched(appAttemptId);
rmDispatcher.await();
MRApp mrApp = new MRApp(appAttemptId, ContainerId.newContainerId(
appAttemptId, 0), 10, 0, false, this.getClass().getName(), true, 1) {
@Override
protected Dispatcher createDispatcher() {
return new DrainDispatcher();
}
protected ContainerAllocator createContainerAllocator(
ClientService clientService, AppContext context) {
return new MyContainerAllocator(rm, appAttemptId, context);
};
};
Assert.assertEquals(0.0, rmApp.getProgress(), 0.0);
mrApp.submit(conf);
Job job = mrApp.getContext().getAllJobs().entrySet().iterator().next()
.getValue();
DrainDispatcher amDispatcher = (DrainDispatcher) mrApp.getDispatcher();
MyContainerAllocator allocator = (MyContainerAllocator) mrApp
.getContainerAllocator();
mrApp.waitForInternalState((JobImpl)job, JobStateInternal.RUNNING);
amDispatcher.await();
// Wait till all map-attempts request for containers
for (Task t : job.getTasks().values()) {
mrApp.waitForInternalState((TaskAttemptImpl) t.getAttempts().values()
.iterator().next(), TaskAttemptStateInternal.UNASSIGNED);
}
amDispatcher.await();
allocator.schedule();
rmDispatcher.await();
amNodeManager.nodeHeartbeat(true);
rmDispatcher.await();
allocator.schedule();
rmDispatcher.await();
// Wait for all map-tasks to be running
for (Task t : job.getTasks().values()) {
mrApp.waitForState(t, TaskState.RUNNING);
}
allocator.schedule(); // Send heartbeat
rmDispatcher.await();
Assert.assertEquals(0.05f, job.getProgress(), 0.001f);
Assert.assertEquals(0.05f, rmApp.getProgress(), 0.001f);
Iterator<Task> it = job.getTasks().values().iterator();
// Finish off 1 map so that map-progress is 10%
finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 1);
allocator.schedule();
rmDispatcher.await();
Assert.assertEquals(0.14f, job.getProgress(), 0.001f);
Assert.assertEquals(0.14f, rmApp.getProgress(), 0.001f);
// Finish off 5 more map so that map-progress is 60%
finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 5);
allocator.schedule();
rmDispatcher.await();
Assert.assertEquals(0.59f, job.getProgress(), 0.001f);
Assert.assertEquals(0.59f, rmApp.getProgress(), 0.001f);
// Finish off remaining map so that map-progress is 100%
finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 4);
allocator.schedule();
rmDispatcher.await();
Assert.assertEquals(0.95f, job.getProgress(), 0.001f);
Assert.assertEquals(0.95f, rmApp.getProgress(), 0.001f);
}
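// Verifies that node updates reported by the RM (e.g. nodes turning
// unusable) are forwarded to the job as JobUpdatedNodesEvents and that
// task attempts running on those nodes receive TaskAttemptKillEvents.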
@Test
public void testUpdatedNodes() throws Exception {
Configuration conf = new Configuration();
MyResourceManager rm = new MyResourceManager(conf);
rm.start();
DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext()
.getDispatcher();
// Submit the application
RMApp app = rm.submitApp(1024);
dispatcher.await();
MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
amNodeManager.nodeHeartbeat(true);
dispatcher.await();
ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt()
.getAppAttemptId();
rm.sendAMLaunched(appAttemptId);
dispatcher.await();
JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
Job mockJob = mock(Job.class);
MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
appAttemptId, mockJob);
// add resources to scheduler
MockNM nm1 = rm.registerNode("h1:1234", 10240);
MockNM nm2 = rm.registerNode("h2:1234", 10240);
dispatcher.await();
// create the map container request
ContainerRequestEvent event = createReq(jobId, 1, 1024,
new String[] { "h1" });
allocator.sendRequest(event);
TaskAttemptId attemptId = event.getAttemptID();
TaskAttempt mockTaskAttempt = mock(TaskAttempt.class);
when(mockTaskAttempt.getNodeId()).thenReturn(nm1.getNodeId());
Task mockTask = mock(Task.class);
when(mockTask.getAttempt(attemptId)).thenReturn(mockTaskAttempt);
when(mockJob.getTask(attemptId.getTaskId())).thenReturn(mockTask);
// this tells the scheduler about the requests
List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
dispatcher.await();
nm1.nodeHeartbeat(true);
dispatcher.await();
Assert.assertEquals(1, allocator.getJobUpdatedNodeEvents().size());
Assert.assertEquals(3, allocator.getJobUpdatedNodeEvents().get(0).getUpdatedNodes().size());
allocator.getJobUpdatedNodeEvents().clear();
// get the assignment
assigned = allocator.schedule();
dispatcher.await();
Assert.assertEquals(1, assigned.size());
Assert.assertEquals(nm1.getNodeId(), assigned.get(0).getContainer().getNodeId());
// no updated nodes reported
Assert.assertTrue(allocator.getJobUpdatedNodeEvents().isEmpty());
Assert.assertTrue(allocator.getTaskAttemptKillEvents().isEmpty());
// mark nodes bad
nm1.nodeHeartbeat(false);
nm2.nodeHeartbeat(false);
dispatcher.await();
// schedule response returns updated nodes
assigned = allocator.schedule();
dispatcher.await();
Assert.assertEquals(0, assigned.size());
// updated nodes are reported
Assert.assertEquals(1, allocator.getJobUpdatedNodeEvents().size());
Assert.assertEquals(1, allocator.getTaskAttemptKillEvents().size());
Assert.assertEquals(2, allocator.getJobUpdatedNodeEvents().get(0).getUpdatedNodes().size());
Assert.assertEquals(attemptId, allocator.getTaskAttemptKillEvents().get(0).getTaskAttemptID());
allocator.getJobUpdatedNodeEvents().clear();
allocator.getTaskAttemptKillEvents().clear();
assigned = allocator.schedule();
dispatcher.await();
Assert.assertEquals(0, assigned.size());
// no updated nodes reported
Assert.assertTrue(allocator.getJobUpdatedNodeEvents().isEmpty());
Assert.assertTrue(allocator.getTaskAttemptKillEvents().isEmpty());
}
@Test
public void testBlackListedNodes() throws Exception {
LOG.info("Running testBlackListedNodes");
Configuration conf = new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_NODE_BLACKLISTING_ENABLE, true);
conf.setInt(MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER, 1);
conf.setInt(
MRJobConfig.MR_AM_IGNORE_BLACKLISTING_BLACKLISTED_NODE_PERECENT, -1);
MyResourceManager rm = new MyResourceManager(conf);
rm.start();
DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext()
.getDispatcher();
// Submit the application
RMApp app = rm.submitApp(1024);
dispatcher.await();
MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
amNodeManager.nodeHeartbeat(true);
dispatcher.await();
ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt()
.getAppAttemptId();
rm.sendAMLaunched(appAttemptId);
dispatcher.await();
JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
Job mockJob = mock(Job.class);
when(mockJob.getReport()).thenReturn(
MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
appAttemptId, mockJob);
// add resources to scheduler
MockNM nodeManager1 = rm.registerNode("h1:1234", 10240);
MockNM nodeManager2 = rm.registerNode("h2:1234", 10240);
MockNM nodeManager3 = rm.registerNode("h3:1234", 10240);
dispatcher.await();
// create the container request
ContainerRequestEvent event1 = createReq(jobId, 1, 1024,
new String[] { "h1" });
allocator.sendRequest(event1);
// send 1 more request with different resource req
ContainerRequestEvent event2 = createReq(jobId, 2, 1024,
new String[] { "h2" });
allocator.sendRequest(event2);
// send another request with different resource and priority
ContainerRequestEvent event3 = createReq(jobId, 3, 1024,
new String[] { "h3" });
allocator.sendRequest(event3);
// this tells the scheduler about the requests
// as nodes are not added, no allocations
List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
dispatcher.await();
Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
// Send events to blacklist nodes h1 and h2
ContainerFailedEvent f1 = createFailEvent(jobId, 1, "h1", false);
allocator.sendFailure(f1);
ContainerFailedEvent f2 = createFailEvent(jobId, 1, "h2", false);
allocator.sendFailure(f2);
// update resources in scheduler
nodeManager1.nodeHeartbeat(true); // Node heartbeat
nodeManager2.nodeHeartbeat(true); // Node heartbeat
dispatcher.await();
assigned = allocator.schedule();
Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
dispatcher.await();
Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
assertBlacklistAdditionsAndRemovals(2, 0, rm);
// mark h1/h2 as bad nodes
nodeManager1.nodeHeartbeat(false);
nodeManager2.nodeHeartbeat(false);
dispatcher.await();
assigned = allocator.schedule();
dispatcher.await();
assertBlacklistAdditionsAndRemovals(0, 0, rm);
Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
nodeManager3.nodeHeartbeat(true); // Node heartbeat
dispatcher.await();
assigned = allocator.schedule();
dispatcher.await();
assertBlacklistAdditionsAndRemovals(0, 0, rm);
Assert.assertTrue("No of assignments must be 3", assigned.size() == 3);
// validate that all containers are assigned to h3
for (TaskAttemptContainerAssignedEvent assig : assigned) {
Assert.assertTrue("Assigned container host not correct", "h3".equals(assig
.getContainer().getNodeId().getHost()));
}
}
@Test
public void testIgnoreBlacklisting() throws Exception {
LOG.info("Running testIgnoreBlacklisting");
Configuration conf = new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_NODE_BLACKLISTING_ENABLE, true);
conf.setInt(MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER, 1);
conf.setInt(
MRJobConfig.MR_AM_IGNORE_BLACKLISTING_BLACKLISTED_NODE_PERECENT, 33);
MyResourceManager rm = new MyResourceManager(conf);
rm.start();
DrainDispatcher dispatcher =
(DrainDispatcher) rm.getRMContext().getDispatcher();
// Submit the application
RMApp app = rm.submitApp(1024);
dispatcher.await();
MockNM[] nodeManagers = new MockNM[10];
int nmNum = 0;
List<TaskAttemptContainerAssignedEvent> assigned = null;
nodeManagers[nmNum] = registerNodeManager(nmNum++, rm, dispatcher);
nodeManagers[0].nodeHeartbeat(true);
dispatcher.await();
ApplicationAttemptId appAttemptId =
app.getCurrentAppAttempt().getAppAttemptId();
rm.sendAMLaunched(appAttemptId);
dispatcher.await();
JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
Job mockJob = mock(Job.class);
when(mockJob.getReport()).thenReturn(
MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
MyContainerAllocator allocator =
new MyContainerAllocator(rm, conf, appAttemptId, mockJob);
// Known=1, blacklisted=0, ignore should be false - assign first container
assigned =
getContainerOnHost(jobId, 1, 1024, new String[] { "h1" },
nodeManagers[0], dispatcher, allocator, 0, 0, 0, 0, rm);
Assert.assertEquals("No of assignments must be 1", 1, assigned.size());
LOG.info("Failing container _1 on H1 (Node should be blacklisted and"
+ " ignore blacklisting enabled");
// Send events to blacklist nodes h1 and h2
ContainerFailedEvent f1 = createFailEvent(jobId, 1, "h1", false);
allocator.sendFailure(f1);
// Test single node.
// Known=1, blacklisted=1, ignore should be true - assign 0
// Because makeRemoteRequest will not be aware of it until next call
// The current call will send blacklisted node "h1" to RM
assigned =
getContainerOnHost(jobId, 2, 1024, new String[] { "h1" },
nodeManagers[0], dispatcher, allocator, 1, 0, 0, 1, rm);
Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
// Known=1, blacklisted=1, ignore should be true - assign 1
assigned =
getContainerOnHost(jobId, 2, 1024, new String[] { "h1" },
nodeManagers[0], dispatcher, allocator, 0, 0, 0, 0, rm);
Assert.assertEquals("No of assignments must be 1", 1, assigned.size());
nodeManagers[nmNum] = registerNodeManager(nmNum++, rm, dispatcher);
// Known=2, blacklisted=1, ignore should be true - assign 1 anyway.
assigned =
getContainerOnHost(jobId, 3, 1024, new String[] { "h2" },
nodeManagers[1], dispatcher, allocator, 0, 0, 0, 0, rm);
Assert.assertEquals("No of assignments must be 1", 1, assigned.size());
nodeManagers[nmNum] = registerNodeManager(nmNum++, rm, dispatcher);
// Known=3, blacklisted=1, ignore should be true - assign 1 anyway.
assigned =
getContainerOnHost(jobId, 4, 1024, new String[] { "h3" },
nodeManagers[2], dispatcher, allocator, 0, 0, 0, 0, rm);
Assert.assertEquals("No of assignments must be 1", 1, assigned.size());
// Known=3, blacklisted=1, ignore should be true - assign 1
assigned =
getContainerOnHost(jobId, 5, 1024, new String[] { "h1" },
nodeManagers[0], dispatcher, allocator, 0, 0, 0, 0, rm);
Assert.assertEquals("No of assignments must be 1", 1, assigned.size());
nodeManagers[nmNum] = registerNodeManager(nmNum++, rm, dispatcher);
// Known=4, blacklisted=1, ignore should be false - assign 1 anyway
assigned =
getContainerOnHost(jobId, 6, 1024, new String[] { "h4" },
nodeManagers[3], dispatcher, allocator, 0, 0, 1, 0, rm);
Assert.assertEquals("No of assignments must be 1", 1, assigned.size());
// Test blacklisting re-enabled.
// Known=4, blacklisted=1, ignore should be false - no assignment on h1
assigned =
getContainerOnHost(jobId, 7, 1024, new String[] { "h1" },
nodeManagers[0], dispatcher, allocator, 0, 0, 0, 0, rm);
Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
// RMContainerRequestor would have created a replacement request.
// Blacklist h2
ContainerFailedEvent f2 = createFailEvent(jobId, 3, "h2", false);
allocator.sendFailure(f2);
// Test ignore blacklisting re-enabled
// Known=4, blacklisted=2, ignore should be true. Should assign 0
// container for the same reason above.
assigned =
getContainerOnHost(jobId, 8, 1024, new String[] { "h1" },
nodeManagers[0], dispatcher, allocator, 1, 0, 0, 2, rm);
Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
// Known=4, blacklisted=2, ignore should be true. Should assign 2
// containers.
assigned =
getContainerOnHost(jobId, 8, 1024, new String[] { "h1" },
nodeManagers[0], dispatcher, allocator, 0, 0, 0, 0, rm);
Assert.assertEquals("No of assignments must be 2", 2, assigned.size());
// Known=4, blacklisted=2, ignore should be true.
assigned =
getContainerOnHost(jobId, 9, 1024, new String[] { "h2" },
nodeManagers[1], dispatcher, allocator, 0, 0, 0, 0, rm);
Assert.assertEquals("No of assignments must be 1", 1, assigned.size());
// Test blacklist while ignore blacklisting enabled
ContainerFailedEvent f3 = createFailEvent(jobId, 4, "h3", false);
allocator.sendFailure(f3);
nodeManagers[nmNum] = registerNodeManager(nmNum++, rm, dispatcher);
// Known=5, blacklisted=3, ignore should be true.
assigned =
getContainerOnHost(jobId, 10, 1024, new String[] { "h3" },
nodeManagers[2], dispatcher, allocator, 0, 0, 0, 0, rm);
Assert.assertEquals("No of assignments must be 1", 1, assigned.size());
// Assign on 5 more nodes - to re-enable blacklisting
for (int i = 0; i < 5; i++) {
nodeManagers[nmNum] = registerNodeManager(nmNum++, rm, dispatcher);
assigned =
getContainerOnHost(jobId, 11 + i, 1024,
new String[] { String.valueOf(5 + i) }, nodeManagers[4 + i],
dispatcher, allocator, 0, 0, (i == 4 ? 3 : 0), 0, rm);
Assert.assertEquals("No of assignments must be 1", 1, assigned.size());
}
// Test h3 (blacklisted while ignoring blacklisting) is blacklisted.
assigned =
getContainerOnHost(jobId, 20, 1024, new String[] { "h3" },
nodeManagers[2], dispatcher, allocator, 0, 0, 0, 0, rm);
Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
}
private MockNM registerNodeManager(int i, MyResourceManager rm,
DrainDispatcher dispatcher) throws Exception {
MockNM nm = rm.registerNode("h" + (i + 1) + ":1234", 10240);
dispatcher.await();
return nm;
}
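// Sends a single container request, heartbeats the given NM and returns the
// resulting assignments, asserting the expected blacklist additions and
// removals both before and after the node heartbeat.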
private
List<TaskAttemptContainerAssignedEvent> getContainerOnHost(JobId jobId,
int taskAttemptId, int memory, String[] hosts, MockNM mockNM,
DrainDispatcher dispatcher, MyContainerAllocator allocator,
int expectedAdditions1, int expectedRemovals1,
int expectedAdditions2, int expectedRemovals2, MyResourceManager rm)
throws Exception {
ContainerRequestEvent reqEvent =
createReq(jobId, taskAttemptId, memory, hosts);
allocator.sendRequest(reqEvent);
// Send the request to the RM
List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
dispatcher.await();
assertBlacklistAdditionsAndRemovals(
expectedAdditions1, expectedRemovals1, rm);
Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
// Heartbeat from the required nodeManager
mockNM.nodeHeartbeat(true);
dispatcher.await();
assigned = allocator.schedule();
dispatcher.await();
assertBlacklistAdditionsAndRemovals(
expectedAdditions2, expectedRemovals2, rm);
return assigned;
}
@Test
public void testBlackListedNodesWithSchedulingToThatNode() throws Exception {
LOG.info("Running testBlackListedNodesWithSchedulingToThatNode");
Configuration conf = new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_NODE_BLACKLISTING_ENABLE, true);
conf.setInt(MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER, 1);
conf.setInt(
MRJobConfig.MR_AM_IGNORE_BLACKLISTING_BLACKLISTED_NODE_PERECENT, -1);
MyResourceManager rm = new MyResourceManager(conf);
rm.start();
DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext()
.getDispatcher();
// Submit the application
RMApp app = rm.submitApp(1024);
dispatcher.await();
MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
amNodeManager.nodeHeartbeat(true);
dispatcher.await();
ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt()
.getAppAttemptId();
rm.sendAMLaunched(appAttemptId);
dispatcher.await();
JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
Job mockJob = mock(Job.class);
when(mockJob.getReport()).thenReturn(
MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
appAttemptId, mockJob);
// add resources to scheduler
MockNM nodeManager1 = rm.registerNode("h1:1234", 10240);
MockNM nodeManager3 = rm.registerNode("h3:1234", 10240);
dispatcher.await();
LOG.info("Requesting 1 Containers _1 on H1");
// create the container request
ContainerRequestEvent event1 = createReq(jobId, 1, 1024,
new String[] { "h1" });
allocator.sendRequest(event1);
LOG.info("RM Heartbeat (to send the container requests)");
// this tells the scheduler about the requests
// as nodes are not added, no allocations
List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
dispatcher.await();
Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
LOG.info("h1 Heartbeat (To actually schedule the containers)");
// update resources in scheduler
nodeManager1.nodeHeartbeat(true); // Node heartbeat
dispatcher.await();
LOG.info("RM Heartbeat (To process the scheduled containers)");
assigned = allocator.schedule();
dispatcher.await();
assertBlacklistAdditionsAndRemovals(0, 0, rm);
Assert.assertEquals("No of assignments must be 1", 1, assigned.size());
LOG.info("Failing container _1 on H1 (should blacklist the node)");
// Send events to blacklist nodes h1 and h2
ContainerFailedEvent f1 = createFailEvent(jobId, 1, "h1", false);
allocator.sendFailure(f1);
//At this stage, a request should be created for a fast fail map
//Create a FAST_FAIL request for a previously failed map.
ContainerRequestEvent event1f = createReq(jobId, 1, 1024,
new String[] { "h1" }, true, false);
allocator.sendRequest(event1f);
//Update the Scheduler with the new requests.
assigned = allocator.schedule();
dispatcher.await();
assertBlacklistAdditionsAndRemovals(1, 0, rm);
Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
// send another request with different resource and priority
ContainerRequestEvent event3 = createReq(jobId, 3, 1024,
new String[] { "h1", "h3" });
allocator.sendRequest(event3);
//Allocator is aware of prio:5 container, and prio:20 (h1+h3) container.
//RM is only aware of the prio:5 container
LOG.info("h1 Heartbeat (To actually schedule the containers)");
// update resources in scheduler
nodeManager1.nodeHeartbeat(true); // Node heartbeat
dispatcher.await();
LOG.info("RM Heartbeat (To process the scheduled containers)");
assigned = allocator.schedule();
dispatcher.await();
assertBlacklistAdditionsAndRemovals(0, 0, rm);
Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
//RMContainerAllocator gets assigned a p:5 on a blacklisted node.
//Send a release for the p:5 container + another request.
LOG.info("RM Heartbeat (To process the re-scheduled containers)");
assigned = allocator.schedule();
dispatcher.await();
assertBlacklistAdditionsAndRemovals(0, 0, rm);
Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
//Hearbeat from H3 to schedule on this host.
LOG.info("h3 Heartbeat (To re-schedule the containers)");
nodeManager3.nodeHeartbeat(true); // Node heartbeat
dispatcher.await();
LOG.info("RM Heartbeat (To process the re-scheduled containers for H3)");
assigned = allocator.schedule();
assertBlacklistAdditionsAndRemovals(0, 0, rm);
dispatcher.await();
// For debugging
for (TaskAttemptContainerAssignedEvent assig : assigned) {
LOG.info(assig.getTaskAttemptID() +
" assgined to " + assig.getContainer().getId() +
" with priority " + assig.getContainer().getPriority());
}
Assert.assertEquals("No of assignments must be 2", 2, assigned.size());
// validate that all containers are assigned to h3
for (TaskAttemptContainerAssignedEvent assig : assigned) {
Assert.assertEquals("Assigned container " + assig.getContainer().getId()
+ " host not correct", "h3", assig.getContainer().getNodeId().getHost());
}
}
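// Helpers to check what the allocator last sent to the scheduler.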
private static void assertBlacklistAdditionsAndRemovals(
int expectedAdditions, int expectedRemovals, MyResourceManager rm) {
Assert.assertEquals(expectedAdditions,
rm.getMyFifoScheduler().lastBlacklistAdditions.size());
Assert.assertEquals(expectedRemovals,
rm.getMyFifoScheduler().lastBlacklistRemovals.size());
}
private static void assertAsksAndReleases(int expectedAsk,
int expectedRelease, MyResourceManager rm) {
Assert.assertEquals(expectedAsk, rm.getMyFifoScheduler().lastAsk.size());
Assert.assertEquals(expectedRelease,
rm.getMyFifoScheduler().lastRelease.size());
}
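// FifoScheduler that records the last allocate() arguments so tests can
// assert on asks, releases and blacklist updates.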
private static class MyFifoScheduler extends FifoScheduler {
public MyFifoScheduler(RMContext rmContext) {
super();
try {
Configuration conf = new Configuration();
reinitialize(conf, rmContext);
} catch (IOException ie) {
LOG.info("add application failed with ", ie);
assert (false);
}
}
List<ResourceRequest> lastAsk = null;
List<ContainerId> lastRelease = null;
List<String> lastBlacklistAdditions;
List<String> lastBlacklistRemovals;
// override this to copy the ask objects; otherwise FifoScheduler updates
// numContainers in the same objects kept by RMContainerAllocator
@Override
public synchronized Allocation allocate(
ApplicationAttemptId applicationAttemptId, List<ResourceRequest> ask,
List<ContainerId> release,
List<String> blacklistAdditions, List<String> blacklistRemovals) {
List<ResourceRequest> askCopy = new ArrayList<ResourceRequest>();
for (ResourceRequest req : ask) {
ResourceRequest reqCopy = ResourceRequest.newInstance(req
.getPriority(), req.getResourceName(), req.getCapability(), req
.getNumContainers(), req.getRelaxLocality());
askCopy.add(reqCopy);
}
SecurityUtil.setTokenServiceUseIp(false);
lastAsk = ask;
lastRelease = release;
lastBlacklistAdditions = blacklistAdditions;
lastBlacklistRemovals = blacklistRemovals;
return super.allocate(
applicationAttemptId, askCopy, release,
blacklistAdditions, blacklistRemovals);
}
}
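// Builds a ContainerRequestEvent for a map (or reduce) attempt of the given
// job, with an optional fast-fail flag for previously failed attempts.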
private ContainerRequestEvent createReq(JobId jobId, int taskAttemptId,
int memory, String[] hosts) {
return createReq(jobId, taskAttemptId, memory, hosts, false, false);
}
private ContainerRequestEvent
createReq(JobId jobId, int taskAttemptId, int memory, String[] hosts,
boolean earlierFailedAttempt, boolean reduce) {
TaskId taskId;
if (reduce) {
taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
} else {
taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
}
TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId,
taskAttemptId);
Resource containerNeed = Resource.newInstance(memory, 1);
if (earlierFailedAttempt) {
return ContainerRequestEvent
.createContainerRequestEventForFailedContainer(attemptId,
containerNeed);
}
return new ContainerRequestEvent(attemptId, containerNeed, hosts,
new String[] { NetworkTopology.DEFAULT_RACK });
}
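// Builds a ContainerFailedEvent used to drive node blacklisting.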
private ContainerFailedEvent createFailEvent(JobId jobId, int taskAttemptId,
String host, boolean reduce) {
TaskId taskId;
if (reduce) {
taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
} else {
taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
}
TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId,
taskAttemptId);
return new ContainerFailedEvent(attemptId, host);
}
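// Builds a CONTAINER_DEALLOCATE event for the given attempt.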
private ContainerAllocatorEvent createDeallocateEvent(JobId jobId,
int taskAttemptId, boolean reduce) {
TaskId taskId;
if (reduce) {
taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
} else {
taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
}
TaskAttemptId attemptId =
MRBuilderUtils.newTaskAttemptId(taskId, taskAttemptId);
return new ContainerAllocatorEvent(attemptId,
ContainerAllocator.EventType.CONTAINER_DEALLOCATE);
}
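// Asserts that every request got a distinct container and, optionally, that
// each container landed on one of the requested hosts.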
private void checkAssignments(ContainerRequestEvent[] requests,
List<TaskAttemptContainerAssignedEvent> assignments,
boolean checkHostMatch) {
Assert.assertNotNull("Container not assigned", assignments);
Assert.assertEquals("Assigned count not correct", requests.length,
assignments.size());
// check for uniqueness of containerIDs
Set<ContainerId> containerIds = new HashSet<ContainerId>();
for (TaskAttemptContainerAssignedEvent assigned : assignments) {
containerIds.add(assigned.getContainer().getId());
}
Assert.assertEquals("Assigned containers must be different", assignments
.size(), containerIds.size());
// check for all assignment
for (ContainerRequestEvent req : requests) {
TaskAttemptContainerAssignedEvent assigned = null;
for (TaskAttemptContainerAssignedEvent ass : assignments) {
if (ass.getTaskAttemptID().equals(req.getAttemptID())) {
assigned = ass;
break;
}
}
checkAssignment(req, assigned, checkHostMatch);
}
}
private void checkAssignment(ContainerRequestEvent request,
TaskAttemptContainerAssignedEvent assigned, boolean checkHostMatch) {
Assert.assertNotNull("Nothing assigned to attempt "
+ request.getAttemptID(), assigned);
Assert.assertEquals("assigned to wrong attempt", request.getAttemptID(),
assigned.getTaskAttemptID());
if (checkHostMatch) {
Assert.assertTrue("Not assigned to requested host", Arrays.asList(
request.getHosts()).contains(
assigned.getContainer().getNodeId().getHost()));
}
}
// Mock RMContainerAllocator
// Instead of talking to the remote Scheduler, it uses the local Scheduler
private static class MyContainerAllocator extends RMContainerAllocator {
static final List<TaskAttemptContainerAssignedEvent> events
= new ArrayList<TaskAttemptContainerAssignedEvent>();
static final List<TaskAttemptKillEvent> taskAttemptKillEvents
= new ArrayList<TaskAttemptKillEvent>();
static final List<JobUpdatedNodesEvent> jobUpdatedNodeEvents
= new ArrayList<JobUpdatedNodesEvent>();
static final List<JobEvent> jobEvents = new ArrayList<JobEvent>();
private MyResourceManager rm;
private boolean isUnregistered = false;
private AllocateResponse allocateResponse;
private static AppContext createAppContext(
ApplicationAttemptId appAttemptId, Job job) {
AppContext context = mock(AppContext.class);
ApplicationId appId = appAttemptId.getApplicationId();
when(context.getApplicationID()).thenReturn(appId);
when(context.getApplicationAttemptId()).thenReturn(appAttemptId);
when(context.getJob(isA(JobId.class))).thenReturn(job);
when(context.getClusterInfo()).thenReturn(
new ClusterInfo(Resource.newInstance(10240, 1)));
when(context.getEventHandler()).thenReturn(new EventHandler() {
@Override
public void handle(Event event) {
// Only capture interesting events.
if (event instanceof TaskAttemptContainerAssignedEvent) {
events.add((TaskAttemptContainerAssignedEvent) event);
} else if (event instanceof TaskAttemptKillEvent) {
taskAttemptKillEvents.add((TaskAttemptKillEvent)event);
} else if (event instanceof JobUpdatedNodesEvent) {
jobUpdatedNodeEvents.add((JobUpdatedNodesEvent)event);
} else if (event instanceof JobEvent) {
jobEvents.add((JobEvent)event);
}
}
});
return context;
}
private static AppContext createAppContext(
ApplicationAttemptId appAttemptId, Job job, Clock clock) {
AppContext context = createAppContext(appAttemptId, job);
when(context.getClock()).thenReturn(clock);
return context;
}
private static ClientService createMockClientService() {
ClientService service = mock(ClientService.class);
when(service.getBindAddress()).thenReturn(
NetUtils.createSocketAddr("localhost:4567"));
when(service.getHttpPort()).thenReturn(890);
return service;
}
// Use this constructor when using a real job.
MyContainerAllocator(MyResourceManager rm,
ApplicationAttemptId appAttemptId, AppContext context) {
super(createMockClientService(), context);
this.rm = rm;
}
// Use this constructor when you are using a mocked job.
public MyContainerAllocator(MyResourceManager rm, Configuration conf,
ApplicationAttemptId appAttemptId, Job job) {
super(createMockClientService(), createAppContext(appAttemptId, job));
this.rm = rm;
super.init(conf);
super.start();
}
public MyContainerAllocator(MyResourceManager rm, Configuration conf,
ApplicationAttemptId appAttemptId, Job job, Clock clock) {
super(createMockClientService(),
createAppContext(appAttemptId, job, clock));
this.rm = rm;
super.init(conf);
super.start();
}
@Override
protected ApplicationMasterProtocol createSchedulerProxy() {
return this.rm.getApplicationMasterService();
}
@Override
protected void register() {
ApplicationAttemptId attemptId = getContext().getApplicationAttemptId();
Token<AMRMTokenIdentifier> token =
rm.getRMContext().getRMApps().get(attemptId.getApplicationId())
.getRMAppAttempt(attemptId).getAMRMToken();
try {
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
ugi.addTokenIdentifier(token.decodeIdentifier());
} catch (IOException e) {
throw new YarnRuntimeException(e);
}
super.register();
}
@Override
protected void unregister() {
isUnregistered=true;
}
@Override
protected Resource getMaxContainerCapability() {
return Resource.newInstance(10240, 1);
}
public void sendRequest(ContainerRequestEvent req) {
sendRequests(Arrays.asList(new ContainerRequestEvent[] { req }));
}
public void sendRequests(List<ContainerRequestEvent> reqs) {
for (ContainerRequestEvent req : reqs) {
super.handleEvent(req);
}
}
public void sendFailure(ContainerFailedEvent f) {
super.handleEvent(f);
}
public void sendDeallocate(ContainerAllocatorEvent f) {
super.handleEvent(f);
}
// API to be used by tests
public List<TaskAttemptContainerAssignedEvent> schedule()
throws Exception {
// before heartbeating with the RM, drain all outstanding events so that
// every request sent before this heartbeat has been handled
GenericTestUtils.waitFor(new Supplier<Boolean>() {
public Boolean get() {
return eventQueue.isEmpty();
}
}, 100, 10000);
// run the scheduler
try {
super.heartbeat();
} catch (Exception e) {
LOG.error("error in heartbeat ", e);
throw new YarnRuntimeException(e);
}
List<TaskAttemptContainerAssignedEvent> result
= new ArrayList<TaskAttemptContainerAssignedEvent>(events);
events.clear();
return result;
}
static List<TaskAttemptKillEvent> getTaskAttemptKillEvents() {
return taskAttemptKillEvents;
}
static List<JobUpdatedNodesEvent> getJobUpdatedNodeEvents() {
return jobUpdatedNodeEvents;
}
@Override
protected void startAllocatorThread() {
// override to NOT start thread
}
@Override
protected boolean isApplicationMasterRegistered() {
return super.isApplicationMasterRegistered();
}
public boolean isUnregistered() {
return isUnregistered;
}
public void updateSchedulerProxy(MyResourceManager rm) {
scheduler = rm.getApplicationMasterService();
}
@Override
protected AllocateResponse makeRemoteRequest() throws IOException,
YarnException {
allocateResponse = super.makeRemoteRequest();
return allocateResponse;
}
}
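// Variant whose makeRemoteRequest always throws, used to exercise the
// allocator's RM-communication error handling.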
private static class MyContainerAllocator2 extends MyContainerAllocator {
public MyContainerAllocator2(MyResourceManager rm, Configuration conf,
ApplicationAttemptId appAttemptId, Job job) {
super(rm, conf, appAttemptId, job);
}
@Override
protected AllocateResponse makeRemoteRequest() throws IOException,
YarnException {
throw new YarnRuntimeException("for testing");
}
}
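  // Exercises the slow-start, ramp-up and ramp-down decisions made by
  // scheduleReduces() for different map completion counts and
  // memory/CPU headroom values.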
@Test
public void testReduceScheduling() throws Exception {
int totalMaps = 10;
int succeededMaps = 1;
int scheduledMaps = 10;
int scheduledReduces = 0;
int assignedMaps = 2;
int assignedReduces = 0;
Resource mapResourceReqt = BuilderUtils.newResource(1024, 1);
Resource reduceResourceReqt = BuilderUtils.newResource(2 * 1024, 1);
int numPendingReduces = 4;
float maxReduceRampupLimit = 0.5f;
float reduceSlowStart = 0.2f;
RMContainerAllocator allocator = mock(RMContainerAllocator.class);
doCallRealMethod().when(allocator).scheduleReduces(anyInt(), anyInt(),
anyInt(), anyInt(), anyInt(), anyInt(), any(Resource.class),
any(Resource.class), anyInt(), anyFloat(), anyFloat());
doReturn(EnumSet.of(SchedulerResourceTypes.MEMORY)).when(allocator)
.getSchedulerResourceTypes();
// Test slow-start
allocator.scheduleReduces(
totalMaps, succeededMaps,
scheduledMaps, scheduledReduces,
assignedMaps, assignedReduces,
mapResourceReqt, reduceResourceReqt,
numPendingReduces,
maxReduceRampupLimit, reduceSlowStart);
verify(allocator, never()).setIsReduceStarted(true);
// verify slow-start still in effect when no more maps need to
// be scheduled but some have yet to complete
allocator.scheduleReduces(
totalMaps, succeededMaps,
0, scheduledReduces,
totalMaps - succeededMaps, assignedReduces,
mapResourceReqt, reduceResourceReqt,
numPendingReduces,
maxReduceRampupLimit, reduceSlowStart);
verify(allocator, never()).setIsReduceStarted(true);
verify(allocator, never()).scheduleAllReduces();
succeededMaps = 3;
doReturn(BuilderUtils.newResource(0, 0)).when(allocator).getResourceLimit();
allocator.scheduleReduces(
totalMaps, succeededMaps,
scheduledMaps, scheduledReduces,
assignedMaps, assignedReduces,
mapResourceReqt, reduceResourceReqt,
numPendingReduces,
maxReduceRampupLimit, reduceSlowStart);
verify(allocator, times(1)).setIsReduceStarted(true);
// Test reduce ramp-up
doReturn(BuilderUtils.newResource(100 * 1024, 100 * 1)).when(allocator)
.getResourceLimit();
allocator.scheduleReduces(
totalMaps, succeededMaps,
scheduledMaps, scheduledReduces,
assignedMaps, assignedReduces,
mapResourceReqt, reduceResourceReqt,
numPendingReduces,
maxReduceRampupLimit, reduceSlowStart);
verify(allocator).rampUpReduces(anyInt());
verify(allocator, never()).rampDownReduces(anyInt());
// Test reduce ramp-down
scheduledReduces = 3;
doReturn(BuilderUtils.newResource(10 * 1024, 10 * 1)).when(allocator)
.getResourceLimit();
allocator.scheduleReduces(
totalMaps, succeededMaps,
scheduledMaps, scheduledReduces,
assignedMaps, assignedReduces,
mapResourceReqt, reduceResourceReqt,
numPendingReduces,
maxReduceRampupLimit, reduceSlowStart);
verify(allocator).rampDownReduces(anyInt());
    // Test reduce ramp-down for when there are scheduled maps.
    // Since we have two scheduled maps, rampDownReduces
    // should be invoked twice.
scheduledMaps = 2;
assignedReduces = 2;
doReturn(BuilderUtils.newResource(10 * 1024, 10 * 1)).when(allocator)
.getResourceLimit();
allocator.scheduleReduces(
totalMaps, succeededMaps,
scheduledMaps, scheduledReduces,
assignedMaps, assignedReduces,
mapResourceReqt, reduceResourceReqt,
numPendingReduces,
maxReduceRampupLimit, reduceSlowStart);
verify(allocator, times(2)).rampDownReduces(anyInt());
doReturn(
EnumSet.of(SchedulerResourceTypes.MEMORY, SchedulerResourceTypes.CPU))
.when(allocator).getSchedulerResourceTypes();
// Test ramp-down when enough memory but not enough cpu resource
scheduledMaps = 10;
assignedReduces = 0;
doReturn(BuilderUtils.newResource(100 * 1024, 5 * 1)).when(allocator)
.getResourceLimit();
allocator.scheduleReduces(totalMaps, succeededMaps, scheduledMaps,
scheduledReduces, assignedMaps, assignedReduces, mapResourceReqt,
reduceResourceReqt, numPendingReduces, maxReduceRampupLimit,
reduceSlowStart);
verify(allocator, times(3)).rampDownReduces(anyInt());
// Test ramp-down when enough cpu but not enough memory resource
doReturn(BuilderUtils.newResource(10 * 1024, 100 * 1)).when(allocator)
.getResourceLimit();
allocator.scheduleReduces(totalMaps, succeededMaps, scheduledMaps,
scheduledReduces, assignedMaps, assignedReduces, mapResourceReqt,
reduceResourceReqt, numPendingReduces, maxReduceRampupLimit,
reduceSlowStart);
verify(allocator, times(4)).rampDownReduces(anyInt());
}
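  // Allocator that records whether scheduleReduces() was invoked, so tests
  // can detect a recalculation of the reduce schedule.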
private static class RecalculateContainerAllocator extends MyContainerAllocator {
public boolean recalculatedReduceSchedule = false;
public RecalculateContainerAllocator(MyResourceManager rm,
Configuration conf, ApplicationAttemptId appAttemptId, Job job) {
super(rm, conf, appAttemptId, job);
}
@Override
public void scheduleReduces(int totalMaps, int completedMaps,
int scheduledMaps, int scheduledReduces, int assignedMaps,
int assignedReduces, Resource mapResourceReqt, Resource reduceResourceReqt,
int numPendingReduces, float maxReduceRampupLimit, float reduceSlowStart) {
recalculatedReduceSchedule = true;
}
}
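  // Verifies that the reduce schedule is recalculated only when the number
  // of completed maps reported by the job changes between heartbeats.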
@Test
public void testCompletedTasksRecalculateSchedule() throws Exception {
LOG.info("Running testCompletedTasksRecalculateSchedule");
Configuration conf = new Configuration();
final MyResourceManager rm = new MyResourceManager(conf);
rm.start();
DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext()
.getDispatcher();
// Submit the application
RMApp app = rm.submitApp(1024);
dispatcher.await();
    // Register a node so that the AM can be launched.
MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
amNodeManager.nodeHeartbeat(true);
dispatcher.await();
ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt()
.getAppAttemptId();
rm.sendAMLaunched(appAttemptId);
dispatcher.await();
JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
Job job = mock(Job.class);
when(job.getReport()).thenReturn(
MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
doReturn(10).when(job).getTotalMaps();
doReturn(10).when(job).getTotalReduces();
doReturn(0).when(job).getCompletedMaps();
RecalculateContainerAllocator allocator =
new RecalculateContainerAllocator(rm, conf, appAttemptId, job);
allocator.schedule();
allocator.recalculatedReduceSchedule = false;
allocator.schedule();
Assert.assertFalse("Unexpected recalculate of reduce schedule",
allocator.recalculatedReduceSchedule);
doReturn(1).when(job).getCompletedMaps();
allocator.schedule();
Assert.assertTrue("Expected recalculate of reduce schedule",
allocator.recalculatedReduceSchedule);
}
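  // Verifies that the allocator heartbeats on the configured interval and
  // that runOnNextHeartbeat() callbacks fire after the next heartbeat.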
@Test
public void testHeartbeatHandler() throws Exception {
LOG.info("Running testHeartbeatHandler");
Configuration conf = new Configuration();
conf.setInt(MRJobConfig.MR_AM_TO_RM_HEARTBEAT_INTERVAL_MS, 1);
ControlledClock clock = new ControlledClock(new SystemClock());
AppContext appContext = mock(AppContext.class);
when(appContext.getClock()).thenReturn(clock);
when(appContext.getApplicationID()).thenReturn(
ApplicationId.newInstance(1, 1));
RMContainerAllocator allocator = new RMContainerAllocator(
mock(ClientService.class), appContext) {
@Override
protected void register() {
}
@Override
protected ApplicationMasterProtocol createSchedulerProxy() {
return mock(ApplicationMasterProtocol.class);
}
@Override
protected synchronized void heartbeat() throws Exception {
}
};
allocator.init(conf);
allocator.start();
clock.setTime(5);
int timeToWaitMs = 5000;
while (allocator.getLastHeartbeatTime() != 5 && timeToWaitMs > 0) {
Thread.sleep(10);
timeToWaitMs -= 10;
}
Assert.assertEquals(5, allocator.getLastHeartbeatTime());
clock.setTime(7);
timeToWaitMs = 5000;
while (allocator.getLastHeartbeatTime() != 7 && timeToWaitMs > 0) {
Thread.sleep(10);
timeToWaitMs -= 10;
}
Assert.assertEquals(7, allocator.getLastHeartbeatTime());
final AtomicBoolean callbackCalled = new AtomicBoolean(false);
allocator.runOnNextHeartbeat(new Runnable() {
@Override
public void run() {
callbackCalled.set(true);
}
});
clock.setTime(8);
timeToWaitMs = 5000;
while (allocator.getLastHeartbeatTime() != 8 && timeToWaitMs > 0) {
Thread.sleep(10);
timeToWaitMs -= 10;
}
Assert.assertEquals(8, allocator.getLastHeartbeatTime());
Assert.assertTrue(callbackCalled.get());
}
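  // Verifies that normally completed containers produce
  // TA_CONTAINER_COMPLETED events while aborted or preempted containers
  // produce TA_KILL events.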
@Test
public void testCompletedContainerEvent() {
RMContainerAllocator allocator = new RMContainerAllocator(
mock(ClientService.class), mock(AppContext.class));
TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(
MRBuilderUtils.newTaskId(
MRBuilderUtils.newJobId(1, 1, 1), 1, TaskType.MAP), 1);
ApplicationId applicationId = ApplicationId.newInstance(1, 1);
ApplicationAttemptId applicationAttemptId = ApplicationAttemptId.newInstance(
applicationId, 1);
ContainerId containerId = ContainerId.newContainerId(applicationAttemptId, 1);
ContainerStatus status = ContainerStatus.newInstance(
containerId, ContainerState.RUNNING, "", 0);
ContainerStatus abortedStatus = ContainerStatus.newInstance(
containerId, ContainerState.RUNNING, "",
ContainerExitStatus.ABORTED);
TaskAttemptEvent event = allocator.createContainerFinishedEvent(status,
attemptId);
Assert.assertEquals(TaskAttemptEventType.TA_CONTAINER_COMPLETED,
event.getType());
TaskAttemptEvent abortedEvent = allocator.createContainerFinishedEvent(
abortedStatus, attemptId);
Assert.assertEquals(TaskAttemptEventType.TA_KILL, abortedEvent.getType());
ContainerId containerId2 = ContainerId.newContainerId(applicationAttemptId, 2);
ContainerStatus status2 = ContainerStatus.newInstance(containerId2,
ContainerState.RUNNING, "", 0);
ContainerStatus preemptedStatus = ContainerStatus.newInstance(containerId2,
ContainerState.RUNNING, "", ContainerExitStatus.PREEMPTED);
TaskAttemptEvent event2 = allocator.createContainerFinishedEvent(status2,
attemptId);
Assert.assertEquals(TaskAttemptEventType.TA_CONTAINER_COMPLETED,
event2.getType());
TaskAttemptEvent abortedEvent2 = allocator.createContainerFinishedEvent(
preemptedStatus, attemptId);
Assert.assertEquals(TaskAttemptEventType.TA_KILL, abortedEvent2.getType());
}
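  // Verifies that the AM registers with the RM on startup and is
  // unregistered once the MRApp is stopped.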
@Test
public void testUnregistrationOnlyIfRegistered() throws Exception {
Configuration conf = new Configuration();
final MyResourceManager rm = new MyResourceManager(conf);
rm.start();
DrainDispatcher rmDispatcher =
(DrainDispatcher) rm.getRMContext().getDispatcher();
// Submit the application
RMApp rmApp = rm.submitApp(1024);
rmDispatcher.await();
MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 11264);
amNodeManager.nodeHeartbeat(true);
rmDispatcher.await();
final ApplicationAttemptId appAttemptId =
rmApp.getCurrentAppAttempt().getAppAttemptId();
rm.sendAMLaunched(appAttemptId);
rmDispatcher.await();
MRApp mrApp =
new MRApp(appAttemptId, ContainerId.newContainerId(appAttemptId, 0), 10,
0, false, this.getClass().getName(), true, 1) {
@Override
protected Dispatcher createDispatcher() {
return new DrainDispatcher();
}
protected ContainerAllocator createContainerAllocator(
ClientService clientService, AppContext context) {
return new MyContainerAllocator(rm, appAttemptId, context);
};
};
mrApp.submit(conf);
DrainDispatcher amDispatcher = (DrainDispatcher) mrApp.getDispatcher();
MyContainerAllocator allocator =
(MyContainerAllocator) mrApp.getContainerAllocator();
amDispatcher.await();
Assert.assertTrue(allocator.isApplicationMasterRegistered());
mrApp.stop();
Assert.assertTrue(allocator.isUnregistered());
}
  // Step-1 : AM sends an allocate request for 2 ContainerRequests and 1
  // blacklisted node.
  // Step-2 : 2 containers are allocated by the RM.
  // Step-3 : AM sends 1 containerRequest (event3) and 1 release request to
  // the RM.
  // Step-4 : On RM restart, the AM (which does not know the RM restarted)
  // sends an additional containerRequest (event4) and blacklisted nodes.
  // In turn, the RM sends a resync command.
  // Step-5 : On resync, the AM re-sends all outstanding asks, releases and
  // blacklist additions, and another containerRequest (event5).
  // Step-6 : The RM allocates containers for event3, event4 and cRequest5.
@Test
public void testRMContainerAllocatorResendsRequestsOnRMRestart()
throws Exception {
Configuration conf = new Configuration();
conf.set(YarnConfiguration.RECOVERY_ENABLED, "true");
conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
conf.setBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, true);
conf.setLong(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_SCHEDULING_WAIT_MS, 0);
conf.setBoolean(MRJobConfig.MR_AM_JOB_NODE_BLACKLISTING_ENABLE, true);
conf.setInt(MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER, 1);
conf.setInt(
MRJobConfig.MR_AM_IGNORE_BLACKLISTING_BLACKLISTED_NODE_PERECENT, -1);
MemoryRMStateStore memStore = new MemoryRMStateStore();
memStore.init(conf);
MyResourceManager rm1 = new MyResourceManager(conf, memStore);
rm1.start();
DrainDispatcher dispatcher =
(DrainDispatcher) rm1.getRMContext().getDispatcher();
// Submit the application
RMApp app = rm1.submitApp(1024);
dispatcher.await();
MockNM nm1 = new MockNM("h1:1234", 15120, rm1.getResourceTrackerService());
nm1.registerNode();
nm1.nodeHeartbeat(true); // Node heartbeat
dispatcher.await();
ApplicationAttemptId appAttemptId =
app.getCurrentAppAttempt().getAppAttemptId();
rm1.sendAMLaunched(appAttemptId);
dispatcher.await();
JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
Job mockJob = mock(Job.class);
when(mockJob.getReport()).thenReturn(
MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
MyContainerAllocator allocator =
new MyContainerAllocator(rm1, conf, appAttemptId, mockJob);
    // Step-1 : AM sends an allocate request for 2 ContainerRequests and 1
    // blacklisted node.
// create the container request
// send MAP request
ContainerRequestEvent event1 =
createReq(jobId, 1, 1024, new String[] { "h1" });
allocator.sendRequest(event1);
ContainerRequestEvent event2 =
createReq(jobId, 2, 2048, new String[] { "h1", "h2" });
allocator.sendRequest(event2);
// Send events to blacklist h2
ContainerFailedEvent f1 = createFailEvent(jobId, 1, "h2", false);
allocator.sendFailure(f1);
// send allocate request and 1 blacklisted nodes
List<TaskAttemptContainerAssignedEvent> assignedContainers =
allocator.schedule();
dispatcher.await();
Assert.assertEquals("No of assignments must be 0", 0,
assignedContainers.size());
    // The ask count is 3, not 4, because the ask for blacklisted node h2 is
    // removed.
assertAsksAndReleases(3, 0, rm1);
assertBlacklistAdditionsAndRemovals(1, 0, rm1);
nm1.nodeHeartbeat(true); // Node heartbeat
dispatcher.await();
// Step-2 : 2 containers are allocated by RM.
assignedContainers = allocator.schedule();
dispatcher.await();
Assert.assertEquals("No of assignments must be 2", 2,
assignedContainers.size());
assertAsksAndReleases(0, 0, rm1);
assertBlacklistAdditionsAndRemovals(0, 0, rm1);
assignedContainers = allocator.schedule();
Assert.assertEquals("No of assignments must be 0", 0,
assignedContainers.size());
assertAsksAndReleases(3, 0, rm1);
assertBlacklistAdditionsAndRemovals(0, 0, rm1);
    // Step-3 : AM sends 1 containerRequest (event3) and 1 release request to
    // the RM.
// send container request
ContainerRequestEvent event3 =
createReq(jobId, 3, 1000, new String[] { "h1" });
allocator.sendRequest(event3);
// send deallocate request
ContainerAllocatorEvent deallocate1 =
createDeallocateEvent(jobId, 1, false);
allocator.sendDeallocate(deallocate1);
assignedContainers = allocator.schedule();
Assert.assertEquals("No of assignments must be 0", 0,
assignedContainers.size());
assertAsksAndReleases(3, 1, rm1);
assertBlacklistAdditionsAndRemovals(0, 0, rm1);
// Phase-2 start 2nd RM is up
MyResourceManager rm2 = new MyResourceManager(conf, memStore);
rm2.start();
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
allocator.updateSchedulerProxy(rm2);
dispatcher = (DrainDispatcher) rm2.getRMContext().getDispatcher();
    // The NM should be told to resync on heartbeat, even on its first
    // heartbeat to the new RM.
NodeHeartbeatResponse hbResponse = nm1.nodeHeartbeat(true);
Assert.assertEquals(NodeAction.RESYNC, hbResponse.getNodeAction());
// new NM to represent NM re-register
nm1 = new MockNM("h1:1234", 10240, rm2.getResourceTrackerService());
nm1.registerNode();
nm1.nodeHeartbeat(true);
dispatcher.await();
    // Step-4 : On RM restart, the AM (which does not know the RM restarted)
    // sends an additional containerRequest (event4) and blacklisted nodes.
    // In turn, the RM sends a resync command.
// send deallocate request, release=1
ContainerAllocatorEvent deallocate2 =
createDeallocateEvent(jobId, 2, false);
allocator.sendDeallocate(deallocate2);
// Send events to blacklist nodes h3
ContainerFailedEvent f2 = createFailEvent(jobId, 1, "h3", false);
allocator.sendFailure(f2);
ContainerRequestEvent event4 =
createReq(jobId, 4, 2000, new String[] { "h1", "h2" });
allocator.sendRequest(event4);
// send allocate request to 2nd RM and get resync command
allocator.schedule();
dispatcher.await();
    // Step-5 : On resync, the AM re-sends all outstanding asks, releases and
    // blacklist additions, and another containerRequest (event5).
ContainerRequestEvent event5 =
createReq(jobId, 5, 3000, new String[] { "h1", "h2", "h3" });
allocator.sendRequest(event5);
    // send all outstanding requests again.
assignedContainers = allocator.schedule();
dispatcher.await();
assertAsksAndReleases(3, 2, rm2);
assertBlacklistAdditionsAndRemovals(2, 0, rm2);
nm1.nodeHeartbeat(true);
dispatcher.await();
    // Step-6 : The RM allocates containers for event3, event4 and cRequest5.
assignedContainers = allocator.schedule();
dispatcher.await();
Assert.assertEquals("Number of container should be 3", 3,
assignedContainers.size());
for (TaskAttemptContainerAssignedEvent assig : assignedContainers) {
Assert.assertTrue("Assigned count not correct",
"h1".equals(assig.getContainer().getNodeId().getHost()));
}
rm1.stop();
rm2.stop();
}
@Test
public void testRMUnavailable()
throws Exception {
Configuration conf = new Configuration();
conf.setInt(
MRJobConfig.MR_AM_TO_RM_WAIT_INTERVAL_MS, 0);
MyResourceManager rm1 = new MyResourceManager(conf);
rm1.start();
DrainDispatcher dispatcher =
(DrainDispatcher) rm1.getRMContext().getDispatcher();
RMApp app = rm1.submitApp(1024);
dispatcher.await();
MockNM nm1 = new MockNM("h1:1234", 15120, rm1.getResourceTrackerService());
nm1.registerNode();
nm1.nodeHeartbeat(true);
dispatcher.await();
ApplicationAttemptId appAttemptId =
app.getCurrentAppAttempt().getAppAttemptId();
rm1.sendAMLaunched(appAttemptId);
dispatcher.await();
JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
Job mockJob = mock(Job.class);
when(mockJob.getReport()).thenReturn(
MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
MyContainerAllocator2 allocator =
new MyContainerAllocator2(rm1, conf, appAttemptId, mockJob);
allocator.jobEvents.clear();
try {
allocator.schedule();
Assert.fail("Should Have Exception");
} catch (YarnRuntimeException e) {
Assert.assertTrue(e.getMessage().contains("Could not contact RM after"));
}
dispatcher.await();
Assert.assertEquals("Should Have 1 Job Event", 1,
allocator.jobEvents.size());
JobEvent event = allocator.jobEvents.get(0);
Assert.assertTrue("Should Reboot", event.getType().equals(JobEventType.JOB_AM_REBOOT));
}
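  // Verifies that a rolled AMRM token is picked up while heartbeating and
  // that the UGI ends up holding exactly one, updated AMRM token.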
@Test(timeout=60000)
public void testAMRMTokenUpdate() throws Exception {
LOG.info("Running testAMRMTokenUpdate");
final String rmAddr = "somermaddress:1234";
final Configuration conf = new YarnConfiguration();
conf.setLong(
YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS, 8);
conf.setLong(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, 2000);
conf.set(YarnConfiguration.RM_SCHEDULER_ADDRESS, rmAddr);
final MyResourceManager rm = new MyResourceManager(conf);
rm.start();
AMRMTokenSecretManager secretMgr =
rm.getRMContext().getAMRMTokenSecretManager();
DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext()
.getDispatcher();
// Submit the application
RMApp app = rm.submitApp(1024);
dispatcher.await();
MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
amNodeManager.nodeHeartbeat(true);
dispatcher.await();
final ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt()
.getAppAttemptId();
final ApplicationId appId = app.getApplicationId();
rm.sendAMLaunched(appAttemptId);
dispatcher.await();
JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
final Job mockJob = mock(Job.class);
when(mockJob.getReport()).thenReturn(
MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
final Token<AMRMTokenIdentifier> oldToken = rm.getRMContext().getRMApps()
.get(appId).getRMAppAttempt(appAttemptId).getAMRMToken();
Assert.assertNotNull("app should have a token", oldToken);
UserGroupInformation testUgi = UserGroupInformation.createUserForTesting(
"someuser", new String[0]);
Token<AMRMTokenIdentifier> newToken = testUgi.doAs(
new PrivilegedExceptionAction<Token<AMRMTokenIdentifier>>() {
@Override
public Token<AMRMTokenIdentifier> run() throws Exception {
MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
appAttemptId, mockJob);
// Keep heartbeating until RM thinks the token has been updated
Token<AMRMTokenIdentifier> currentToken = oldToken;
long startTime = Time.monotonicNow();
while (currentToken == oldToken) {
if (Time.monotonicNow() - startTime > 20000) {
Assert.fail("Took to long to see AMRM token change");
}
Thread.sleep(100);
allocator.schedule();
currentToken = rm.getRMContext().getRMApps().get(appId)
.getRMAppAttempt(appAttemptId).getAMRMToken();
}
return currentToken;
}
});
// verify there is only one AMRM token in the UGI and it matches the
// updated token from the RM
int tokenCount = 0;
Token<? extends TokenIdentifier> ugiToken = null;
for (Token<? extends TokenIdentifier> token : testUgi.getTokens()) {
if (AMRMTokenIdentifier.KIND_NAME.equals(token.getKind())) {
ugiToken = token;
++tokenCount;
}
}
Assert.assertEquals("too many AMRM tokens", 1, tokenCount);
Assert.assertArrayEquals("token identifier not updated",
newToken.getIdentifier(), ugiToken.getIdentifier());
Assert.assertArrayEquals("token password not updated",
newToken.getPassword(), ugiToken.getPassword());
Assert.assertEquals("AMRM token service not updated",
new Text(rmAddr), ugiToken.getService());
}
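  // Verifies that the ANY asks sent to the scheduler never exceed the
  // configured concurrent running map and reduce task limits.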
@Test
public void testConcurrentTaskLimits() throws Exception {
final int MAP_LIMIT = 3;
final int REDUCE_LIMIT = 1;
LOG.info("Running testConcurrentTaskLimits");
Configuration conf = new Configuration();
conf.setInt(MRJobConfig.JOB_RUNNING_MAP_LIMIT, MAP_LIMIT);
conf.setInt(MRJobConfig.JOB_RUNNING_REDUCE_LIMIT, REDUCE_LIMIT);
conf.setFloat(MRJobConfig.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART, 1.0f);
ApplicationId appId = ApplicationId.newInstance(1, 1);
ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(
appId, 1);
JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
Job mockJob = mock(Job.class);
when(mockJob.getReport()).thenReturn(
MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
final MockScheduler mockScheduler = new MockScheduler(appAttemptId);
MyContainerAllocator allocator = new MyContainerAllocator(null, conf,
appAttemptId, mockJob) {
@Override
protected void register() {
}
@Override
protected ApplicationMasterProtocol createSchedulerProxy() {
return mockScheduler;
}
};
// create some map requests
ContainerRequestEvent[] reqMapEvents = new ContainerRequestEvent[5];
for (int i = 0; i < reqMapEvents.length; ++i) {
reqMapEvents[i] = createReq(jobId, i, 1024, new String[] { "h" + i });
}
allocator.sendRequests(Arrays.asList(reqMapEvents));
// create some reduce requests
ContainerRequestEvent[] reqReduceEvents = new ContainerRequestEvent[2];
for (int i = 0; i < reqReduceEvents.length; ++i) {
reqReduceEvents[i] = createReq(jobId, i, 1024, new String[] {},
false, true);
}
allocator.sendRequests(Arrays.asList(reqReduceEvents));
allocator.schedule();
// verify all of the host-specific asks were sent plus one for the
// default rack and one for the ANY request
Assert.assertEquals(reqMapEvents.length + 2, mockScheduler.lastAsk.size());
// verify AM is only asking for the map limit overall
Assert.assertEquals(MAP_LIMIT, mockScheduler.lastAnyAskMap);
// assign a map task and verify we do not ask for any more maps
ContainerId cid0 = mockScheduler.assignContainer("h0", false);
allocator.schedule();
allocator.schedule();
Assert.assertEquals(2, mockScheduler.lastAnyAskMap);
// complete the map task and verify that we ask for one more
mockScheduler.completeContainer(cid0);
allocator.schedule();
allocator.schedule();
Assert.assertEquals(3, mockScheduler.lastAnyAskMap);
// assign three more maps and verify we ask for no more maps
ContainerId cid1 = mockScheduler.assignContainer("h1", false);
ContainerId cid2 = mockScheduler.assignContainer("h2", false);
ContainerId cid3 = mockScheduler.assignContainer("h3", false);
allocator.schedule();
allocator.schedule();
Assert.assertEquals(0, mockScheduler.lastAnyAskMap);
// complete two containers and verify we only asked for one more
// since at that point all maps should be scheduled/completed
mockScheduler.completeContainer(cid2);
mockScheduler.completeContainer(cid3);
allocator.schedule();
allocator.schedule();
Assert.assertEquals(1, mockScheduler.lastAnyAskMap);
// allocate the last container and complete the first one
// and verify there are no more map asks.
mockScheduler.completeContainer(cid1);
ContainerId cid4 = mockScheduler.assignContainer("h4", false);
allocator.schedule();
allocator.schedule();
Assert.assertEquals(0, mockScheduler.lastAnyAskMap);
// complete the last map
mockScheduler.completeContainer(cid4);
allocator.schedule();
allocator.schedule();
Assert.assertEquals(0, mockScheduler.lastAnyAskMap);
// verify only reduce limit being requested
Assert.assertEquals(REDUCE_LIMIT, mockScheduler.lastAnyAskReduce);
// assign a reducer and verify ask goes to zero
cid0 = mockScheduler.assignContainer("h0", true);
allocator.schedule();
allocator.schedule();
Assert.assertEquals(0, mockScheduler.lastAnyAskReduce);
// complete the reducer and verify we ask for another
mockScheduler.completeContainer(cid0);
allocator.schedule();
allocator.schedule();
Assert.assertEquals(1, mockScheduler.lastAnyAskReduce);
// assign a reducer and verify ask goes to zero
cid0 = mockScheduler.assignContainer("h0", true);
allocator.schedule();
allocator.schedule();
Assert.assertEquals(0, mockScheduler.lastAnyAskReduce);
// complete the reducer and verify no more reducers
mockScheduler.completeContainer(cid0);
allocator.schedule();
allocator.schedule();
Assert.assertEquals(0, mockScheduler.lastAnyAskReduce);
allocator.close();
}
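  // Minimal ApplicationMasterProtocol implementation that records the last
  // ask list and hands out the container allocations and completions queued
  // up by the test.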
private static class MockScheduler implements ApplicationMasterProtocol {
ApplicationAttemptId attemptId;
long nextContainerId = 10;
List<ResourceRequest> lastAsk = null;
int lastAnyAskMap = 0;
int lastAnyAskReduce = 0;
List<ContainerStatus> containersToComplete =
new ArrayList<ContainerStatus>();
List<Container> containersToAllocate = new ArrayList<Container>();
public MockScheduler(ApplicationAttemptId attemptId) {
this.attemptId = attemptId;
}
@Override
public RegisterApplicationMasterResponse registerApplicationMaster(
RegisterApplicationMasterRequest request) throws YarnException,
IOException {
return RegisterApplicationMasterResponse.newInstance(
Resource.newInstance(512, 1),
Resource.newInstance(512000, 1024),
Collections.<ApplicationAccessType,String>emptyMap(),
ByteBuffer.wrap("fake_key".getBytes()),
Collections.<Container>emptyList(),
"default",
Collections.<NMToken>emptyList());
}
@Override
public FinishApplicationMasterResponse finishApplicationMaster(
FinishApplicationMasterRequest request) throws YarnException,
IOException {
return FinishApplicationMasterResponse.newInstance(false);
}
@Override
public AllocateResponse allocate(AllocateRequest request)
throws YarnException, IOException {
lastAsk = request.getAskList();
for (ResourceRequest req : lastAsk) {
if (ResourceRequest.ANY.equals(req.getResourceName())) {
Priority priority = req.getPriority();
if (priority.equals(RMContainerAllocator.PRIORITY_MAP)) {
lastAnyAskMap = req.getNumContainers();
} else if (priority.equals(RMContainerAllocator.PRIORITY_REDUCE)){
lastAnyAskReduce = req.getNumContainers();
}
}
}
AllocateResponse response = AllocateResponse.newInstance(
request.getResponseId(),
containersToComplete, containersToAllocate,
Collections.<NodeReport>emptyList(),
Resource.newInstance(512000, 1024), null, 10, null,
Collections.<NMToken>emptyList());
containersToComplete.clear();
containersToAllocate.clear();
return response;
}
public ContainerId assignContainer(String nodeName, boolean isReduce) {
ContainerId containerId =
ContainerId.newContainerId(attemptId, nextContainerId++);
Priority priority = isReduce ? RMContainerAllocator.PRIORITY_REDUCE
: RMContainerAllocator.PRIORITY_MAP;
Container container = Container.newInstance(containerId,
NodeId.newInstance(nodeName, 1234), nodeName + ":5678",
Resource.newInstance(1024, 1), priority, null);
containersToAllocate.add(container);
return containerId;
}
public void completeContainer(ContainerId containerId) {
containersToComplete.add(ContainerStatus.newInstance(containerId,
ContainerState.COMPLETE, "", 0));
}
}
public static void main(String[] args) throws Exception {
TestRMContainerAllocator t = new TestRMContainerAllocator();
t.testSimple();
t.testResource();
t.testMapReduceScheduling();
t.testReportedAppProgress();
t.testReportedAppProgressWithOnlyMaps();
t.testBlackListedNodes();
t.testCompletedTasksRecalculateSchedule();
t.testAMRMTokenUpdate();
}
}
| 107,552
| 37.968478
| 99
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/metrics/TestMRAppMetrics.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.metrics;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import static org.apache.hadoop.test.MetricsAsserts.*;
import static org.apache.hadoop.test.MockitoMaker.*;
import org.junit.Test;
import static org.mockito.Mockito.*;
public class TestMRAppMetrics {
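  // Drives a job and its map/reduce tasks through their lifecycle
  // transitions and checks the resulting MRAppMetrics counters and gauges.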
@Test public void testNames() {
Job job = mock(Job.class);
Task mapTask = make(stub(Task.class).returning(TaskType.MAP).
from.getType());
Task reduceTask = make(stub(Task.class).returning(TaskType.REDUCE).
from.getType());
MRAppMetrics metrics = MRAppMetrics.create();
metrics.submittedJob(job);
metrics.waitingTask(mapTask);
metrics.waitingTask(reduceTask);
metrics.preparingJob(job);
metrics.submittedJob(job);
metrics.waitingTask(mapTask);
metrics.waitingTask(reduceTask);
metrics.preparingJob(job);
metrics.submittedJob(job);
metrics.waitingTask(mapTask);
metrics.waitingTask(reduceTask);
metrics.preparingJob(job);
metrics.endPreparingJob(job);
metrics.endPreparingJob(job);
metrics.endPreparingJob(job);
metrics.runningJob(job);
metrics.launchedTask(mapTask);
metrics.runningTask(mapTask);
metrics.failedTask(mapTask);
metrics.endWaitingTask(reduceTask);
metrics.endRunningTask(mapTask);
metrics.endRunningJob(job);
metrics.failedJob(job);
metrics.runningJob(job);
metrics.launchedTask(mapTask);
metrics.runningTask(mapTask);
metrics.killedTask(mapTask);
metrics.endWaitingTask(reduceTask);
metrics.endRunningTask(mapTask);
metrics.endRunningJob(job);
metrics.killedJob(job);
metrics.runningJob(job);
metrics.launchedTask(mapTask);
metrics.runningTask(mapTask);
metrics.completedTask(mapTask);
metrics.endRunningTask(mapTask);
metrics.launchedTask(reduceTask);
metrics.runningTask(reduceTask);
metrics.completedTask(reduceTask);
metrics.endRunningTask(reduceTask);
metrics.endRunningJob(job);
metrics.completedJob(job);
checkMetrics(/*job*/3, 1, 1, 1, 0, 0,
/*map*/3, 1, 1, 1, 0, 0,
/*reduce*/1, 1, 0, 0, 0, 0);
}
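  // Asserts the expected values of the MRAppMetrics job, map and reduce
  // counters and gauges.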
private void checkMetrics(int jobsSubmitted, int jobsCompleted,
int jobsFailed, int jobsKilled, int jobsPreparing, int jobsRunning,
int mapsLaunched, int mapsCompleted, int mapsFailed, int mapsKilled,
int mapsRunning, int mapsWaiting, int reducesLaunched,
int reducesCompleted, int reducesFailed, int reducesKilled,
int reducesRunning, int reducesWaiting) {
MetricsRecordBuilder rb = getMetrics("MRAppMetrics");
assertCounter("JobsSubmitted", jobsSubmitted, rb);
assertCounter("JobsCompleted", jobsCompleted, rb);
assertCounter("JobsFailed", jobsFailed, rb);
assertCounter("JobsKilled", jobsKilled, rb);
assertGauge("JobsPreparing", jobsPreparing, rb);
assertGauge("JobsRunning", jobsRunning, rb);
assertCounter("MapsLaunched", mapsLaunched, rb);
assertCounter("MapsCompleted", mapsCompleted, rb);
assertCounter("MapsFailed", mapsFailed, rb);
assertCounter("MapsKilled", mapsKilled, rb);
assertGauge("MapsRunning", mapsRunning, rb);
assertGauge("MapsWaiting", mapsWaiting, rb);
assertCounter("ReducesLaunched", reducesLaunched, rb);
assertCounter("ReducesCompleted", reducesCompleted, rb);
assertCounter("ReducesFailed", reducesFailed, rb);
assertCounter("ReducesKilled", reducesKilled, rb);
assertGauge("ReducesRunning", reducesRunning, rb);
assertGauge("ReducesWaiting", reducesWaiting, rb);
}
}
| 4,596
| 36.680328
| 75
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncher.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.launcher;
import static org.mockito.Mockito.mock;
import java.io.IOException;
import java.lang.reflect.UndeclaredThrowableException;
import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.atomic.AtomicInteger;
import org.junit.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.MRApp;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttemptStateInternal;
import org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesResponse;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainersResponse;
import org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StopContainersResponse;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.client.api.impl.ContainerManagementProtocolProxy;
import org.apache.hadoop.yarn.client.api.impl.ContainerManagementProtocolProxy.ContainerManagementProtocolProxyData;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.ipc.HadoopYarnProtoRPC;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
import org.apache.hadoop.yarn.server.api.records.MasterKey;
import org.apache.hadoop.yarn.server.nodemanager.security.NMTokenSecretManagerInNM;
import org.apache.hadoop.yarn.util.Records;
import org.junit.Test;
public class TestContainerLauncher {
private static final RecordFactory recordFactory = RecordFactoryProvider
.getRecordFactory(null);
Configuration conf;
Server server;
static final Log LOG = LogFactory.getLog(TestContainerLauncher.class);
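  // Verifies that the launcher thread pool grows with the number of distinct
  // NM hosts and that the configured initial pool size is honored.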
@Test (timeout = 10000)
public void testPoolSize() throws InterruptedException {
ApplicationId appId = ApplicationId.newInstance(12345, 67);
ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(
appId, 3);
JobId jobId = MRBuilderUtils.newJobId(appId, 8);
TaskId taskId = MRBuilderUtils.newTaskId(jobId, 9, TaskType.MAP);
AppContext context = mock(AppContext.class);
CustomContainerLauncher containerLauncher = new CustomContainerLauncher(
context);
containerLauncher.init(new Configuration());
containerLauncher.start();
ThreadPoolExecutor threadPool = containerLauncher.getThreadPool();
// No events yet
Assert.assertEquals(containerLauncher.initialPoolSize,
MRJobConfig.DEFAULT_MR_AM_CONTAINERLAUNCHER_THREADPOOL_INITIAL_SIZE);
Assert.assertEquals(0, threadPool.getPoolSize());
Assert.assertEquals(containerLauncher.initialPoolSize,
threadPool.getCorePoolSize());
Assert.assertNull(containerLauncher.foundErrors);
containerLauncher.expectedCorePoolSize = containerLauncher.initialPoolSize;
for (int i = 0; i < 10; i++) {
ContainerId containerId = ContainerId.newContainerId(appAttemptId, i);
TaskAttemptId taskAttemptId = MRBuilderUtils.newTaskAttemptId(taskId, i);
containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,
containerId, "host" + i + ":1234", null,
ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
}
waitForEvents(containerLauncher, 10);
Assert.assertEquals(10, threadPool.getPoolSize());
Assert.assertNull(containerLauncher.foundErrors);
// Same set of hosts, so no change
containerLauncher.finishEventHandling = true;
int timeOut = 0;
while (containerLauncher.numEventsProcessed.get() < 10 && timeOut++ < 200) {
LOG.info("Waiting for number of events processed to become " + 10
+ ". It is now " + containerLauncher.numEventsProcessed.get()
+ ". Timeout is " + timeOut);
Thread.sleep(1000);
}
Assert.assertEquals(10, containerLauncher.numEventsProcessed.get());
containerLauncher.finishEventHandling = false;
for (int i = 0; i < 10; i++) {
ContainerId containerId = ContainerId.newContainerId(appAttemptId,
i + 10);
TaskAttemptId taskAttemptId = MRBuilderUtils.newTaskAttemptId(taskId,
i + 10);
containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,
containerId, "host" + i + ":1234", null,
ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
}
waitForEvents(containerLauncher, 20);
Assert.assertEquals(10, threadPool.getPoolSize());
Assert.assertNull(containerLauncher.foundErrors);
    // Different hosts, so there should be an increase in core-thread-pool size
    // to 21 (11 hosts + 10 buffer).
    // Core pool size should be 21 but the live pool size should be only 11.
containerLauncher.expectedCorePoolSize = 11 + containerLauncher.initialPoolSize;
containerLauncher.finishEventHandling = false;
ContainerId containerId = ContainerId.newContainerId(appAttemptId, 21);
TaskAttemptId taskAttemptId = MRBuilderUtils.newTaskAttemptId(taskId, 21);
containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,
containerId, "host11:1234", null,
ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
waitForEvents(containerLauncher, 21);
Assert.assertEquals(11, threadPool.getPoolSize());
Assert.assertNull(containerLauncher.foundErrors);
containerLauncher.stop();
// change configuration MR_AM_CONTAINERLAUNCHER_THREADPOOL_INITIAL_SIZE
// and verify initialPoolSize value.
Configuration conf = new Configuration();
conf.setInt(MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREADPOOL_INITIAL_SIZE,
20);
containerLauncher = new CustomContainerLauncher(context);
containerLauncher.init(conf);
    Assert.assertEquals(20, containerLauncher.initialPoolSize);
}
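  // Verifies that the launcher thread pool is capped at the configured
  // thread-count limit even when more distinct hosts are seen.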
@Test(timeout = 5000)
public void testPoolLimits() throws InterruptedException {
ApplicationId appId = ApplicationId.newInstance(12345, 67);
ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(
appId, 3);
JobId jobId = MRBuilderUtils.newJobId(appId, 8);
TaskId taskId = MRBuilderUtils.newTaskId(jobId, 9, TaskType.MAP);
TaskAttemptId taskAttemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
ContainerId containerId = ContainerId.newContainerId(appAttemptId, 10);
AppContext context = mock(AppContext.class);
CustomContainerLauncher containerLauncher = new CustomContainerLauncher(
context);
Configuration conf = new Configuration();
conf.setInt(MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT, 12);
containerLauncher.init(conf);
containerLauncher.start();
ThreadPoolExecutor threadPool = containerLauncher.getThreadPool();
// 10 different hosts
containerLauncher.expectedCorePoolSize = containerLauncher.initialPoolSize;
for (int i = 0; i < 10; i++) {
containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,
containerId, "host" + i + ":1234", null,
ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
}
waitForEvents(containerLauncher, 10);
Assert.assertEquals(10, threadPool.getPoolSize());
Assert.assertNull(containerLauncher.foundErrors);
// 4 more different hosts, but thread pool size should be capped at 12
    containerLauncher.expectedCorePoolSize = 12;
for (int i = 1; i <= 4; i++) {
containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,
containerId, "host1" + i + ":1234", null,
ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
}
waitForEvents(containerLauncher, 12);
Assert.assertEquals(12, threadPool.getPoolSize());
Assert.assertNull(containerLauncher.foundErrors);
    // Make some threads idle so that the remaining events are also processed.
containerLauncher.finishEventHandling = true;
waitForEvents(containerLauncher, 14);
Assert.assertEquals(12, threadPool.getPoolSize());
Assert.assertNull(containerLauncher.foundErrors);
containerLauncher.stop();
}
private void waitForEvents(CustomContainerLauncher containerLauncher,
int expectedNumEvents) throws InterruptedException {
int timeOut = 0;
while (containerLauncher.numEventsProcessing.get() < expectedNumEvents
&& timeOut++ < 20) {
LOG.info("Waiting for number of events to become " + expectedNumEvents
+ ". It is now " + containerLauncher.numEventsProcessing.get());
Thread.sleep(1000);
}
Assert.assertEquals(expectedNumEvents,
containerLauncher.numEventsProcessing.get());
}
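  // Verifies that a container launch against an unresponsive NM times out
  // and the task attempt fails with a SocketTimeoutException diagnostic.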
@Test(timeout = 15000)
public void testSlowNM() throws Exception {
conf = new Configuration();
int maxAttempts = 1;
conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, maxAttempts);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
// set timeout low for the test
conf.setInt("yarn.rpc.nm-command-timeout", 3000);
conf.set(YarnConfiguration.IPC_RPC_IMPL, HadoopYarnProtoRPC.class.getName());
YarnRPC rpc = YarnRPC.create(conf);
String bindAddr = "localhost:0";
InetSocketAddress addr = NetUtils.createSocketAddr(bindAddr);
NMTokenSecretManagerInNM tokenSecretManager =
new NMTokenSecretManagerInNM();
MasterKey masterKey = Records.newRecord(MasterKey.class);
masterKey.setBytes(ByteBuffer.wrap("key".getBytes()));
tokenSecretManager.setMasterKey(masterKey);
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
"token");
server =
rpc.getServer(ContainerManagementProtocol.class,
new DummyContainerManager(), addr, conf, tokenSecretManager, 1);
server.start();
MRApp app = new MRAppWithSlowNM(tokenSecretManager);
try {
Job job = app.submit(conf);
app.waitForState(job, JobState.RUNNING);
Map<TaskId, Task> tasks = job.getTasks();
Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
Task task = tasks.values().iterator().next();
app.waitForState(task, TaskState.SCHEDULED);
Map<TaskAttemptId, TaskAttempt> attempts = tasks.values().iterator()
.next().getAttempts();
Assert.assertEquals("Num attempts is not correct", maxAttempts,
attempts.size());
TaskAttempt attempt = attempts.values().iterator().next();
app.waitForInternalState((TaskAttemptImpl) attempt,
TaskAttemptStateInternal.ASSIGNED);
app.waitForState(job, JobState.FAILED);
String diagnostics = attempt.getDiagnostics().toString();
LOG.info("attempt.getDiagnostics: " + diagnostics);
Assert.assertTrue(diagnostics.contains("Container launch failed for "
+ "container_0_0000_01_000000 : "));
Assert
.assertTrue(diagnostics
.contains("java.net.SocketTimeoutException: 3000 millis timeout while waiting for channel"));
} finally {
server.stop();
app.stop();
}
}
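  // ContainerLauncherImpl whose event processors stall until told to finish,
  // letting the tests observe thread-pool growth deterministically.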
private final class CustomContainerLauncher extends ContainerLauncherImpl {
private volatile int expectedCorePoolSize = 0;
private AtomicInteger numEventsProcessing = new AtomicInteger(0);
private AtomicInteger numEventsProcessed = new AtomicInteger(0);
private volatile String foundErrors = null;
private volatile boolean finishEventHandling;
private CustomContainerLauncher(AppContext context) {
super(context);
}
public ThreadPoolExecutor getThreadPool() {
return super.launcherPool;
}
private final class CustomEventProcessor extends
ContainerLauncherImpl.EventProcessor {
private final ContainerLauncherEvent event;
private CustomEventProcessor(ContainerLauncherEvent event) {
super(event);
this.event = event;
}
@Override
public void run() {
// do nothing substantial
LOG.info("Processing the event " + event.toString());
numEventsProcessing.incrementAndGet();
// Stall
while (!finishEventHandling) {
synchronized (this) {
try {
wait(1000);
} catch (InterruptedException e) {
;
}
}
}
numEventsProcessed.incrementAndGet();
}
}
protected ContainerLauncherImpl.EventProcessor createEventProcessor(
final ContainerLauncherEvent event) {
      // At this point the EventProcessor is only being created, so no
      // additional threads have been created yet.
      // The core pool size should have increased by now.
if (expectedCorePoolSize != launcherPool.getCorePoolSize()) {
foundErrors = "Expected " + expectedCorePoolSize + " but found "
+ launcherPool.getCorePoolSize();
}
return new CustomEventProcessor(event);
}
}
private class MRAppWithSlowNM extends MRApp {
private NMTokenSecretManagerInNM tokenSecretManager;
public MRAppWithSlowNM(NMTokenSecretManagerInNM tokenSecretManager) {
super(1, 0, false, "TestContainerLauncher", true);
this.tokenSecretManager = tokenSecretManager;
}
@Override
protected ContainerLauncher
createContainerLauncher(final AppContext context) {
return new ContainerLauncherImpl(context) {
@Override
public ContainerManagementProtocolProxyData getCMProxy(
String containerMgrBindAddr, ContainerId containerId)
throws IOException {
InetSocketAddress addr = NetUtils.getConnectAddress(server);
String containerManagerBindAddr =
addr.getHostName() + ":" + addr.getPort();
Token token =
tokenSecretManager.createNMToken(
containerId.getApplicationAttemptId(),
NodeId.newInstance(addr.getHostName(), addr.getPort()), "user");
ContainerManagementProtocolProxy cmProxy =
new ContainerManagementProtocolProxy(conf);
ContainerManagementProtocolProxyData proxy =
cmProxy.new ContainerManagementProtocolProxyData(
YarnRPC.create(conf), containerManagerBindAddr, containerId,
token);
return proxy;
}
};
};
}
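  // ContainerManagementProtocol stub whose startContainers() sleeps to
  // simulate an unresponsive NodeManager.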
public class DummyContainerManager implements ContainerManagementProtocol {
private ContainerStatus status = null;
@Override
public GetContainerStatusesResponse getContainerStatuses(
GetContainerStatusesRequest request) throws IOException {
List<ContainerStatus> statuses = new ArrayList<ContainerStatus>();
statuses.add(status);
return GetContainerStatusesResponse.newInstance(statuses, null);
}
@Override
public StartContainersResponse startContainers(StartContainersRequest requests)
throws IOException {
StartContainerRequest request = requests.getStartContainerRequests().get(0);
ContainerTokenIdentifier containerTokenIdentifier =
MRApp.newContainerTokenIdentifier(request.getContainerToken());
// Validate that the container is what RM is giving.
Assert.assertEquals(MRApp.NM_HOST + ":" + MRApp.NM_PORT,
containerTokenIdentifier.getNmHostAddress());
StartContainersResponse response = recordFactory
.newRecordInstance(StartContainersResponse.class);
status = recordFactory.newRecordInstance(ContainerStatus.class);
try {
        // Make the thread sleep to look like it is not going to respond.
Thread.sleep(15000);
} catch (Exception e) {
LOG.error(e);
throw new UndeclaredThrowableException(e);
}
status.setState(ContainerState.RUNNING);
status.setContainerId(containerTokenIdentifier.getContainerID());
status.setExitStatus(0);
return response;
}
@Override
public StopContainersResponse stopContainers(StopContainersRequest request)
throws IOException {
Exception e = new Exception("Dummy function", new Exception(
"Dummy function cause"));
throw new IOException(e);
}
}
}
| 18,504
| 39.759912
| 116
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.launcher;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.atLeast;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import java.io.Closeable;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.ShuffleHandler;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.MRApp;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncher.EventType;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesResponse;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainersResponse;
import org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StopContainersResponse;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.client.api.impl.ContainerManagementProtocolProxy.ContainerManagementProtocolProxyData;
import org.apache.hadoop.yarn.event.Event;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
import org.junit.Before;
import org.junit.Test;
import org.mockito.ArgumentCaptor;
public class TestContainerLauncherImpl {
static final Log LOG = LogFactory.getLog(TestContainerLauncherImpl.class);
private static final RecordFactory recordFactory =
RecordFactoryProvider.getRecordFactory(null);
private Map<String, ByteBuffer> serviceResponse =
new HashMap<String, ByteBuffer>();
@Before
public void setup() throws IOException {
serviceResponse.clear();
serviceResponse.put(ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID,
ShuffleHandler.serializeMetaData(80));
}
  // The tests here mock ContainerManagementProtocol, which does not have a
  // close method. Create an interface that extends ContainerManagementProtocol
  // and Closeable so the tests do not fail with NoSuchMethodException.
private static interface ContainerManagementProtocolClient extends
ContainerManagementProtocol, Closeable {
}
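  // ContainerLauncherImpl that returns a mocked CM proxy and exposes a
  // helper to wait until its event queue and thread pool are idle.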
private static class ContainerLauncherImplUnderTest extends
ContainerLauncherImpl {
private ContainerManagementProtocol containerManager;
public ContainerLauncherImplUnderTest(AppContext context,
ContainerManagementProtocol containerManager) {
super(context);
this.containerManager = containerManager;
}
@Override
public ContainerManagementProtocolProxyData getCMProxy(
String containerMgrBindAddr, ContainerId containerId)
throws IOException {
ContainerManagementProtocolProxyData protocolProxy =
mock(ContainerManagementProtocolProxyData.class);
when(protocolProxy.getContainerManagementProtocol()).thenReturn(
containerManager);
return protocolProxy;
}
public void waitForPoolToIdle() throws InterruptedException {
      // The sleep is not ideal, but it ensures the other thread has had time
      // to insert the event into the queue and start processing it. Without
      // this sleep we were getting interrupted exceptions within eventQueue.
Thread.sleep(100l);
LOG.debug("POOL SIZE 1: "+this.eventQueue.size()+
" POOL SIZE 2: "+this.launcherPool.getQueue().size()+
" ACTIVE COUNT: "+ this.launcherPool.getActiveCount());
while(!this.eventQueue.isEmpty() ||
!this.launcherPool.getQueue().isEmpty() ||
this.launcherPool.getActiveCount() > 0) {
Thread.sleep(100l);
LOG.debug("POOL SIZE 1: "+this.eventQueue.size()+
" POOL SIZE 2: "+this.launcherPool.getQueue().size()+
" ACTIVE COUNT: "+ this.launcherPool.getActiveCount());
}
LOG.debug("POOL SIZE 1: "+this.eventQueue.size()+
" POOL SIZE 2: "+this.launcherPool.getQueue().size()+
" ACTIVE COUNT: "+ this.launcherPool.getActiveCount());
}
}
public static ContainerId makeContainerId(long ts, int appId, int attemptId,
int id) {
return ContainerId.newContainerId(
ApplicationAttemptId.newInstance(
ApplicationId.newInstance(ts, appId), attemptId), id);
}
public static TaskAttemptId makeTaskAttemptId(long ts, int appId, int taskId,
TaskType taskType, int id) {
ApplicationId aID = ApplicationId.newInstance(ts, appId);
JobId jID = MRBuilderUtils.newJobId(aID, id);
TaskId tID = MRBuilderUtils.newTaskId(jID, taskId, taskType);
return MRBuilderUtils.newTaskAttemptId(tID, id);
}
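  // A launch event followed by a cleanup event for the same container should
  // produce exactly one startContainers and one stopContainers call.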
@Test(timeout = 5000)
public void testHandle() throws Exception {
LOG.info("STARTING testHandle");
AppContext mockContext = mock(AppContext.class);
@SuppressWarnings("rawtypes")
EventHandler mockEventHandler = mock(EventHandler.class);
when(mockContext.getEventHandler()).thenReturn(mockEventHandler);
String cmAddress = "127.0.0.1:8000";
ContainerManagementProtocolClient mockCM =
mock(ContainerManagementProtocolClient.class);
ContainerLauncherImplUnderTest ut =
new ContainerLauncherImplUnderTest(mockContext, mockCM);
Configuration conf = new Configuration();
ut.init(conf);
ut.start();
try {
ContainerId contId = makeContainerId(0l, 0, 0, 1);
TaskAttemptId taskAttemptId = makeTaskAttemptId(0l, 0, 0, TaskType.MAP, 0);
StartContainersResponse startResp =
recordFactory.newRecordInstance(StartContainersResponse.class);
startResp.setAllServicesMetaData(serviceResponse);
LOG.info("inserting launch event");
ContainerRemoteLaunchEvent mockLaunchEvent =
mock(ContainerRemoteLaunchEvent.class);
when(mockLaunchEvent.getType())
.thenReturn(EventType.CONTAINER_REMOTE_LAUNCH);
when(mockLaunchEvent.getContainerID())
.thenReturn(contId);
when(mockLaunchEvent.getTaskAttemptID()).thenReturn(taskAttemptId);
when(mockLaunchEvent.getContainerMgrAddress()).thenReturn(cmAddress);
when(mockCM.startContainers(any(StartContainersRequest.class))).thenReturn(startResp);
when(mockLaunchEvent.getContainerToken()).thenReturn(
createNewContainerToken(contId, cmAddress));
ut.handle(mockLaunchEvent);
ut.waitForPoolToIdle();
verify(mockCM).startContainers(any(StartContainersRequest.class));
LOG.info("inserting cleanup event");
ContainerLauncherEvent mockCleanupEvent =
mock(ContainerLauncherEvent.class);
when(mockCleanupEvent.getType())
.thenReturn(EventType.CONTAINER_REMOTE_CLEANUP);
when(mockCleanupEvent.getContainerID())
.thenReturn(contId);
when(mockCleanupEvent.getTaskAttemptID()).thenReturn(taskAttemptId);
when(mockCleanupEvent.getContainerMgrAddress()).thenReturn(cmAddress);
ut.handle(mockCleanupEvent);
ut.waitForPoolToIdle();
verify(mockCM).stopContainers(any(StopContainersRequest.class));
} finally {
ut.stop();
}
}
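  // If the cleanup event arrives before the launch event, the launcher should
  // skip both the container stop and the later container start.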
@Test(timeout = 5000)
public void testOutOfOrder() throws Exception {
LOG.info("STARTING testOutOfOrder");
AppContext mockContext = mock(AppContext.class);
@SuppressWarnings("rawtypes")
EventHandler mockEventHandler = mock(EventHandler.class);
when(mockContext.getEventHandler()).thenReturn(mockEventHandler);
ContainerManagementProtocolClient mockCM =
mock(ContainerManagementProtocolClient.class);
ContainerLauncherImplUnderTest ut =
new ContainerLauncherImplUnderTest(mockContext, mockCM);
Configuration conf = new Configuration();
ut.init(conf);
ut.start();
try {
ContainerId contId = makeContainerId(0l, 0, 0, 1);
TaskAttemptId taskAttemptId = makeTaskAttemptId(0l, 0, 0, TaskType.MAP, 0);
String cmAddress = "127.0.0.1:8000";
StartContainersResponse startResp =
recordFactory.newRecordInstance(StartContainersResponse.class);
startResp.setAllServicesMetaData(serviceResponse);
LOG.info("inserting cleanup event");
ContainerLauncherEvent mockCleanupEvent =
mock(ContainerLauncherEvent.class);
when(mockCleanupEvent.getType())
.thenReturn(EventType.CONTAINER_REMOTE_CLEANUP);
when(mockCleanupEvent.getContainerID())
.thenReturn(contId);
when(mockCleanupEvent.getTaskAttemptID()).thenReturn(taskAttemptId);
when(mockCleanupEvent.getContainerMgrAddress()).thenReturn(cmAddress);
ut.handle(mockCleanupEvent);
ut.waitForPoolToIdle();
verify(mockCM, never()).stopContainers(any(StopContainersRequest.class));
LOG.info("inserting launch event");
ContainerRemoteLaunchEvent mockLaunchEvent =
mock(ContainerRemoteLaunchEvent.class);
when(mockLaunchEvent.getType())
.thenReturn(EventType.CONTAINER_REMOTE_LAUNCH);
when(mockLaunchEvent.getContainerID())
.thenReturn(contId);
when(mockLaunchEvent.getTaskAttemptID()).thenReturn(taskAttemptId);
when(mockLaunchEvent.getContainerMgrAddress()).thenReturn(cmAddress);
when(mockCM.startContainers(any(StartContainersRequest.class))).thenReturn(startResp);
when(mockLaunchEvent.getContainerToken()).thenReturn(
createNewContainerToken(contId, cmAddress));
ut.handle(mockLaunchEvent);
ut.waitForPoolToIdle();
verify(mockCM, never()).startContainers(any(StartContainersRequest.class));
} finally {
ut.stop();
}
}
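  // After a successful launch, stopping the launcher without an explicit
  // cleanup event should still stop the running container.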
@Test(timeout = 5000)
public void testMyShutdown() throws Exception {
LOG.info("in test Shutdown");
AppContext mockContext = mock(AppContext.class);
@SuppressWarnings("rawtypes")
EventHandler mockEventHandler = mock(EventHandler.class);
when(mockContext.getEventHandler()).thenReturn(mockEventHandler);
ContainerManagementProtocolClient mockCM =
mock(ContainerManagementProtocolClient.class);
ContainerLauncherImplUnderTest ut =
new ContainerLauncherImplUnderTest(mockContext, mockCM);
Configuration conf = new Configuration();
ut.init(conf);
ut.start();
try {
ContainerId contId = makeContainerId(0l, 0, 0, 1);
TaskAttemptId taskAttemptId = makeTaskAttemptId(0l, 0, 0, TaskType.MAP, 0);
String cmAddress = "127.0.0.1:8000";
StartContainersResponse startResp =
recordFactory.newRecordInstance(StartContainersResponse.class);
startResp.setAllServicesMetaData(serviceResponse);
LOG.info("inserting launch event");
ContainerRemoteLaunchEvent mockLaunchEvent =
mock(ContainerRemoteLaunchEvent.class);
when(mockLaunchEvent.getType())
.thenReturn(EventType.CONTAINER_REMOTE_LAUNCH);
when(mockLaunchEvent.getContainerID())
.thenReturn(contId);
when(mockLaunchEvent.getTaskAttemptID()).thenReturn(taskAttemptId);
when(mockLaunchEvent.getContainerMgrAddress()).thenReturn(cmAddress);
when(mockCM.startContainers(any(StartContainersRequest.class))).thenReturn(startResp);
when(mockLaunchEvent.getContainerToken()).thenReturn(
createNewContainerToken(contId, cmAddress));
ut.handle(mockLaunchEvent);
ut.waitForPoolToIdle();
verify(mockCM).startContainers(any(StartContainersRequest.class));
// skip cleanup and make sure stop kills the container
} finally {
ut.stop();
verify(mockCM).stopContainers(any(StopContainersRequest.class));
}
}
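  // Uses two barriers to inject a cleanup event while the launch is still in
  // flight, then checks that a TA_CONTAINER_CLEANED event reaches the event
  // handler.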
@SuppressWarnings({ "rawtypes", "unchecked" })
@Test(timeout = 5000)
public void testContainerCleaned() throws Exception {
LOG.info("STARTING testContainerCleaned");
CyclicBarrier startLaunchBarrier = new CyclicBarrier(2);
CyclicBarrier completeLaunchBarrier = new CyclicBarrier(2);
AppContext mockContext = mock(AppContext.class);
EventHandler mockEventHandler = mock(EventHandler.class);
when(mockContext.getEventHandler()).thenReturn(mockEventHandler);
ContainerManagementProtocolClient mockCM =
new ContainerManagerForTest(startLaunchBarrier, completeLaunchBarrier);
ContainerLauncherImplUnderTest ut =
new ContainerLauncherImplUnderTest(mockContext, mockCM);
Configuration conf = new Configuration();
ut.init(conf);
ut.start();
try {
ContainerId contId = makeContainerId(0l, 0, 0, 1);
TaskAttemptId taskAttemptId = makeTaskAttemptId(0l, 0, 0, TaskType.MAP, 0);
String cmAddress = "127.0.0.1:8000";
StartContainersResponse startResp =
recordFactory.newRecordInstance(StartContainersResponse.class);
startResp.setAllServicesMetaData(serviceResponse);
LOG.info("inserting launch event");
ContainerRemoteLaunchEvent mockLaunchEvent =
mock(ContainerRemoteLaunchEvent.class);
when(mockLaunchEvent.getType())
.thenReturn(EventType.CONTAINER_REMOTE_LAUNCH);
when(mockLaunchEvent.getContainerID())
.thenReturn(contId);
when(mockLaunchEvent.getTaskAttemptID()).thenReturn(taskAttemptId);
when(mockLaunchEvent.getContainerMgrAddress()).thenReturn(cmAddress);
when(mockLaunchEvent.getContainerToken()).thenReturn(
createNewContainerToken(contId, cmAddress));
ut.handle(mockLaunchEvent);
startLaunchBarrier.await();
LOG.info("inserting cleanup event");
ContainerLauncherEvent mockCleanupEvent =
mock(ContainerLauncherEvent.class);
when(mockCleanupEvent.getType())
.thenReturn(EventType.CONTAINER_REMOTE_CLEANUP);
when(mockCleanupEvent.getContainerID())
.thenReturn(contId);
when(mockCleanupEvent.getTaskAttemptID()).thenReturn(taskAttemptId);
when(mockCleanupEvent.getContainerMgrAddress()).thenReturn(cmAddress);
ut.handle(mockCleanupEvent);
completeLaunchBarrier.await();
ut.waitForPoolToIdle();
ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);
verify(mockEventHandler, atLeast(2)).handle(arg.capture());
boolean containerCleaned = false;
      for (int i = 0; i < arg.getAllValues().size(); i++) {
LOG.info(arg.getAllValues().get(i).toString());
Event currentEvent = arg.getAllValues().get(i);
if (currentEvent.getType() == TaskAttemptEventType.TA_CONTAINER_CLEANED) {
containerCleaned = true;
}
}
      org.junit.Assert.assertTrue("TA_CONTAINER_CLEANED event not seen",
          containerCleaned);
} finally {
ut.stop();
}
}
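  // Builds a container token for the local mock NM that expires 10 seconds
  // after the current time.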
private Token createNewContainerToken(ContainerId contId,
String containerManagerAddr) {
long currentTime = System.currentTimeMillis();
return MRApp.newContainerToken(NodeId.newInstance("127.0.0.1",
1234), "password".getBytes(), new ContainerTokenIdentifier(
contId, containerManagerAddr, "user",
Resource.newInstance(1024, 1),
currentTime + 10000L, 123, currentTime, Priority.newInstance(0), 0));
}
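  // Stub container manager whose startContainers() blocks on the test
  // barriers and then fails, letting the test interleave a cleanup with an
  // in-flight launch.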
private static class ContainerManagerForTest implements ContainerManagementProtocolClient {
private CyclicBarrier startLaunchBarrier;
private CyclicBarrier completeLaunchBarrier;
ContainerManagerForTest (CyclicBarrier startLaunchBarrier, CyclicBarrier completeLaunchBarrier) {
this.startLaunchBarrier = startLaunchBarrier;
this.completeLaunchBarrier = completeLaunchBarrier;
}
@Override
public StartContainersResponse startContainers(StartContainersRequest request)
throws IOException {
try {
startLaunchBarrier.await();
completeLaunchBarrier.await();
        // To ensure the kill is started before the launch completes
Thread.sleep(100);
} catch (InterruptedException e) {
e.printStackTrace();
} catch (BrokenBarrierException e) {
e.printStackTrace();
}
throw new IOException(new ContainerException("Force fail CM"));
}
@Override
public StopContainersResponse stopContainers(StopContainersRequest request)
throws IOException {
return null;
}
@Override
public GetContainerStatusesResponse getContainerStatuses(
GetContainerStatusesRequest request) throws IOException {
return null;
}
@Override
public void close() throws IOException {
}
}
@SuppressWarnings("serial")
private static class ContainerException extends YarnException {
public ContainerException(String message) {
super(message);
}
@Override
public YarnException getCause() {
return null;
}
}
}
| 18,850
| 38.602941
| 116
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.local;
import static org.mockito.Matchers.isA;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.Collections;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.ClusterInfo;
import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerResourceDecrease;
import org.apache.hadoop.yarn.api.records.ContainerResourceIncrease;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NMToken;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.client.ClientRMProxy;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.ipc.RPCUtil;
import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
import org.apache.hadoop.yarn.util.resource.Resources;
import org.junit.Assert;
import org.junit.Test;
public class TestLocalContainerAllocator {
@Test
public void testRMConnectionRetry() throws Exception {
// verify the connection exception is thrown
// if we haven't exhausted the retry interval
ApplicationMasterProtocol mockScheduler =
mock(ApplicationMasterProtocol.class);
when(mockScheduler.allocate(isA(AllocateRequest.class)))
.thenThrow(RPCUtil.getRemoteException(new IOException("forcefail")));
Configuration conf = new Configuration();
LocalContainerAllocator lca =
new StubbedLocalContainerAllocator(mockScheduler);
lca.init(conf);
lca.start();
try {
lca.heartbeat();
Assert.fail("heartbeat was supposed to throw");
} catch (YarnException e) {
// YarnException is expected
} finally {
lca.stop();
}
// verify YarnRuntimeException is thrown when the retry interval has expired
conf.setLong(MRJobConfig.MR_AM_TO_RM_WAIT_INTERVAL_MS, 0);
lca = new StubbedLocalContainerAllocator(mockScheduler);
lca.init(conf);
lca.start();
try {
lca.heartbeat();
Assert.fail("heartbeat was supposed to throw");
} catch (YarnRuntimeException e) {
// YarnRuntimeException is expected
} finally {
lca.stop();
}
}
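  // MockScheduler asserts that each AllocateRequest carries the response ID
  // returned by the previous allocation, so two heartbeats exercise the
  // response-ID tracking.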
@Test
public void testAllocResponseId() throws Exception {
ApplicationMasterProtocol scheduler = new MockScheduler();
Configuration conf = new Configuration();
LocalContainerAllocator lca =
new StubbedLocalContainerAllocator(scheduler);
lca.init(conf);
lca.start();
// do two heartbeats to verify the response ID is being tracked
lca.heartbeat();
lca.heartbeat();
lca.close();
}
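  // The mock RM returns a new AMRM token in the allocate response; after the
  // heartbeat the UGI should hold exactly one AMRM token matching the new
  // identifier, password and service.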
@Test
public void testAMRMTokenUpdate() throws Exception {
Configuration conf = new Configuration();
ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(
ApplicationId.newInstance(1, 1), 1);
AMRMTokenIdentifier oldTokenId = new AMRMTokenIdentifier(attemptId, 1);
AMRMTokenIdentifier newTokenId = new AMRMTokenIdentifier(attemptId, 2);
Token<AMRMTokenIdentifier> oldToken = new Token<AMRMTokenIdentifier>(
oldTokenId.getBytes(), "oldpassword".getBytes(), oldTokenId.getKind(),
new Text());
Token<AMRMTokenIdentifier> newToken = new Token<AMRMTokenIdentifier>(
newTokenId.getBytes(), "newpassword".getBytes(), newTokenId.getKind(),
new Text());
MockScheduler scheduler = new MockScheduler();
scheduler.amToken = newToken;
final LocalContainerAllocator lca =
new StubbedLocalContainerAllocator(scheduler);
lca.init(conf);
lca.start();
UserGroupInformation testUgi = UserGroupInformation.createUserForTesting(
"someuser", new String[0]);
testUgi.addToken(oldToken);
testUgi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
lca.heartbeat();
return null;
}
});
lca.close();
// verify there is only one AMRM token in the UGI and it matches the
// updated token from the RM
int tokenCount = 0;
Token<? extends TokenIdentifier> ugiToken = null;
for (Token<? extends TokenIdentifier> token : testUgi.getTokens()) {
if (AMRMTokenIdentifier.KIND_NAME.equals(token.getKind())) {
ugiToken = token;
++tokenCount;
}
}
Assert.assertEquals("too many AMRM tokens", 1, tokenCount);
Assert.assertArrayEquals("token identifier not updated",
newToken.getIdentifier(), ugiToken.getIdentifier());
Assert.assertArrayEquals("token password not updated",
newToken.getPassword(), ugiToken.getPassword());
Assert.assertEquals("AMRM token service not updated",
new Text(ClientRMProxy.getAMRMTokenService(conf)),
ugiToken.getService());
}
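  // Allocator stub that skips RM registration and the allocator thread and
  // talks to the supplied scheduler directly.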
private static class StubbedLocalContainerAllocator
extends LocalContainerAllocator {
private ApplicationMasterProtocol scheduler;
public StubbedLocalContainerAllocator(ApplicationMasterProtocol scheduler) {
super(mock(ClientService.class), createAppContext(),
"nmhost", 1, 2, null);
this.scheduler = scheduler;
}
@Override
protected void register() {
}
@Override
protected void unregister() {
}
@Override
protected void startAllocatorThread() {
allocatorThread = new Thread();
}
@Override
protected ApplicationMasterProtocol createSchedulerProxy() {
return scheduler;
}
private static AppContext createAppContext() {
ApplicationId appId = ApplicationId.newInstance(1, 1);
ApplicationAttemptId attemptId =
ApplicationAttemptId.newInstance(appId, 1);
Job job = mock(Job.class);
@SuppressWarnings("rawtypes")
EventHandler eventHandler = mock(EventHandler.class);
AppContext ctx = mock(AppContext.class);
when(ctx.getApplicationID()).thenReturn(appId);
when(ctx.getApplicationAttemptId()).thenReturn(attemptId);
when(ctx.getJob(isA(JobId.class))).thenReturn(job);
when(ctx.getClusterInfo()).thenReturn(
new ClusterInfo(Resource.newInstance(10240, 1)));
when(ctx.getEventHandler()).thenReturn(eventHandler);
return ctx;
}
}
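  // Minimal ApplicationMasterProtocol that verifies the allocate response ID
  // sequence and can hand back an updated AMRM token.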
private static class MockScheduler implements ApplicationMasterProtocol {
int responseId = 0;
Token<AMRMTokenIdentifier> amToken = null;
@Override
public RegisterApplicationMasterResponse registerApplicationMaster(
RegisterApplicationMasterRequest request) throws YarnException,
IOException {
return null;
}
@Override
public FinishApplicationMasterResponse finishApplicationMaster(
FinishApplicationMasterRequest request) throws YarnException,
IOException {
return null;
}
@Override
public AllocateResponse allocate(AllocateRequest request)
throws YarnException, IOException {
Assert.assertEquals("response ID mismatch",
responseId, request.getResponseId());
++responseId;
org.apache.hadoop.yarn.api.records.Token yarnToken = null;
if (amToken != null) {
yarnToken = org.apache.hadoop.yarn.api.records.Token.newInstance(
amToken.getIdentifier(), amToken.getKind().toString(),
amToken.getPassword(), amToken.getService().toString());
}
return AllocateResponse.newInstance(responseId,
Collections.<ContainerStatus>emptyList(),
Collections.<Container>emptyList(),
Collections.<NodeReport>emptyList(),
Resources.none(), null, 1, null,
Collections.<NMToken>emptyList(),
yarnToken,
Collections.<ContainerResourceIncrease>emptyList(),
Collections.<ContainerResourceDecrease>emptyList());
}
}
}
| 9,944
| 36.958015
| 84
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/speculate/TestDataStatistics.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.speculate;
import org.junit.Assert;
import org.junit.Test;
public class TestDataStatistics {
private static final double TOL = 0.001;
@Test
public void testEmptyDataStatistics() throws Exception {
DataStatistics statistics = new DataStatistics();
Assert.assertEquals(0, statistics.count(), TOL);
Assert.assertEquals(0, statistics.mean(), TOL);
Assert.assertEquals(0, statistics.var(), TOL);
Assert.assertEquals(0, statistics.std(), TOL);
Assert.assertEquals(0, statistics.outlier(1.0f), TOL);
}
@Test
public void testSingleEntryDataStatistics() throws Exception {
DataStatistics statistics = new DataStatistics(17.29);
Assert.assertEquals(1, statistics.count(), TOL);
Assert.assertEquals(17.29, statistics.mean(), TOL);
Assert.assertEquals(0, statistics.var(), TOL);
Assert.assertEquals(0, statistics.std(), TOL);
Assert.assertEquals(17.29, statistics.outlier(1.0f), TOL);
}
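  // Expected values for the samples {17, 29}: mean = 23,
  // variance = ((17-23)^2 + (29-23)^2) / 2 = 36, std = sqrt(36) = 6, and
  // outlier(1.0) = mean + 1.0 * std = 29.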
@Test
  public void testMultiEntryDataStatistics() throws Exception {
DataStatistics statistics = new DataStatistics();
statistics.add(17);
statistics.add(29);
Assert.assertEquals(2, statistics.count(), TOL);
Assert.assertEquals(23.0, statistics.mean(), TOL);
Assert.assertEquals(36.0, statistics.var(), TOL);
Assert.assertEquals(6.0, statistics.std(), TOL);
Assert.assertEquals(29.0, statistics.outlier(1.0f), TOL);
}
@Test
public void testUpdateStatistics() throws Exception {
DataStatistics statistics = new DataStatistics(17);
statistics.add(29);
Assert.assertEquals(2, statistics.count(), TOL);
Assert.assertEquals(23.0, statistics.mean(), TOL);
Assert.assertEquals(36.0, statistics.var(), TOL);
statistics.updateStatistics(17, 29);
Assert.assertEquals(2, statistics.count(), TOL);
Assert.assertEquals(29.0, statistics.mean(), TOL);
Assert.assertEquals(0.0, statistics.var(), TOL);
}
}
| 2,753
| 36.216216
| 74
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestShuffleProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job.impl;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.Map;
import java.nio.ByteBuffer;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapTaskAttemptImpl;
import org.apache.hadoop.mapred.WrappedJvmID;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.util.SystemClock;
import org.apache.hadoop.yarn.server.api.AuxiliaryService;
import org.apache.hadoop.yarn.server.api.ApplicationInitializationContext;
import org.apache.hadoop.yarn.server.api.ApplicationTerminationContext;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.junit.Test;
import org.junit.Assert;
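// Verifies that shuffle providers named in
// MAPREDUCE_JOB_SHUFFLE_PROVIDER_SERVICES are added to the container launch
// context's service data alongside the built-in shuffle provider.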
public class TestShuffleProvider {
@Test
public void testShuffleProviders() throws Exception {
ApplicationId appId = ApplicationId.newInstance(1, 1);
JobId jobId = MRBuilderUtils.newJobId(appId, 1);
TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
Path jobFile = mock(Path.class);
EventHandler eventHandler = mock(EventHandler.class);
TaskAttemptListener taListener = mock(TaskAttemptListener.class);
when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
JobConf jobConf = new JobConf();
jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
jobConf.setBoolean("fs.file.impl.disable.cache", true);
jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
jobConf.set(YarnConfiguration.NM_AUX_SERVICES,
TestShuffleHandler1.MAPREDUCE_TEST_SHUFFLE_SERVICEID + "," +
TestShuffleHandler2.MAPREDUCE_TEST_SHUFFLE_SERVICEID);
String serviceName = TestShuffleHandler1.MAPREDUCE_TEST_SHUFFLE_SERVICEID;
String serviceStr = String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, serviceName);
jobConf.set(serviceStr, TestShuffleHandler1.class.getName());
serviceName = TestShuffleHandler2.MAPREDUCE_TEST_SHUFFLE_SERVICEID;
serviceStr = String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, serviceName);
jobConf.set(serviceStr, TestShuffleHandler2.class.getName());
jobConf.set(MRJobConfig.MAPREDUCE_JOB_SHUFFLE_PROVIDER_SERVICES,
TestShuffleHandler1.MAPREDUCE_TEST_SHUFFLE_SERVICEID
+ "," + TestShuffleHandler2.MAPREDUCE_TEST_SHUFFLE_SERVICEID);
Credentials credentials = new Credentials();
Token<JobTokenIdentifier> jobToken = new Token<JobTokenIdentifier>(
("tokenid").getBytes(), ("tokenpw").getBytes(),
new Text("tokenkind"), new Text("tokenservice"));
TaskAttemptImpl taImpl =
new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
mock(TaskSplitMetaInfo.class), jobConf, taListener,
jobToken, credentials,
new SystemClock(), null);
jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, taImpl.getID().toString());
ContainerLaunchContext launchCtx =
TaskAttemptImpl.createContainerLaunchContext(null,
jobConf, jobToken, taImpl.createRemoteTask(),
TypeConverter.fromYarn(jobId),
mock(WrappedJvmID.class), taListener,
credentials);
Map<String, ByteBuffer> serviceDataMap = launchCtx.getServiceData();
Assert.assertNotNull("TestShuffleHandler1 is missing", serviceDataMap.get(TestShuffleHandler1.MAPREDUCE_TEST_SHUFFLE_SERVICEID));
Assert.assertNotNull("TestShuffleHandler2 is missing", serviceDataMap.get(TestShuffleHandler2.MAPREDUCE_TEST_SHUFFLE_SERVICEID));
Assert.assertTrue("mismatch number of services in map", serviceDataMap.size() == 3); // 2 that we entered + 1 for the built-in shuffle-provider
}
static public class StubbedFS extends RawLocalFileSystem {
@Override
public FileStatus getFileStatus(Path f) throws IOException {
return new FileStatus(1, false, 1, 1, 1, f);
}
}
static public class TestShuffleHandler1 extends AuxiliaryService {
public static final String MAPREDUCE_TEST_SHUFFLE_SERVICEID = "test_shuffle1";
public TestShuffleHandler1() {
super("testshuffle1");
}
@Override
public void initializeApplication(ApplicationInitializationContext context) {
}
@Override
public void stopApplication(ApplicationTerminationContext context) {
}
@Override
public synchronized ByteBuffer getMetaData() {
return ByteBuffer.allocate(0); // Don't 'return null' because of YARN-1256
}
}
static public class TestShuffleHandler2 extends AuxiliaryService {
public static final String MAPREDUCE_TEST_SHUFFLE_SERVICEID = "test_shuffle2";
public TestShuffleHandler2() {
super("testshuffle2");
}
@Override
public void initializeApplication(ApplicationInitializationContext context) {
}
@Override
public void stopApplication(ApplicationTerminationContext context) {
}
@Override
public synchronized ByteBuffer getMetaData() {
return ByteBuffer.allocate(0); // Don't 'return null' because of YARN-1256
}
}
}
| 6,844
| 41.78125
| 147
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job.impl;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapTaskAttemptImpl;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.JobCounter;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
import org.apache.hadoop.mapreduce.jobhistory.TaskAttemptUnsuccessfulCompletion;
import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.Locality;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.ClusterInfo;
import org.apache.hadoop.mapreduce.v2.app.MRApp;
import org.apache.hadoop.mapreduce.v2.app.TaskAttemptFinishingMonitor;
import org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttemptStateInternal;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerLaunchedEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptDiagnosticsUpdateEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptTooManyFetchFailureEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
import org.apache.hadoop.mapreduce.v2.app.rm.ContainerRequestEvent;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.Event;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.util.Clock;
import org.apache.hadoop.yarn.util.ControlledClock;
import org.apache.hadoop.yarn.util.SystemClock;
import org.junit.Test;
import org.mockito.ArgumentCaptor;
@SuppressWarnings({"unchecked", "rawtypes"})
public class TestTaskAttempt {
static public class StubbedFS extends RawLocalFileSystem {
@Override
public FileStatus getFileStatus(Path f) throws IOException {
return new FileStatus(1, false, 1, 1, 1, f);
}
}
@Test
public void testMRAppHistoryForMap() throws Exception {
MRApp app = new FailingAttemptsMRApp(1, 0);
testMRAppHistory(app);
}
@Test
public void testMRAppHistoryForReduce() throws Exception {
MRApp app = new FailingAttemptsMRApp(0, 1);
testMRAppHistory(app);
}
@Test
public void testSingleRackRequest() throws Exception {
TaskAttemptImpl.RequestContainerTransition rct =
new TaskAttemptImpl.RequestContainerTransition(false);
EventHandler eventHandler = mock(EventHandler.class);
String[] hosts = new String[3];
hosts[0] = "host1";
hosts[1] = "host2";
hosts[2] = "host3";
TaskSplitMetaInfo splitInfo =
new TaskSplitMetaInfo(hosts, 0, 128 * 1024 * 1024l);
TaskAttemptImpl mockTaskAttempt =
createMapTaskAttemptImplForTest(eventHandler, splitInfo);
TaskAttemptEvent mockTAEvent = mock(TaskAttemptEvent.class);
rct.transition(mockTaskAttempt, mockTAEvent);
ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);
verify(eventHandler, times(2)).handle(arg.capture());
if (!(arg.getAllValues().get(1) instanceof ContainerRequestEvent)) {
Assert.fail("Second Event not of type ContainerRequestEvent");
}
ContainerRequestEvent cre =
(ContainerRequestEvent) arg.getAllValues().get(1);
String[] requestedRacks = cre.getRacks();
//Only a single occurrence of /DefaultRack
assertEquals(1, requestedRacks.length);
}
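  // An IP address in the split locations should be resolved to a hostname
  // before the container request is built.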
@Test
public void testHostResolveAttempt() throws Exception {
TaskAttemptImpl.RequestContainerTransition rct =
new TaskAttemptImpl.RequestContainerTransition(false);
EventHandler eventHandler = mock(EventHandler.class);
String[] hosts = new String[3];
hosts[0] = "192.168.1.1";
hosts[1] = "host2";
hosts[2] = "host3";
TaskSplitMetaInfo splitInfo =
new TaskSplitMetaInfo(hosts, 0, 128 * 1024 * 1024l);
TaskAttemptImpl mockTaskAttempt =
createMapTaskAttemptImplForTest(eventHandler, splitInfo);
TaskAttemptImpl spyTa = spy(mockTaskAttempt);
when(spyTa.resolveHost(hosts[0])).thenReturn("host1");
spyTa.dataLocalHosts = spyTa.resolveHosts(splitInfo.getLocations());
TaskAttemptEvent mockTAEvent = mock(TaskAttemptEvent.class);
rct.transition(spyTa, mockTAEvent);
verify(spyTa).resolveHost(hosts[0]);
ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);
verify(eventHandler, times(2)).handle(arg.capture());
if (!(arg.getAllValues().get(1) instanceof ContainerRequestEvent)) {
Assert.fail("Second Event not of type ContainerRequestEvent");
}
Map<String, Boolean> expected = new HashMap<String, Boolean>();
expected.put("host1", true);
expected.put("host2", true);
expected.put("host3", true);
ContainerRequestEvent cre =
(ContainerRequestEvent) arg.getAllValues().get(1);
String[] requestedHosts = cre.getHosts();
for (String h : requestedHosts) {
expected.remove(h);
}
assertEquals(0, expected.size());
}
@Test
public void testMillisCountersUpdate() throws Exception {
verifyMillisCounters(2048, 2048, 1024);
verifyMillisCounters(2048, 1024, 1024);
verifyMillisCounters(10240, 1024, 2048);
}
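  // Runs a 1-map/1-reduce job on a controlled clock (launch at t=10, finish
  // at t=11) and checks the SLOTS_MILLIS, MILLIS, MB_MILLIS and VCORES_MILLIS
  // counters against the configured memory sizes.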
public void verifyMillisCounters(int mapMemMb, int reduceMemMb,
int minContainerSize) throws Exception {
Clock actualClock = new SystemClock();
ControlledClock clock = new ControlledClock(actualClock);
clock.setTime(10);
MRApp app =
new MRApp(1, 1, false, "testSlotMillisCounterUpdate", true, clock);
Configuration conf = new Configuration();
conf.setInt(MRJobConfig.MAP_MEMORY_MB, mapMemMb);
conf.setInt(MRJobConfig.REDUCE_MEMORY_MB, reduceMemMb);
conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
minContainerSize);
app.setClusterInfo(new ClusterInfo(Resource.newInstance(10240, 1)));
Job job = app.submit(conf);
app.waitForState(job, JobState.RUNNING);
Map<TaskId, Task> tasks = job.getTasks();
Assert.assertEquals("Num tasks is not correct", 2, tasks.size());
Iterator<Task> taskIter = tasks.values().iterator();
Task mTask = taskIter.next();
app.waitForState(mTask, TaskState.RUNNING);
Task rTask = taskIter.next();
app.waitForState(rTask, TaskState.RUNNING);
Map<TaskAttemptId, TaskAttempt> mAttempts = mTask.getAttempts();
Assert.assertEquals("Num attempts is not correct", 1, mAttempts.size());
Map<TaskAttemptId, TaskAttempt> rAttempts = rTask.getAttempts();
Assert.assertEquals("Num attempts is not correct", 1, rAttempts.size());
TaskAttempt mta = mAttempts.values().iterator().next();
TaskAttempt rta = rAttempts.values().iterator().next();
app.waitForState(mta, TaskAttemptState.RUNNING);
app.waitForState(rta, TaskAttemptState.RUNNING);
clock.setTime(11);
app.getContext()
.getEventHandler()
.handle(new TaskAttemptEvent(mta.getID(), TaskAttemptEventType.TA_DONE));
app.getContext()
.getEventHandler()
.handle(new TaskAttemptEvent(rta.getID(), TaskAttemptEventType.TA_DONE));
app.waitForState(job, JobState.SUCCEEDED);
Assert.assertEquals(mta.getFinishTime(), 11);
Assert.assertEquals(mta.getLaunchTime(), 10);
Assert.assertEquals(rta.getFinishTime(), 11);
Assert.assertEquals(rta.getLaunchTime(), 10);
Counters counters = job.getAllCounters();
Assert.assertEquals((int) Math.ceil((float) mapMemMb / minContainerSize),
counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS).getValue());
Assert.assertEquals((int) Math.ceil((float) reduceMemMb / minContainerSize),
counters.findCounter(JobCounter.SLOTS_MILLIS_REDUCES).getValue());
Assert.assertEquals(1,
counters.findCounter(JobCounter.MILLIS_MAPS).getValue());
Assert.assertEquals(1,
counters.findCounter(JobCounter.MILLIS_REDUCES).getValue());
Assert.assertEquals(mapMemMb,
counters.findCounter(JobCounter.MB_MILLIS_MAPS).getValue());
Assert.assertEquals(reduceMemMb,
counters.findCounter(JobCounter.MB_MILLIS_REDUCES).getValue());
Assert.assertEquals(1,
counters.findCounter(JobCounter.VCORES_MILLIS_MAPS).getValue());
Assert.assertEquals(1,
counters.findCounter(JobCounter.VCORES_MILLIS_REDUCES).getValue());
}
private TaskAttemptImpl createMapTaskAttemptImplForTest(
EventHandler eventHandler, TaskSplitMetaInfo taskSplitMetaInfo) {
Clock clock = new SystemClock();
return createMapTaskAttemptImplForTest(eventHandler, taskSplitMetaInfo, clock);
}
private TaskAttemptImpl createMapTaskAttemptImplForTest(
EventHandler eventHandler, TaskSplitMetaInfo taskSplitMetaInfo, Clock clock) {
ApplicationId appId = ApplicationId.newInstance(1, 1);
JobId jobId = MRBuilderUtils.newJobId(appId, 1);
TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
TaskAttemptListener taListener = mock(TaskAttemptListener.class);
Path jobFile = mock(Path.class);
JobConf jobConf = new JobConf();
TaskAttemptImpl taImpl =
new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
taskSplitMetaInfo, jobConf, taListener, null,
null, clock, null);
return taImpl;
}
private void testMRAppHistory(MRApp app) throws Exception {
Configuration conf = new Configuration();
Job job = app.submit(conf);
app.waitForState(job, JobState.FAILED);
Map<TaskId, Task> tasks = job.getTasks();
Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
Task task = tasks.values().iterator().next();
Assert.assertEquals("Task state not correct", TaskState.FAILED, task
.getReport().getTaskState());
Map<TaskAttemptId, TaskAttempt> attempts = tasks.values().iterator().next()
.getAttempts();
Assert.assertEquals("Num attempts is not correct", 4, attempts.size());
Iterator<TaskAttempt> it = attempts.values().iterator();
TaskAttemptReport report = it.next().getReport();
Assert.assertEquals("Attempt state not correct", TaskAttemptState.FAILED,
report.getTaskAttemptState());
Assert.assertEquals("Diagnostic Information is not Correct",
"Test Diagnostic Event", report.getDiagnosticInfo());
report = it.next().getReport();
Assert.assertEquals("Attempt state not correct", TaskAttemptState.FAILED,
report.getTaskAttemptState());
}
static class FailingAttemptsMRApp extends MRApp {
FailingAttemptsMRApp(int maps, int reduces) {
super(maps, reduces, true, "FailingAttemptsMRApp", true);
}
@Override
protected void attemptLaunched(TaskAttemptId attemptID) {
getContext().getEventHandler().handle(
new TaskAttemptDiagnosticsUpdateEvent(attemptID,
"Test Diagnostic Event"));
getContext().getEventHandler().handle(
new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_FAILMSG));
}
protected EventHandler<JobHistoryEvent> createJobHistoryHandler(
AppContext context) {
return new EventHandler<JobHistoryEvent>() {
@Override
public void handle(JobHistoryEvent event) {
if (event.getType() == org.apache.hadoop.mapreduce.jobhistory.EventType.MAP_ATTEMPT_FAILED) {
TaskAttemptUnsuccessfulCompletion datum = (TaskAttemptUnsuccessfulCompletion) event
.getHistoryEvent().getDatum();
Assert.assertEquals("Diagnostic Information is not Correct",
"Test Diagnostic Event", datum.get(8).toString());
}
}
};
}
}
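  // A container launch failure arriving while the attempt is being killed
  // must not trigger an internal error, and locality should stay NODE_LOCAL.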
@Test
public void testLaunchFailedWhileKilling() throws Exception {
ApplicationId appId = ApplicationId.newInstance(1, 2);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 0);
JobId jobId = MRBuilderUtils.newJobId(appId, 1);
TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
Path jobFile = mock(Path.class);
MockEventHandler eventHandler = new MockEventHandler();
TaskAttemptListener taListener = mock(TaskAttemptListener.class);
when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
JobConf jobConf = new JobConf();
jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
jobConf.setBoolean("fs.file.impl.disable.cache", true);
jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
when(splits.getLocations()).thenReturn(new String[] {"127.0.0.1"});
TaskAttemptImpl taImpl =
new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
splits, jobConf, taListener,
new Token(), new Credentials(),
new SystemClock(), null);
NodeId nid = NodeId.newInstance("127.0.0.1", 0);
ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
Container container = mock(Container.class);
when(container.getId()).thenReturn(contId);
when(container.getNodeId()).thenReturn(nid);
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_SCHEDULE));
taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,
container, mock(Map.class)));
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_KILL));
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_CONTAINER_CLEANED));
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED));
assertFalse(eventHandler.internalError);
assertEquals("Task attempt is not assigned on the local node",
Locality.NODE_LOCAL, taImpl.getLocality());
}
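  // TA_CONTAINER_CLEANED while the attempt is RUNNING should not cause an
  // internal error; the container node is on a different address from the
  // split location, so locality is RACK_LOCAL.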
@Test
public void testContainerCleanedWhileRunning() throws Exception {
ApplicationId appId = ApplicationId.newInstance(1, 2);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 0);
JobId jobId = MRBuilderUtils.newJobId(appId, 1);
TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
Path jobFile = mock(Path.class);
MockEventHandler eventHandler = new MockEventHandler();
TaskAttemptListener taListener = mock(TaskAttemptListener.class);
when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
JobConf jobConf = new JobConf();
jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
jobConf.setBoolean("fs.file.impl.disable.cache", true);
jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
when(splits.getLocations()).thenReturn(new String[] {"127.0.0.1"});
AppContext appCtx = mock(AppContext.class);
ClusterInfo clusterInfo = mock(ClusterInfo.class);
Resource resource = mock(Resource.class);
when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
when(resource.getMemory()).thenReturn(1024);
setupTaskAttemptFinishingMonitor(eventHandler, jobConf, appCtx);
TaskAttemptImpl taImpl =
new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
splits, jobConf, taListener,
new Token(), new Credentials(),
new SystemClock(), appCtx);
NodeId nid = NodeId.newInstance("127.0.0.2", 0);
ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
Container container = mock(Container.class);
when(container.getId()).thenReturn(contId);
when(container.getNodeId()).thenReturn(nid);
when(container.getNodeHttpAddress()).thenReturn("localhost:0");
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_SCHEDULE));
taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,
container, mock(Map.class)));
taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
assertEquals("Task attempt is not in running state", taImpl.getState(),
TaskAttemptState.RUNNING);
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_CONTAINER_CLEANED));
assertFalse("InternalError occurred trying to handle TA_CONTAINER_CLEANED",
eventHandler.internalError);
assertEquals("Task attempt is not assigned on the local rack",
Locality.RACK_LOCAL, taImpl.getLocality());
}
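  // TA_CONTAINER_CLEANED while the attempt is in COMMIT_PENDING should not
  // cause an internal error; the split reported no locations, so locality is
  // OFF_SWITCH.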
@Test
public void testContainerCleanedWhileCommitting() throws Exception {
ApplicationId appId = ApplicationId.newInstance(1, 2);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 0);
JobId jobId = MRBuilderUtils.newJobId(appId, 1);
TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
Path jobFile = mock(Path.class);
MockEventHandler eventHandler = new MockEventHandler();
TaskAttemptListener taListener = mock(TaskAttemptListener.class);
when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
JobConf jobConf = new JobConf();
jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
jobConf.setBoolean("fs.file.impl.disable.cache", true);
jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
when(splits.getLocations()).thenReturn(new String[] {});
AppContext appCtx = mock(AppContext.class);
ClusterInfo clusterInfo = mock(ClusterInfo.class);
Resource resource = mock(Resource.class);
when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
when(resource.getMemory()).thenReturn(1024);
setupTaskAttemptFinishingMonitor(eventHandler, jobConf, appCtx);
TaskAttemptImpl taImpl =
new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
splits, jobConf, taListener,
new Token(), new Credentials(),
new SystemClock(), appCtx);
NodeId nid = NodeId.newInstance("127.0.0.1", 0);
ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
Container container = mock(Container.class);
when(container.getId()).thenReturn(contId);
when(container.getNodeId()).thenReturn(nid);
when(container.getNodeHttpAddress()).thenReturn("localhost:0");
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_SCHEDULE));
taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,
container, mock(Map.class)));
taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_COMMIT_PENDING));
assertEquals("Task attempt is not in commit pending state", taImpl.getState(),
TaskAttemptState.COMMIT_PENDING);
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_CONTAINER_CLEANED));
assertFalse("InternalError occurred trying to handle TA_CONTAINER_CLEANED",
eventHandler.internalError);
assertEquals("Task attempt is assigned locally", Locality.OFF_SWITCH,
taImpl.getLocality());
}
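  // A second too-many-fetch-failures event after the attempt has already
  // transitioned from SUCCEEDED to FAILED should leave it FAILED without an
  // internal error.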
@Test
public void testDoubleTooManyFetchFailure() throws Exception {
ApplicationId appId = ApplicationId.newInstance(1, 2);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 0);
JobId jobId = MRBuilderUtils.newJobId(appId, 1);
TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
TaskId reduceTaskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.REDUCE);
TaskAttemptId reduceTAId =
MRBuilderUtils.newTaskAttemptId(reduceTaskId, 0);
Path jobFile = mock(Path.class);
MockEventHandler eventHandler = new MockEventHandler();
TaskAttemptListener taListener = mock(TaskAttemptListener.class);
when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
JobConf jobConf = new JobConf();
jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
jobConf.setBoolean("fs.file.impl.disable.cache", true);
jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
when(splits.getLocations()).thenReturn(new String[] {"127.0.0.1"});
AppContext appCtx = mock(AppContext.class);
ClusterInfo clusterInfo = mock(ClusterInfo.class);
Resource resource = mock(Resource.class);
when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
when(resource.getMemory()).thenReturn(1024);
setupTaskAttemptFinishingMonitor(eventHandler, jobConf, appCtx);
TaskAttemptImpl taImpl =
new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
splits, jobConf, taListener,
new Token(), new Credentials(),
new SystemClock(), appCtx);
NodeId nid = NodeId.newInstance("127.0.0.1", 0);
ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
Container container = mock(Container.class);
when(container.getId()).thenReturn(contId);
when(container.getNodeId()).thenReturn(nid);
when(container.getNodeHttpAddress()).thenReturn("localhost:0");
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_SCHEDULE));
taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,
container, mock(Map.class)));
taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_DONE));
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_CONTAINER_COMPLETED));
assertEquals("Task attempt is not in succeeded state", taImpl.getState(),
TaskAttemptState.SUCCEEDED);
taImpl.handle(new TaskAttemptTooManyFetchFailureEvent(attemptId,
reduceTAId, "Host"));
assertEquals("Task attempt is not in FAILED state", taImpl.getState(),
TaskAttemptState.FAILED);
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE));
assertEquals("Task attempt is not in FAILED state, still", taImpl.getState(),
TaskAttemptState.FAILED);
assertFalse("InternalError occurred trying to handle TA_CONTAINER_CLEANED",
eventHandler.internalError);
}
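  // A diagnostics update on an attempt that has only been scheduled (no
  // container assigned yet) should be handled without an internal error.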
@Test
  public void testAppDiagnosticEventOnUnassignedTask() throws Exception {
ApplicationId appId = ApplicationId.newInstance(1, 2);
ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(
appId, 0);
JobId jobId = MRBuilderUtils.newJobId(appId, 1);
TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
Path jobFile = mock(Path.class);
MockEventHandler eventHandler = new MockEventHandler();
TaskAttemptListener taListener = mock(TaskAttemptListener.class);
when(taListener.getAddress()).thenReturn(
new InetSocketAddress("localhost", 0));
JobConf jobConf = new JobConf();
jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
jobConf.setBoolean("fs.file.impl.disable.cache", true);
jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
when(splits.getLocations()).thenReturn(new String[] { "127.0.0.1" });
AppContext appCtx = mock(AppContext.class);
ClusterInfo clusterInfo = mock(ClusterInfo.class);
Resource resource = mock(Resource.class);
when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
when(resource.getMemory()).thenReturn(1024);
setupTaskAttemptFinishingMonitor(eventHandler, jobConf, appCtx);
TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
jobFile, 1, splits, jobConf, taListener,
new Token(), new Credentials(), new SystemClock(), appCtx);
NodeId nid = NodeId.newInstance("127.0.0.1", 0);
ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
Container container = mock(Container.class);
when(container.getId()).thenReturn(contId);
when(container.getNodeId()).thenReturn(nid);
when(container.getNodeHttpAddress()).thenReturn("localhost:0");
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_SCHEDULE));
taImpl.handle(new TaskAttemptDiagnosticsUpdateEvent(attemptId,
"Task got killed"));
    assertFalse(
        "InternalError occurred trying to handle TA_DIAGNOSTICS_UPDATE on an unassigned task",
        eventHandler.internalError);
}
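  // A too-many-fetch-failures event arriving after the attempt was killed
  // should leave it KILLED rather than moving it to FAILED.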
@Test
public void testTooManyFetchFailureAfterKill() throws Exception {
ApplicationId appId = ApplicationId.newInstance(1, 2);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 0);
JobId jobId = MRBuilderUtils.newJobId(appId, 1);
TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
Path jobFile = mock(Path.class);
MockEventHandler eventHandler = new MockEventHandler();
TaskAttemptListener taListener = mock(TaskAttemptListener.class);
when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
JobConf jobConf = new JobConf();
jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
jobConf.setBoolean("fs.file.impl.disable.cache", true);
jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
when(splits.getLocations()).thenReturn(new String[] {"127.0.0.1"});
AppContext appCtx = mock(AppContext.class);
ClusterInfo clusterInfo = mock(ClusterInfo.class);
Resource resource = mock(Resource.class);
when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
when(resource.getMemory()).thenReturn(1024);
setupTaskAttemptFinishingMonitor(eventHandler, jobConf, appCtx);
TaskAttemptImpl taImpl =
new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
splits, jobConf, taListener,
mock(Token.class), new Credentials(),
new SystemClock(), appCtx);
NodeId nid = NodeId.newInstance("127.0.0.1", 0);
ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
Container container = mock(Container.class);
when(container.getId()).thenReturn(contId);
when(container.getNodeId()).thenReturn(nid);
when(container.getNodeHttpAddress()).thenReturn("localhost:0");
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_SCHEDULE));
taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,
container, mock(Map.class)));
taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_DONE));
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_CONTAINER_COMPLETED));
assertEquals("Task attempt is not in succeeded state", taImpl.getState(),
TaskAttemptState.SUCCEEDED);
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_KILL));
assertEquals("Task attempt is not in KILLED state", taImpl.getState(),
TaskAttemptState.KILLED);
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE));
assertEquals("Task attempt is not in KILLED state, still", taImpl.getState(),
TaskAttemptState.KILLED);
assertFalse("InternalError occurred trying to handle TA_CONTAINER_CLEANED",
eventHandler.internalError);
}
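  // Verifies that a diagnostics update on an attempt that has not yet been
  // scheduled does not cause an internal error.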
@Test
  public void testAppDiagnosticEventOnNewTask() throws Exception {
ApplicationId appId = ApplicationId.newInstance(1, 2);
ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(
appId, 0);
JobId jobId = MRBuilderUtils.newJobId(appId, 1);
TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
Path jobFile = mock(Path.class);
MockEventHandler eventHandler = new MockEventHandler();
TaskAttemptListener taListener = mock(TaskAttemptListener.class);
when(taListener.getAddress()).thenReturn(
new InetSocketAddress("localhost", 0));
JobConf jobConf = new JobConf();
jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
jobConf.setBoolean("fs.file.impl.disable.cache", true);
jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
when(splits.getLocations()).thenReturn(new String[] { "127.0.0.1" });
AppContext appCtx = mock(AppContext.class);
ClusterInfo clusterInfo = mock(ClusterInfo.class);
Resource resource = mock(Resource.class);
when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
when(resource.getMemory()).thenReturn(1024);
setupTaskAttemptFinishingMonitor(eventHandler, jobConf, appCtx);
TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
jobFile, 1, splits, jobConf, taListener,
new Token(), new Credentials(), new SystemClock(), appCtx);
NodeId nid = NodeId.newInstance("127.0.0.1", 0);
ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
Container container = mock(Container.class);
when(container.getId()).thenReturn(contId);
when(container.getNodeId()).thenReturn(nid);
when(container.getNodeHttpAddress()).thenReturn("localhost:0");
taImpl.handle(new TaskAttemptDiagnosticsUpdateEvent(attemptId,
"Task got killed"));
assertFalse(
"InternalError occurred trying to handle TA_DIAGNOSTICS_UPDATE on assigned task",
eventHandler.internalError);
}
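  // Verifies that when a succeeded map attempt is later failed because of
  // too many fetch failures, its originally recorded finish time is kept.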
@Test
public void testFetchFailureAttemptFinishTime() throws Exception{
ApplicationId appId = ApplicationId.newInstance(1, 2);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 0);
JobId jobId = MRBuilderUtils.newJobId(appId, 1);
TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
TaskId reducetaskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.REDUCE);
TaskAttemptId reduceTAId =
MRBuilderUtils.newTaskAttemptId(reducetaskId, 0);
Path jobFile = mock(Path.class);
MockEventHandler eventHandler = new MockEventHandler();
TaskAttemptListener taListener = mock(TaskAttemptListener.class);
when(taListener.getAddress()).thenReturn(
new InetSocketAddress("localhost", 0));
JobConf jobConf = new JobConf();
jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
jobConf.setBoolean("fs.file.impl.disable.cache", true);
jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
when(splits.getLocations()).thenReturn(new String[] {"127.0.0.1"});
AppContext appCtx = mock(AppContext.class);
ClusterInfo clusterInfo = mock(ClusterInfo.class);
when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
setupTaskAttemptFinishingMonitor(eventHandler, jobConf, appCtx);
TaskAttemptImpl taImpl =
new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
splits, jobConf, taListener,mock(Token.class), new Credentials(),
new SystemClock(), appCtx);
NodeId nid = NodeId.newInstance("127.0.0.1", 0);
ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
Container container = mock(Container.class);
when(container.getId()).thenReturn(contId);
when(container.getNodeId()).thenReturn(nid);
when(container.getNodeHttpAddress()).thenReturn("localhost:0");
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_SCHEDULE));
taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,
container, mock(Map.class)));
taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_DONE));
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_CONTAINER_COMPLETED));
assertEquals("Task attempt is not in succeeded state", taImpl.getState(),
TaskAttemptState.SUCCEEDED);
assertTrue("Task Attempt finish time is not greater than 0",
taImpl.getFinishTime() > 0);
Long finishTime = taImpl.getFinishTime();
Thread.sleep(5);
taImpl.handle(new TaskAttemptTooManyFetchFailureEvent(attemptId,
reduceTAId, "Host"));
assertEquals("Task attempt is not in Too Many Fetch Failure state",
taImpl.getState(), TaskAttemptState.FAILED);
assertEquals("After TA_TOO_MANY_FETCH_FAILURE,"
+ " Task attempt finish time is not the same ",
finishTime, Long.valueOf(taImpl.getFinishTime()));
}
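  // Verifies that a kill right after container assignment moves the attempt
  // to the KILL_CONTAINER_CLEANUP internal state.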
@Test
public void testContainerKillAfterAssigned() throws Exception {
ApplicationId appId = ApplicationId.newInstance(1, 2);
ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId,
0);
JobId jobId = MRBuilderUtils.newJobId(appId, 1);
TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
Path jobFile = mock(Path.class);
MockEventHandler eventHandler = new MockEventHandler();
TaskAttemptListener taListener = mock(TaskAttemptListener.class);
when(taListener.getAddress()).thenReturn(
new InetSocketAddress("localhost", 0));
JobConf jobConf = new JobConf();
jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
jobConf.setBoolean("fs.file.impl.disable.cache", true);
jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
when(splits.getLocations()).thenReturn(new String[] { "127.0.0.1" });
AppContext appCtx = mock(AppContext.class);
ClusterInfo clusterInfo = mock(ClusterInfo.class);
Resource resource = mock(Resource.class);
when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
when(resource.getMemory()).thenReturn(1024);
TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
jobFile, 1, splits, jobConf, taListener, new Token(),
new Credentials(), new SystemClock(), appCtx);
NodeId nid = NodeId.newInstance("127.0.0.2", 0);
ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
Container container = mock(Container.class);
when(container.getId()).thenReturn(contId);
when(container.getNodeId()).thenReturn(nid);
when(container.getNodeHttpAddress()).thenReturn("localhost:0");
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_SCHEDULE));
taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId, container,
mock(Map.class)));
assertEquals("Task attempt is not in assinged state",
taImpl.getInternalState(), TaskAttemptStateInternal.ASSIGNED);
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_KILL));
assertEquals("Task should be in KILLED state",
TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP,
taImpl.getInternalState());
}
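  // Verifies that killing a running attempt is handled without an internal
  // error and moves it to KILL_CONTAINER_CLEANUP.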
@Test
public void testContainerKillWhileRunning() throws Exception {
ApplicationId appId = ApplicationId.newInstance(1, 2);
ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId,
0);
JobId jobId = MRBuilderUtils.newJobId(appId, 1);
TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
Path jobFile = mock(Path.class);
MockEventHandler eventHandler = new MockEventHandler();
TaskAttemptListener taListener = mock(TaskAttemptListener.class);
when(taListener.getAddress()).thenReturn(
new InetSocketAddress("localhost", 0));
JobConf jobConf = new JobConf();
jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
jobConf.setBoolean("fs.file.impl.disable.cache", true);
jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
when(splits.getLocations()).thenReturn(new String[] { "127.0.0.1" });
AppContext appCtx = mock(AppContext.class);
ClusterInfo clusterInfo = mock(ClusterInfo.class);
Resource resource = mock(Resource.class);
when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
when(resource.getMemory()).thenReturn(1024);
TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
jobFile, 1, splits, jobConf, taListener, new Token(),
new Credentials(), new SystemClock(), appCtx);
NodeId nid = NodeId.newInstance("127.0.0.2", 0);
ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
Container container = mock(Container.class);
when(container.getId()).thenReturn(contId);
when(container.getNodeId()).thenReturn(nid);
when(container.getNodeHttpAddress()).thenReturn("localhost:0");
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_SCHEDULE));
taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId, container,
mock(Map.class)));
taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
assertEquals("Task attempt is not in running state", taImpl.getState(),
TaskAttemptState.RUNNING);
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_KILL));
assertFalse("InternalError occurred trying to handle TA_KILL",
eventHandler.internalError);
assertEquals("Task should be in KILLED state",
TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP,
taImpl.getInternalState());
}
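  // Verifies that killing an attempt in COMMIT_PENDING is handled without an
  // internal error and moves it to KILL_CONTAINER_CLEANUP.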
@Test
public void testContainerKillWhileCommitPending() throws Exception {
ApplicationId appId = ApplicationId.newInstance(1, 2);
ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId,
0);
JobId jobId = MRBuilderUtils.newJobId(appId, 1);
TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
Path jobFile = mock(Path.class);
MockEventHandler eventHandler = new MockEventHandler();
TaskAttemptListener taListener = mock(TaskAttemptListener.class);
when(taListener.getAddress()).thenReturn(
new InetSocketAddress("localhost", 0));
JobConf jobConf = new JobConf();
jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
jobConf.setBoolean("fs.file.impl.disable.cache", true);
jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
when(splits.getLocations()).thenReturn(new String[] { "127.0.0.1" });
AppContext appCtx = mock(AppContext.class);
ClusterInfo clusterInfo = mock(ClusterInfo.class);
Resource resource = mock(Resource.class);
when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
when(resource.getMemory()).thenReturn(1024);
TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
jobFile, 1, splits, jobConf, taListener, new Token(),
new Credentials(), new SystemClock(), appCtx);
NodeId nid = NodeId.newInstance("127.0.0.2", 0);
ContainerId contId = ContainerId.newContainerId(appAttemptId, 3);
Container container = mock(Container.class);
when(container.getId()).thenReturn(contId);
when(container.getNodeId()).thenReturn(nid);
when(container.getNodeHttpAddress()).thenReturn("localhost:0");
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_SCHEDULE));
taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId, container,
mock(Map.class)));
taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
assertEquals("Task attempt is not in running state", taImpl.getState(),
TaskAttemptState.RUNNING);
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_COMMIT_PENDING));
assertEquals("Task should be in COMMIT_PENDING state",
TaskAttemptStateInternal.COMMIT_PENDING, taImpl.getInternalState());
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_KILL));
assertFalse("InternalError occurred trying to handle TA_KILL",
eventHandler.internalError);
assertEquals("Task should be in KILLED state",
TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP,
taImpl.getInternalState());
}
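  // A kill while the attempt is in SUCCESS_FINISHING_CONTAINER should drive
  // it through the kill cleanup states and leave it KILLED.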
@Test
public void testKillMapTaskWhileSuccessFinishing() throws Exception {
MockEventHandler eventHandler = new MockEventHandler();
TaskAttemptImpl taImpl = createTaskAttemptImpl(eventHandler);
taImpl.handle(new TaskAttemptEvent(taImpl.getID(),
TaskAttemptEventType.TA_DONE));
assertEquals("Task attempt is not in SUCCEEDED state", taImpl.getState(),
TaskAttemptState.SUCCEEDED);
assertEquals("Task attempt's internal state is not " +
"SUCCESS_FINISHING_CONTAINER", taImpl.getInternalState(),
TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER);
// If the map task is killed when it is in SUCCESS_FINISHING_CONTAINER
// state, the state will move to KILL_CONTAINER_CLEANUP
taImpl.handle(new TaskAttemptEvent(taImpl.getID(),
TaskAttemptEventType.TA_KILL));
assertEquals("Task attempt is not in KILLED state", taImpl.getState(),
TaskAttemptState.KILLED);
assertEquals("Task attempt's internal state is not KILL_CONTAINER_CLEANUP",
taImpl.getInternalState(),
TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP);
taImpl.handle(new TaskAttemptEvent(taImpl.getID(),
TaskAttemptEventType.TA_CONTAINER_CLEANED));
assertEquals("Task attempt's internal state is not KILL_TASK_CLEANUP",
taImpl.getInternalState(),
TaskAttemptStateInternal.KILL_TASK_CLEANUP);
taImpl.handle(new TaskAttemptEvent(taImpl.getID(),
TaskAttemptEventType.TA_CLEANUP_DONE));
assertEquals("Task attempt is not in KILLED state", taImpl.getState(),
TaskAttemptState.KILLED);
assertFalse("InternalError occurred", eventHandler.internalError);
}
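  // A kill while the attempt is in FAIL_FINISHING_CONTAINER is ignored; the
  // attempt stays FAILED and finishes through the fail cleanup states.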
@Test
public void testKillMapTaskWhileFailFinishing() throws Exception {
MockEventHandler eventHandler = new MockEventHandler();
TaskAttemptImpl taImpl = createTaskAttemptImpl(eventHandler);
taImpl.handle(new TaskAttemptEvent(taImpl.getID(),
TaskAttemptEventType.TA_FAILMSG));
assertEquals("Task attempt is not in FAILED state", taImpl.getState(),
TaskAttemptState.FAILED);
assertEquals("Task attempt's internal state is not " +
"FAIL_FINISHING_CONTAINER", taImpl.getInternalState(),
TaskAttemptStateInternal.FAIL_FINISHING_CONTAINER);
// If the map task is killed when it is in FAIL_FINISHING_CONTAINER state,
// the state will stay in FAIL_FINISHING_CONTAINER.
taImpl.handle(new TaskAttemptEvent(taImpl.getID(),
TaskAttemptEventType.TA_KILL));
assertEquals("Task attempt is not in RUNNING state", taImpl.getState(),
TaskAttemptState.FAILED);
assertEquals("Task attempt's internal state is not " +
"FAIL_FINISHING_CONTAINER", taImpl.getInternalState(),
TaskAttemptStateInternal.FAIL_FINISHING_CONTAINER);
taImpl.handle(new TaskAttemptEvent(taImpl.getID(),
TaskAttemptEventType.TA_TIMED_OUT));
assertEquals("Task attempt's internal state is not FAIL_CONTAINER_CLEANUP",
taImpl.getInternalState(),
TaskAttemptStateInternal.FAIL_CONTAINER_CLEANUP);
taImpl.handle(new TaskAttemptEvent(taImpl.getID(),
TaskAttemptEventType.TA_CONTAINER_CLEANED));
assertEquals("Task attempt's internal state is not FAIL_TASK_CLEANUP",
taImpl.getInternalState(),
TaskAttemptStateInternal.FAIL_TASK_CLEANUP);
taImpl.handle(new TaskAttemptEvent(taImpl.getID(),
TaskAttemptEventType.TA_CLEANUP_DONE));
assertEquals("Task attempt is not in KILLED state", taImpl.getState(),
TaskAttemptState.FAILED);
assertFalse("InternalError occurred", eventHandler.internalError);
}
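  // TA_FAILMSG_BY_CLIENT fails the attempt immediately, skipping
  // FAIL_FINISHING_CONTAINER and going straight to FAIL_CONTAINER_CLEANUP.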
@Test
public void testFailMapTaskByClient() throws Exception {
MockEventHandler eventHandler = new MockEventHandler();
TaskAttemptImpl taImpl = createTaskAttemptImpl(eventHandler);
taImpl.handle(new TaskAttemptEvent(taImpl.getID(),
TaskAttemptEventType.TA_FAILMSG_BY_CLIENT));
assertEquals("Task attempt is not in RUNNING state", taImpl.getState(),
TaskAttemptState.FAILED);
assertEquals("Task attempt's internal state is not " +
"FAIL_CONTAINER_CLEANUP", taImpl.getInternalState(),
TaskAttemptStateInternal.FAIL_CONTAINER_CLEANUP);
taImpl.handle(new TaskAttemptEvent(taImpl.getID(),
TaskAttemptEventType.TA_CONTAINER_CLEANED));
assertEquals("Task attempt's internal state is not FAIL_TASK_CLEANUP",
taImpl.getInternalState(),
TaskAttemptStateInternal.FAIL_TASK_CLEANUP);
taImpl.handle(new TaskAttemptEvent(taImpl.getID(),
TaskAttemptEventType.TA_CLEANUP_DONE));
assertEquals("Task attempt is not in KILLED state", taImpl.getState(),
TaskAttemptState.FAILED);
assertFalse("InternalError occurred", eventHandler.internalError);
}
@Test
public void testTaskAttemptDiagnosticEventOnFinishing() throws Exception {
MockEventHandler eventHandler = new MockEventHandler();
TaskAttemptImpl taImpl = createTaskAttemptImpl(eventHandler);
taImpl.handle(new TaskAttemptEvent(taImpl.getID(),
TaskAttemptEventType.TA_DONE));
assertEquals("Task attempt is not in RUNNING state", taImpl.getState(),
TaskAttemptState.SUCCEEDED);
assertEquals("Task attempt's internal state is not " +
"SUCCESS_FINISHING_CONTAINER", taImpl.getInternalState(),
TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER);
// TA_DIAGNOSTICS_UPDATE doesn't change state
taImpl.handle(new TaskAttemptDiagnosticsUpdateEvent(taImpl.getID(),
"Task got updated"));
assertEquals("Task attempt is not in RUNNING state", taImpl.getState(),
TaskAttemptState.SUCCEEDED);
assertEquals("Task attempt's internal state is not " +
"SUCCESS_FINISHING_CONTAINER", taImpl.getInternalState(),
TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER);
assertFalse("InternalError occurred", eventHandler.internalError);
}
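  // A timeout while in SUCCESS_FINISHING_CONTAINER moves the attempt to
  // SUCCESS_CONTAINER_CLEANUP while it remains SUCCEEDED.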
@Test
public void testTimeoutWhileSuccessFinishing() throws Exception {
MockEventHandler eventHandler = new MockEventHandler();
TaskAttemptImpl taImpl = createTaskAttemptImpl(eventHandler);
taImpl.handle(new TaskAttemptEvent(taImpl.getID(),
TaskAttemptEventType.TA_DONE));
assertEquals("Task attempt is not in RUNNING state", taImpl.getState(),
TaskAttemptState.SUCCEEDED);
assertEquals("Task attempt's internal state is not " +
"SUCCESS_FINISHING_CONTAINER", taImpl.getInternalState(),
TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER);
// If the task stays in SUCCESS_FINISHING_CONTAINER for too long,
// TaskAttemptListenerImpl will time out the attempt.
taImpl.handle(new TaskAttemptEvent(taImpl.getID(),
TaskAttemptEventType.TA_TIMED_OUT));
assertEquals("Task attempt is not in RUNNING state", taImpl.getState(),
TaskAttemptState.SUCCEEDED);
assertEquals("Task attempt's internal state is not " +
"SUCCESS_CONTAINER_CLEANUP", taImpl.getInternalState(),
TaskAttemptStateInternal.SUCCESS_CONTAINER_CLEANUP);
assertFalse("InternalError occurred", eventHandler.internalError);
}
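  // A timeout while in FAIL_FINISHING_CONTAINER moves the attempt to
  // FAIL_CONTAINER_CLEANUP.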
@Test
public void testTimeoutWhileFailFinishing() throws Exception {
MockEventHandler eventHandler = new MockEventHandler();
TaskAttemptImpl taImpl = createTaskAttemptImpl(eventHandler);
taImpl.handle(new TaskAttemptEvent(taImpl.getID(),
TaskAttemptEventType.TA_FAILMSG));
assertEquals("Task attempt is not in RUNNING state", taImpl.getState(),
TaskAttemptState.FAILED);
assertEquals("Task attempt's internal state is not " +
"FAIL_FINISHING_CONTAINER", taImpl.getInternalState(),
TaskAttemptStateInternal.FAIL_FINISHING_CONTAINER);
// If the task stays in FAIL_FINISHING_CONTAINER for too long,
// TaskAttemptListenerImpl will time out the attempt.
taImpl.handle(new TaskAttemptEvent(taImpl.getID(),
TaskAttemptEventType.TA_TIMED_OUT));
assertEquals("Task attempt's internal state is not FAIL_CONTAINER_CLEANUP",
taImpl.getInternalState(),
TaskAttemptStateInternal.FAIL_CONTAINER_CLEANUP);
assertFalse("InternalError occurred", eventHandler.internalError);
}
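  // Creates a TaskAttemptFinishingMonitor backed by the given event handler
  // and exposes it through the mocked AppContext.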
private void setupTaskAttemptFinishingMonitor(
EventHandler eventHandler, JobConf jobConf, AppContext appCtx) {
TaskAttemptFinishingMonitor taskAttemptFinishingMonitor =
new TaskAttemptFinishingMonitor(eventHandler);
taskAttemptFinishingMonitor.init(jobConf);
when(appCtx.getTaskAttemptFinishingMonitor()).
thenReturn(taskAttemptFinishingMonitor);
}
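  // Builds a map task attempt with mocked dependencies and drives it through
  // TA_SCHEDULE, container assignment, and container launch so that it is
  // left in the RUNNING state.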
private TaskAttemptImpl createTaskAttemptImpl(
MockEventHandler eventHandler) {
ApplicationId appId = ApplicationId.newInstance(1, 2);
ApplicationAttemptId appAttemptId =
ApplicationAttemptId.newInstance(appId, 0);
JobId jobId = MRBuilderUtils.newJobId(appId, 1);
TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
Path jobFile = mock(Path.class);
TaskAttemptListener taListener = mock(TaskAttemptListener.class);
when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
JobConf jobConf = new JobConf();
jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
jobConf.setBoolean("fs.file.impl.disable.cache", true);
jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");
TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
when(splits.getLocations()).thenReturn(new String[] {"127.0.0.1"});
AppContext appCtx = mock(AppContext.class);
ClusterInfo clusterInfo = mock(ClusterInfo.class);
when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
setupTaskAttemptFinishingMonitor(eventHandler, jobConf, appCtx);
TaskAttemptImpl taImpl =
new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
splits, jobConf, taListener,
mock(Token.class), new Credentials(),
new SystemClock(), appCtx);
NodeId nid = NodeId.newInstance("127.0.0.1", 0);
ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
Container container = mock(Container.class);
when(container.getId()).thenReturn(contId);
when(container.getNodeId()).thenReturn(nid);
when(container.getNodeHttpAddress()).thenReturn("localhost:0");
taImpl.handle(new TaskAttemptEvent(attemptId,
TaskAttemptEventType.TA_SCHEDULE));
taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,
container, mock(Map.class)));
taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
return taImpl;
}
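  // Event handler that records whether a job-level INTERNAL_ERROR event was
  // raised while handling task attempt events.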
public static class MockEventHandler implements EventHandler {
public boolean internalError;
@Override
public void handle(Event event) {
if (event instanceof JobEvent) {
JobEvent je = ((JobEvent) event);
if (JobEventType.INTERNAL_ERROR == je.getType()) {
internalError = true;
}
}
}
};
}
| 55,318
| 43.290633
| 103
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job.impl;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Task;
import org.apache.hadoop.mapred.TaskUmbilicalProtocol;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.TaskCounter;
import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo;
import org.apache.hadoop.mapreduce.v2.api.records.Avataar;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.app.job.TaskStateInternal;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptEvent;
import org.apache.hadoop.mapreduce.v2.app.metrics.MRAppMetrics;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.event.InlineDispatcher;
import org.apache.hadoop.yarn.util.Clock;
import org.apache.hadoop.yarn.util.Records;
import org.apache.hadoop.yarn.util.SystemClock;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@SuppressWarnings("rawtypes")
public class TestTaskImpl {
private static final Log LOG = LogFactory.getLog(TestTaskImpl.class);
private JobConf conf;
private TaskAttemptListener taskAttemptListener;
private Token<JobTokenIdentifier> jobToken;
private JobId jobId;
private Path remoteJobConfFile;
private Credentials credentials;
private Clock clock;
private MRAppMetrics metrics;
private TaskImpl mockTask;
private ApplicationId appId;
private TaskSplitMetaInfo taskSplitMetaInfo;
private String[] dataLocations = new String[0];
private AppContext appContext;
private int startCount = 0;
private int taskCounter = 0;
private final int partition = 1;
private InlineDispatcher dispatcher;
private List<MockTaskAttemptImpl> taskAttempts;
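  // TaskImpl subclass that records every attempt it creates and fails the
  // test if an internal error transition is taken.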
private class MockTaskImpl extends TaskImpl {
private int taskAttemptCounter = 0;
TaskType taskType;
public MockTaskImpl(JobId jobId, int partition,
EventHandler eventHandler, Path remoteJobConfFile, JobConf conf,
TaskAttemptListener taskAttemptListener,
Token<JobTokenIdentifier> jobToken,
Credentials credentials, Clock clock, int startCount,
MRAppMetrics metrics, AppContext appContext, TaskType taskType) {
super(jobId, taskType , partition, eventHandler,
remoteJobConfFile, conf, taskAttemptListener,
jobToken, credentials, clock,
startCount, metrics, appContext);
this.taskType = taskType;
}
@Override
public TaskType getType() {
return taskType;
}
@Override
protected TaskAttemptImpl createAttempt() {
MockTaskAttemptImpl attempt = new MockTaskAttemptImpl(getID(), ++taskAttemptCounter,
eventHandler, taskAttemptListener, remoteJobConfFile, partition,
conf, jobToken, credentials, clock, appContext, taskType);
taskAttempts.add(attempt);
return attempt;
}
@Override
protected int getMaxAttempts() {
return 100;
}
@Override
protected void internalError(TaskEventType type) {
super.internalError(type);
fail("Internal error: " + type);
}
}
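  // TaskAttemptImpl subclass whose progress, state, and counters can be set
  // directly by the test.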
private class MockTaskAttemptImpl extends TaskAttemptImpl {
private float progress = 0;
private TaskAttemptState state = TaskAttemptState.NEW;
private TaskType taskType;
private Counters attemptCounters = TaskAttemptImpl.EMPTY_COUNTERS;
public MockTaskAttemptImpl(TaskId taskId, int id, EventHandler eventHandler,
TaskAttemptListener taskAttemptListener, Path jobFile, int partition,
JobConf conf, Token<JobTokenIdentifier> jobToken,
Credentials credentials, Clock clock,
AppContext appContext, TaskType taskType) {
super(taskId, id, eventHandler, taskAttemptListener, jobFile, partition, conf,
dataLocations, jobToken, credentials, clock, appContext);
this.taskType = taskType;
}
public TaskAttemptId getAttemptId() {
return getID();
}
@Override
protected Task createRemoteTask() {
return new MockTask(taskType);
}
public float getProgress() {
return progress ;
}
public void setProgress(float progress) {
this.progress = progress;
}
public void setState(TaskAttemptState state) {
this.state = state;
}
public TaskAttemptState getState() {
return state;
}
@Override
public Counters getCounters() {
return attemptCounters;
}
public void setCounters(Counters counters) {
attemptCounters = counters;
}
}
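  // Minimal Task implementation with a no-op run(); only the task type
  // matters for these tests.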
private class MockTask extends Task {
private TaskType taskType;
MockTask(TaskType taskType) {
this.taskType = taskType;
}
@Override
public void run(JobConf job, TaskUmbilicalProtocol umbilical)
throws IOException, ClassNotFoundException, InterruptedException {
return;
}
@Override
public boolean isMapTask() {
return (taskType == TaskType.MAP);
}
}
@Before
@SuppressWarnings("unchecked")
public void setup() {
dispatcher = new InlineDispatcher();
++startCount;
conf = new JobConf();
taskAttemptListener = mock(TaskAttemptListener.class);
jobToken = (Token<JobTokenIdentifier>) mock(Token.class);
remoteJobConfFile = mock(Path.class);
credentials = null;
clock = new SystemClock();
metrics = mock(MRAppMetrics.class);
dataLocations = new String[1];
appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
jobId = Records.newRecord(JobId.class);
jobId.setId(1);
jobId.setAppId(appId);
appContext = mock(AppContext.class);
taskSplitMetaInfo = mock(TaskSplitMetaInfo.class);
when(taskSplitMetaInfo.getLocations()).thenReturn(dataLocations);
taskAttempts = new ArrayList<MockTaskAttemptImpl>();
}
private MockTaskImpl createMockTask(TaskType taskType) {
return new MockTaskImpl(jobId, partition, dispatcher.getEventHandler(),
remoteJobConfFile, conf, taskAttemptListener, jobToken,
credentials, clock,
startCount, metrics, appContext, taskType);
}
@After
public void teardown() {
taskAttempts.clear();
}
private TaskId getNewTaskID() {
TaskId taskId = Records.newRecord(TaskId.class);
taskId.setId(++taskCounter);
taskId.setJobId(jobId);
taskId.setTaskType(mockTask.getType());
return taskId;
}
private void scheduleTaskAttempt(TaskId taskId) {
mockTask.handle(new TaskEvent(taskId,
TaskEventType.T_SCHEDULE));
assertTaskScheduledState();
assertTaskAttemptAvataar(Avataar.VIRGIN);
}
private void killTask(TaskId taskId) {
mockTask.handle(new TaskEvent(taskId,
TaskEventType.T_KILL));
assertTaskKillWaitState();
}
private void killScheduledTaskAttempt(TaskAttemptId attemptId) {
mockTask.handle(new TaskTAttemptEvent(attemptId,
TaskEventType.T_ATTEMPT_KILLED));
assertTaskScheduledState();
}
private void launchTaskAttempt(TaskAttemptId attemptId) {
mockTask.handle(new TaskTAttemptEvent(attemptId,
TaskEventType.T_ATTEMPT_LAUNCHED));
assertTaskRunningState();
}
private void commitTaskAttempt(TaskAttemptId attemptId) {
mockTask.handle(new TaskTAttemptEvent(attemptId,
TaskEventType.T_ATTEMPT_COMMIT_PENDING));
assertTaskRunningState();
}
private MockTaskAttemptImpl getLastAttempt() {
return taskAttempts.get(taskAttempts.size()-1);
}
private void updateLastAttemptProgress(float p) {
getLastAttempt().setProgress(p);
}
private void updateLastAttemptState(TaskAttemptState s) {
getLastAttempt().setState(s);
}
private void killRunningTaskAttempt(TaskAttemptId attemptId) {
mockTask.handle(new TaskTAttemptEvent(attemptId,
TaskEventType.T_ATTEMPT_KILLED));
assertTaskRunningState();
}
private void failRunningTaskAttempt(TaskAttemptId attemptId) {
mockTask.handle(new TaskTAttemptEvent(attemptId,
TaskEventType.T_ATTEMPT_FAILED));
assertTaskRunningState();
}
/**
* {@link TaskState#NEW}
*/
private void assertTaskNewState() {
assertEquals(TaskState.NEW, mockTask.getState());
}
/**
* {@link TaskState#SCHEDULED}
*/
private void assertTaskScheduledState() {
assertEquals(TaskState.SCHEDULED, mockTask.getState());
}
/**
* {@link TaskState#RUNNING}
*/
private void assertTaskRunningState() {
assertEquals(TaskState.RUNNING, mockTask.getState());
}
/**
* {@link TaskState#KILL_WAIT}
*/
private void assertTaskKillWaitState() {
assertEquals(TaskStateInternal.KILL_WAIT, mockTask.getInternalState());
}
/**
* {@link TaskState#SUCCEEDED}
*/
private void assertTaskSucceededState() {
assertEquals(TaskState.SUCCEEDED, mockTask.getState());
}
/**
* {@link Avataar}
*/
private void assertTaskAttemptAvataar(Avataar avataar) {
for (TaskAttempt taskAttempt : mockTask.getAttempts().values()) {
if (((TaskAttemptImpl) taskAttempt).getAvataar() == avataar) {
return;
}
}
fail("There is no " + (avataar == Avataar.VIRGIN ? "virgin" : "speculative")
+ "task attempt");
}
@Test
public void testInit() {
LOG.info("--- START: testInit ---");
mockTask = createMockTask(TaskType.MAP);
assertTaskNewState();
assert(taskAttempts.size() == 0);
}
@Test
/**
* {@link TaskState#NEW}->{@link TaskState#SCHEDULED}
*/
public void testScheduleTask() {
LOG.info("--- START: testScheduleTask ---");
mockTask = createMockTask(TaskType.MAP);
TaskId taskId = getNewTaskID();
scheduleTaskAttempt(taskId);
}
@Test
/**
* {@link TaskState#SCHEDULED}->{@link TaskState#KILL_WAIT}
*/
public void testKillScheduledTask() {
LOG.info("--- START: testKillScheduledTask ---");
mockTask = createMockTask(TaskType.MAP);
TaskId taskId = getNewTaskID();
scheduleTaskAttempt(taskId);
killTask(taskId);
}
@Test
/**
* Kill attempt
* {@link TaskState#SCHEDULED}->{@link TaskState#SCHEDULED}
*/
public void testKillScheduledTaskAttempt() {
LOG.info("--- START: testKillScheduledTaskAttempt ---");
mockTask = createMockTask(TaskType.MAP);
TaskId taskId = getNewTaskID();
scheduleTaskAttempt(taskId);
killScheduledTaskAttempt(getLastAttempt().getAttemptId());
}
@Test
/**
* Launch attempt
* {@link TaskState#SCHEDULED}->{@link TaskState#RUNNING}
*/
public void testLaunchTaskAttempt() {
LOG.info("--- START: testLaunchTaskAttempt ---");
mockTask = createMockTask(TaskType.MAP);
TaskId taskId = getNewTaskID();
scheduleTaskAttempt(taskId);
launchTaskAttempt(getLastAttempt().getAttemptId());
}
@Test
/**
* Kill running attempt
* {@link TaskState#RUNNING}->{@link TaskState#RUNNING}
*/
public void testKillRunningTaskAttempt() {
LOG.info("--- START: testKillRunningTaskAttempt ---");
mockTask = createMockTask(TaskType.MAP);
TaskId taskId = getNewTaskID();
scheduleTaskAttempt(taskId);
launchTaskAttempt(getLastAttempt().getAttemptId());
killRunningTaskAttempt(getLastAttempt().getAttemptId());
}
@Test
public void testKillSuccessfulTask() {
LOG.info("--- START: testKillSuccesfulTask ---");
mockTask = createMockTask(TaskType.MAP);
TaskId taskId = getNewTaskID();
scheduleTaskAttempt(taskId);
launchTaskAttempt(getLastAttempt().getAttemptId());
commitTaskAttempt(getLastAttempt().getAttemptId());
mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(),
TaskEventType.T_ATTEMPT_SUCCEEDED));
assertTaskSucceededState();
mockTask.handle(new TaskEvent(taskId, TaskEventType.T_KILL));
assertTaskSucceededState();
}
@Test
public void testTaskProgress() {
LOG.info("--- START: testTaskProgress ---");
mockTask = createMockTask(TaskType.MAP);
// launch task
TaskId taskId = getNewTaskID();
scheduleTaskAttempt(taskId);
float progress = 0f;
assert(mockTask.getProgress() == progress);
launchTaskAttempt(getLastAttempt().getAttemptId());
// update attempt1
progress = 50f;
updateLastAttemptProgress(progress);
assert(mockTask.getProgress() == progress);
progress = 100f;
updateLastAttemptProgress(progress);
assert(mockTask.getProgress() == progress);
progress = 0f;
// mark first attempt as killed
updateLastAttemptState(TaskAttemptState.KILLED);
assert(mockTask.getProgress() == progress);
// kill first attempt
// should trigger a new attempt
// as no successful attempts
killRunningTaskAttempt(getLastAttempt().getAttemptId());
assert(taskAttempts.size() == 2);
assert(mockTask.getProgress() == 0f);
launchTaskAttempt(getLastAttempt().getAttemptId());
progress = 50f;
updateLastAttemptProgress(progress);
assert(mockTask.getProgress() == progress);
}
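  // An attempt killed while in COMMIT_PENDING must no longer be allowed to
  // commit.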
@Test
public void testKillDuringTaskAttemptCommit() {
mockTask = createMockTask(TaskType.REDUCE);
TaskId taskId = getNewTaskID();
scheduleTaskAttempt(taskId);
launchTaskAttempt(getLastAttempt().getAttemptId());
updateLastAttemptState(TaskAttemptState.COMMIT_PENDING);
commitTaskAttempt(getLastAttempt().getAttemptId());
TaskAttemptId commitAttempt = getLastAttempt().getAttemptId();
updateLastAttemptState(TaskAttemptState.KILLED);
killRunningTaskAttempt(commitAttempt);
assertFalse(mockTask.canCommit(commitAttempt));
}
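  // If the committing attempt fails, a new attempt is started and only that
  // new attempt is allowed to commit.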
@Test
public void testFailureDuringTaskAttemptCommit() {
mockTask = createMockTask(TaskType.MAP);
TaskId taskId = getNewTaskID();
scheduleTaskAttempt(taskId);
launchTaskAttempt(getLastAttempt().getAttemptId());
updateLastAttemptState(TaskAttemptState.COMMIT_PENDING);
commitTaskAttempt(getLastAttempt().getAttemptId());
// During the task attempt commit there is an exception which causes
// the attempt to fail
updateLastAttemptState(TaskAttemptState.FAILED);
failRunningTaskAttempt(getLastAttempt().getAttemptId());
assertEquals(2, taskAttempts.size());
updateLastAttemptState(TaskAttemptState.SUCCEEDED);
commitTaskAttempt(getLastAttempt().getAttemptId());
mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(),
TaskEventType.T_ATTEMPT_SUCCEEDED));
assertFalse("First attempt should not commit",
mockTask.canCommit(taskAttempts.get(0).getAttemptId()));
assertTrue("Second attempt should commit",
mockTask.canCommit(getLastAttempt().getAttemptId()));
assertTaskSucceededState();
}
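  // Launches a first attempt, adds a speculative attempt that succeeds, then
  // finishes the first attempt with the given event and checks that the task
  // remains SUCCEEDED.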
private void runSpeculativeTaskAttemptSucceeds(
TaskEventType firstAttemptFinishEvent) {
TaskId taskId = getNewTaskID();
scheduleTaskAttempt(taskId);
launchTaskAttempt(getLastAttempt().getAttemptId());
updateLastAttemptState(TaskAttemptState.RUNNING);
// Add a speculative task attempt that succeeds
mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(),
TaskEventType.T_ADD_SPEC_ATTEMPT));
launchTaskAttempt(getLastAttempt().getAttemptId());
commitTaskAttempt(getLastAttempt().getAttemptId());
mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(),
TaskEventType.T_ATTEMPT_SUCCEEDED));
// The task should now have succeeded
assertTaskSucceededState();
// Now complete the first task attempt, after the second has succeeded
mockTask.handle(new TaskTAttemptEvent(taskAttempts.get(0).getAttemptId(),
firstAttemptFinishEvent));
// The task should still be in the succeeded state
assertTaskSucceededState();
    // The task should contain a speculative task attempt
assertTaskAttemptAvataar(Avataar.SPECULATIVE);
}
@Test
public void testMapSpeculativeTaskAttemptSucceedsEvenIfFirstFails() {
mockTask = createMockTask(TaskType.MAP);
runSpeculativeTaskAttemptSucceeds(TaskEventType.T_ATTEMPT_FAILED);
}
@Test
public void testReduceSpeculativeTaskAttemptSucceedsEvenIfFirstFails() {
mockTask = createMockTask(TaskType.REDUCE);
runSpeculativeTaskAttemptSucceeds(TaskEventType.T_ATTEMPT_FAILED);
}
@Test
public void testMapSpeculativeTaskAttemptSucceedsEvenIfFirstIsKilled() {
mockTask = createMockTask(TaskType.MAP);
runSpeculativeTaskAttemptSucceeds(TaskEventType.T_ATTEMPT_KILLED);
}
@Test
public void testReduceSpeculativeTaskAttemptSucceedsEvenIfFirstIsKilled() {
mockTask = createMockTask(TaskType.REDUCE);
runSpeculativeTaskAttemptSucceeds(TaskEventType.T_ATTEMPT_KILLED);
}
@Test
public void testMultipleTaskAttemptsSucceed() {
mockTask = createMockTask(TaskType.MAP);
runSpeculativeTaskAttemptSucceeds(TaskEventType.T_ATTEMPT_SUCCEEDED);
}
@Test
public void testCommitAfterSucceeds() {
mockTask = createMockTask(TaskType.REDUCE);
runSpeculativeTaskAttemptSucceeds(TaskEventType.T_ATTEMPT_COMMIT_PENDING);
}
@Test
public void testSpeculativeMapFetchFailure() {
// Setup a scenario where speculative task wins, first attempt killed
mockTask = createMockTask(TaskType.MAP);
runSpeculativeTaskAttemptSucceeds(TaskEventType.T_ATTEMPT_KILLED);
assertEquals(2, taskAttempts.size());
// speculative attempt retroactively fails from fetch failures
mockTask.handle(new TaskTAttemptEvent(taskAttempts.get(1).getAttemptId(),
TaskEventType.T_ATTEMPT_FAILED));
assertTaskScheduledState();
assertEquals(3, taskAttempts.size());
}
@Test
public void testSpeculativeMapMultipleSucceedFetchFailure() {
// Setup a scenario where speculative task wins, first attempt succeeds
mockTask = createMockTask(TaskType.MAP);
runSpeculativeTaskAttemptSucceeds(TaskEventType.T_ATTEMPT_SUCCEEDED);
assertEquals(2, taskAttempts.size());
// speculative attempt retroactively fails from fetch failures
mockTask.handle(new TaskTAttemptEvent(taskAttempts.get(1).getAttemptId(),
TaskEventType.T_ATTEMPT_FAILED));
assertTaskScheduledState();
assertEquals(3, taskAttempts.size());
}
@Test
public void testSpeculativeMapFailedFetchFailure() {
// Setup a scenario where speculative task wins, first attempt succeeds
mockTask = createMockTask(TaskType.MAP);
runSpeculativeTaskAttemptSucceeds(TaskEventType.T_ATTEMPT_FAILED);
assertEquals(2, taskAttempts.size());
// speculative attempt retroactively fails from fetch failures
mockTask.handle(new TaskTAttemptEvent(taskAttempts.get(1).getAttemptId(),
TaskEventType.T_ATTEMPT_FAILED));
assertTaskScheduledState();
assertEquals(3, taskAttempts.size());
}
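  // With only one allowed attempt, a single attempt failure fails the task;
  // later kill, speculation, and attempt-completion events must not move the
  // task out of FAILED.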
@Test
public void testFailedTransitions() {
mockTask = new MockTaskImpl(jobId, partition, dispatcher.getEventHandler(),
remoteJobConfFile, conf, taskAttemptListener, jobToken,
credentials, clock, startCount, metrics, appContext, TaskType.MAP) {
@Override
protected int getMaxAttempts() {
return 1;
}
};
TaskId taskId = getNewTaskID();
scheduleTaskAttempt(taskId);
launchTaskAttempt(getLastAttempt().getAttemptId());
// add three more speculative attempts
mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(),
TaskEventType.T_ADD_SPEC_ATTEMPT));
launchTaskAttempt(getLastAttempt().getAttemptId());
mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(),
TaskEventType.T_ADD_SPEC_ATTEMPT));
launchTaskAttempt(getLastAttempt().getAttemptId());
mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(),
TaskEventType.T_ADD_SPEC_ATTEMPT));
launchTaskAttempt(getLastAttempt().getAttemptId());
assertEquals(4, taskAttempts.size());
// have the first attempt fail, verify task failed due to no retries
MockTaskAttemptImpl taskAttempt = taskAttempts.get(0);
taskAttempt.setState(TaskAttemptState.FAILED);
mockTask.handle(new TaskTAttemptEvent(taskAttempt.getAttemptId(),
TaskEventType.T_ATTEMPT_FAILED));
assertEquals(TaskState.FAILED, mockTask.getState());
// verify task can no longer be killed
mockTask.handle(new TaskEvent(taskId, TaskEventType.T_KILL));
assertEquals(TaskState.FAILED, mockTask.getState());
// verify speculative doesn't launch new tasks
mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(),
TaskEventType.T_ADD_SPEC_ATTEMPT));
mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(),
TaskEventType.T_ATTEMPT_LAUNCHED));
assertEquals(TaskState.FAILED, mockTask.getState());
assertEquals(4, taskAttempts.size());
// verify attempt events from active tasks don't knock task out of FAILED
taskAttempt = taskAttempts.get(1);
taskAttempt.setState(TaskAttemptState.COMMIT_PENDING);
mockTask.handle(new TaskTAttemptEvent(taskAttempt.getAttemptId(),
TaskEventType.T_ATTEMPT_COMMIT_PENDING));
assertEquals(TaskState.FAILED, mockTask.getState());
taskAttempt.setState(TaskAttemptState.FAILED);
mockTask.handle(new TaskTAttemptEvent(taskAttempt.getAttemptId(),
TaskEventType.T_ATTEMPT_FAILED));
assertEquals(TaskState.FAILED, mockTask.getState());
taskAttempt = taskAttempts.get(2);
taskAttempt.setState(TaskAttemptState.SUCCEEDED);
mockTask.handle(new TaskTAttemptEvent(taskAttempt.getAttemptId(),
TaskEventType.T_ATTEMPT_SUCCEEDED));
assertEquals(TaskState.FAILED, mockTask.getState());
taskAttempt = taskAttempts.get(3);
taskAttempt.setState(TaskAttemptState.KILLED);
mockTask.handle(new TaskTAttemptEvent(taskAttempt.getAttemptId(),
TaskEventType.T_ATTEMPT_KILLED));
assertEquals(TaskState.FAILED, mockTask.getState());
}
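  // When a speculative attempt wins, the task should report the winning
  // attempt's counters.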
@Test
public void testCountersWithSpeculation() {
mockTask = new MockTaskImpl(jobId, partition, dispatcher.getEventHandler(),
remoteJobConfFile, conf, taskAttemptListener, jobToken,
credentials, clock, startCount, metrics, appContext, TaskType.MAP) {
@Override
protected int getMaxAttempts() {
return 1;
}
};
TaskId taskId = getNewTaskID();
scheduleTaskAttempt(taskId);
launchTaskAttempt(getLastAttempt().getAttemptId());
updateLastAttemptState(TaskAttemptState.RUNNING);
MockTaskAttemptImpl baseAttempt = getLastAttempt();
// add a speculative attempt
mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(),
TaskEventType.T_ADD_SPEC_ATTEMPT));
launchTaskAttempt(getLastAttempt().getAttemptId());
updateLastAttemptState(TaskAttemptState.RUNNING);
MockTaskAttemptImpl specAttempt = getLastAttempt();
assertEquals(2, taskAttempts.size());
Counters specAttemptCounters = new Counters();
Counter cpuCounter = specAttemptCounters.findCounter(
TaskCounter.CPU_MILLISECONDS);
cpuCounter.setValue(1000);
specAttempt.setCounters(specAttemptCounters);
    // have the speculative attempt succeed while the base attempt also reaches 1.0 progress
commitTaskAttempt(specAttempt.getAttemptId());
specAttempt.setProgress(1.0f);
specAttempt.setState(TaskAttemptState.SUCCEEDED);
mockTask.handle(new TaskTAttemptEvent(specAttempt.getAttemptId(),
TaskEventType.T_ATTEMPT_SUCCEEDED));
assertEquals(TaskState.SUCCEEDED, mockTask.getState());
baseAttempt.setProgress(1.0f);
Counters taskCounters = mockTask.getCounters();
assertEquals("wrong counters for task", specAttemptCounters, taskCounters);
}
}
| 25,959
| 33.429708
| 91
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttemptContainerRequest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job.impl;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import org.junit.Assert;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;
import org.apache.hadoop.io.DataInputByteBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapTaskAttemptImpl;
import org.apache.hadoop.mapred.WrappedJvmID;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.util.SystemClock;
import org.junit.Test;
@SuppressWarnings({"rawtypes"})
public class TestTaskAttemptContainerRequest {
//WARNING: This test must be the only test in this file. This is because
// there is an optimization where the credentials passed in are cached
// statically so they do not need to be recomputed when creating a new
  // ContainerLaunchContext. If other tests run first, this code will cache
  // their credentials and this test will fail when looking for the
  // credentials it inserted.
@Test
public void testAttemptContainerRequest() throws Exception {
final Text SECRET_KEY_ALIAS = new Text("secretkeyalias");
final byte[] SECRET_KEY = ("secretkey").getBytes();
Map<ApplicationAccessType, String> acls =
new HashMap<ApplicationAccessType, String>(1);
acls.put(ApplicationAccessType.VIEW_APP, "otheruser");
ApplicationId appId = ApplicationId.newInstance(1, 1);
JobId jobId = MRBuilderUtils.newJobId(appId, 1);
TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
Path jobFile = mock(Path.class);
EventHandler eventHandler = mock(EventHandler.class);
TaskAttemptListener taListener = mock(TaskAttemptListener.class);
when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));
JobConf jobConf = new JobConf();
jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
jobConf.setBoolean("fs.file.impl.disable.cache", true);
jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
// setup UGI for security so tokens and keys are preserved
jobConf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
UserGroupInformation.setConfiguration(jobConf);
Credentials credentials = new Credentials();
credentials.addSecretKey(SECRET_KEY_ALIAS, SECRET_KEY);
Token<JobTokenIdentifier> jobToken = new Token<JobTokenIdentifier>(
("tokenid").getBytes(), ("tokenpw").getBytes(),
new Text("tokenkind"), new Text("tokenservice"));
TaskAttemptImpl taImpl =
new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
mock(TaskSplitMetaInfo.class), jobConf, taListener,
jobToken, credentials,
new SystemClock(), null);
jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, taImpl.getID().toString());
ContainerLaunchContext launchCtx =
TaskAttemptImpl.createContainerLaunchContext(acls,
jobConf, jobToken, taImpl.createRemoteTask(),
TypeConverter.fromYarn(jobId),
mock(WrappedJvmID.class), taListener,
credentials);
Assert.assertEquals("ACLs mismatch", acls, launchCtx.getApplicationACLs());
Credentials launchCredentials = new Credentials();
DataInputByteBuffer dibb = new DataInputByteBuffer();
dibb.reset(launchCtx.getTokens());
launchCredentials.readTokenStorageStream(dibb);
// verify all tokens specified for the task attempt are in the launch context
for (Token<? extends TokenIdentifier> token : credentials.getAllTokens()) {
Token<? extends TokenIdentifier> launchToken =
launchCredentials.getToken(token.getService());
Assert.assertNotNull("Token " + token.getService() + " is missing",
launchToken);
Assert.assertEquals("Token " + token.getService() + " mismatch",
token, launchToken);
}
// verify the secret key is in the launch context
Assert.assertNotNull("Secret key missing",
launchCredentials.getSecretKey(SECRET_KEY_ALIAS));
Assert.assertTrue("Secret key mismatch", Arrays.equals(SECRET_KEY,
launchCredentials.getSecretKey(SECRET_KEY_ALIAS)));
}
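  // RawLocalFileSystem stub that returns a fixed FileStatus for any path so
  // the test does not need real files on disk.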
static public class StubbedFS extends RawLocalFileSystem {
@Override
public FileStatus getFileStatus(Path f) throws IOException {
return new FileStatus(1, false, 1, 1, 1, f);
}
}
}
| 6,446
| 41.98
| 90
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job.impl;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.EnumSet;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.jobhistory.EventType;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo;
import org.apache.hadoop.mapreduce.jobhistory.JobSubmittedEvent;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.JobStatus.State;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler;
import org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventType;
import org.apache.hadoop.mapreduce.v2.app.job.JobStateInternal;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobDiagnosticsUpdateEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobFinishEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobStartEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptEvent;
import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl.InitTransition;
import org.apache.hadoop.mapreduce.v2.app.metrics.MRAppMetrics;
import org.apache.hadoop.mapreduce.v2.app.rm.RMHeartbeatHandler;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.event.DrainDispatcher;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.event.InlineDispatcher;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.state.StateMachine;
import org.apache.hadoop.yarn.state.StateMachineFactory;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.Records;
import org.apache.hadoop.yarn.util.SystemClock;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.Mockito;
/**
* Tests various functions of the JobImpl class
*/
@SuppressWarnings({"rawtypes"})
public class TestJobImpl {
static String stagingDir = "target/test-staging/";
@BeforeClass
public static void setup() {
File dir = new File(stagingDir);
stagingDir = dir.getAbsolutePath();
}
@Before
public void cleanup() throws IOException {
File dir = new File(stagingDir);
if(dir.exists()) {
FileUtils.deleteDirectory(dir);
}
dir.mkdirs();
}
@Test
public void testJobNoTasks() {
Configuration conf = new Configuration();
conf.setInt(MRJobConfig.NUM_REDUCES, 0);
conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
conf.set(MRJobConfig.WORKFLOW_ID, "testId");
conf.set(MRJobConfig.WORKFLOW_NAME, "testName");
conf.set(MRJobConfig.WORKFLOW_NODE_NAME, "testNodeName");
conf.set(MRJobConfig.WORKFLOW_ADJACENCY_PREFIX_STRING + "key1", "value1");
conf.set(MRJobConfig.WORKFLOW_ADJACENCY_PREFIX_STRING + "key2", "value2");
conf.set(MRJobConfig.WORKFLOW_TAGS, "tag1,tag2");
AsyncDispatcher dispatcher = new AsyncDispatcher();
dispatcher.init(conf);
dispatcher.start();
OutputCommitter committer = mock(OutputCommitter.class);
CommitterEventHandler commitHandler =
createCommitterEventHandler(dispatcher, committer);
commitHandler.init(conf);
commitHandler.start();
JobSubmittedEventHandler jseHandler = new JobSubmittedEventHandler("testId",
"testName", "testNodeName", "\"key2\"=\"value2\" \"key1\"=\"value1\" ",
"tag1,tag2");
dispatcher.register(EventType.class, jseHandler);
JobImpl job = createStubbedJob(conf, dispatcher, 0, null);
job.handle(new JobEvent(job.getID(), JobEventType.JOB_INIT));
assertJobState(job, JobStateInternal.INITED);
job.handle(new JobStartEvent(job.getID()));
assertJobState(job, JobStateInternal.SUCCEEDED);
dispatcher.stop();
commitHandler.stop();
try {
Assert.assertTrue(jseHandler.getAssertValue());
} catch (InterruptedException e) {
Assert.fail("Workflow related attributes are not tested properly");
}
}
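  // Verifies that a failure thrown from OutputCommitter.commitJob() while the
  // job is COMMITTING moves the job to FAILED.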
@Test(timeout=20000)
public void testCommitJobFailsJob() throws Exception {
Configuration conf = new Configuration();
conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
AsyncDispatcher dispatcher = new AsyncDispatcher();
dispatcher.init(conf);
dispatcher.start();
CyclicBarrier syncBarrier = new CyclicBarrier(2);
OutputCommitter committer = new TestingOutputCommitter(syncBarrier, false);
CommitterEventHandler commitHandler =
createCommitterEventHandler(dispatcher, committer);
commitHandler.init(conf);
commitHandler.start();
JobImpl job = createRunningStubbedJob(conf, dispatcher, 2, null);
completeJobTasks(job);
assertJobState(job, JobStateInternal.COMMITTING);
// let the committer fail and verify the job fails
syncBarrier.await();
assertJobState(job, JobStateInternal.FAILED);
dispatcher.stop();
commitHandler.stop();
}
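  // Verifies that a successful commit moves the job to SUCCEEDED and that
  // stray task-related events afterwards do not change the terminal state.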
@Test(timeout=20000)
public void testCheckJobCompleteSuccess() throws Exception {
Configuration conf = new Configuration();
conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
AsyncDispatcher dispatcher = new AsyncDispatcher();
dispatcher.init(conf);
dispatcher.start();
CyclicBarrier syncBarrier = new CyclicBarrier(2);
OutputCommitter committer = new TestingOutputCommitter(syncBarrier, true);
CommitterEventHandler commitHandler =
createCommitterEventHandler(dispatcher, committer);
commitHandler.init(conf);
commitHandler.start();
JobImpl job = createRunningStubbedJob(conf, dispatcher, 2, null);
completeJobTasks(job);
assertJobState(job, JobStateInternal.COMMITTING);
// let the committer complete and verify the job succeeds
syncBarrier.await();
assertJobState(job, JobStateInternal.SUCCEEDED);
job.handle(new JobEvent(job.getID(),
JobEventType.JOB_TASK_ATTEMPT_COMPLETED));
assertJobState(job, JobStateInternal.SUCCEEDED);
job.handle(new JobEvent(job.getID(),
JobEventType.JOB_MAP_TASK_RESCHEDULED));
assertJobState(job, JobStateInternal.SUCCEEDED);
dispatcher.stop();
commitHandler.stop();
}
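  // Verifies that an AM reboot received while the committer is blocked in
  // setupJob() moves the job to the internal REBOOT state while still
  // reporting RUNNING externally (this is not the last AM retry).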
@Test(timeout=20000)
public void testRebootedDuringSetup() throws Exception{
Configuration conf = new Configuration();
conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
AsyncDispatcher dispatcher = new AsyncDispatcher();
dispatcher.init(conf);
dispatcher.start();
OutputCommitter committer = new StubbedOutputCommitter() {
@Override
public synchronized void setupJob(JobContext jobContext)
throws IOException {
        while (!Thread.interrupted()) {
          try {
            wait();
          } catch (InterruptedException e) {

}
}
}
};
CommitterEventHandler commitHandler =
createCommitterEventHandler(dispatcher, committer);
commitHandler.init(conf);
commitHandler.start();
AppContext mockContext = mock(AppContext.class);
when(mockContext.isLastAMRetry()).thenReturn(false);
JobImpl job = createStubbedJob(conf, dispatcher, 2, mockContext);
JobId jobId = job.getID();
job.handle(new JobEvent(jobId, JobEventType.JOB_INIT));
assertJobState(job, JobStateInternal.INITED);
job.handle(new JobStartEvent(jobId));
assertJobState(job, JobStateInternal.SETUP);
job.handle(new JobEvent(job.getID(), JobEventType.JOB_AM_REBOOT));
assertJobState(job, JobStateInternal.REBOOT);
// return the external state as RUNNING since otherwise JobClient will
// exit when it polls the AM for job state
Assert.assertEquals(JobState.RUNNING, job.getState());
dispatcher.stop();
commitHandler.stop();
}
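  // Verifies reboot handling on the last AM retry: the job enters REBOOT,
  // reports RUNNING externally until unregistration succeeds, and ERROR
  // afterwards.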
@Test(timeout=20000)
public void testRebootedDuringCommit() throws Exception {
Configuration conf = new Configuration();
conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
conf.setInt(MRJobConfig.MR_AM_MAX_ATTEMPTS, 2);
AsyncDispatcher dispatcher = new AsyncDispatcher();
dispatcher.init(conf);
dispatcher.start();
CyclicBarrier syncBarrier = new CyclicBarrier(2);
OutputCommitter committer = new WaitingOutputCommitter(syncBarrier, true);
CommitterEventHandler commitHandler =
createCommitterEventHandler(dispatcher, committer);
commitHandler.init(conf);
commitHandler.start();
AppContext mockContext = mock(AppContext.class);
when(mockContext.isLastAMRetry()).thenReturn(true);
when(mockContext.hasSuccessfullyUnregistered()).thenReturn(false);
JobImpl job = createRunningStubbedJob(conf, dispatcher, 2, mockContext);
completeJobTasks(job);
assertJobState(job, JobStateInternal.COMMITTING);
syncBarrier.await();
job.handle(new JobEvent(job.getID(), JobEventType.JOB_AM_REBOOT));
assertJobState(job, JobStateInternal.REBOOT);
    // the external state stays RUNNING until the AM has successfully
    // unregistered; only then is it reported as ERROR since this is the
    // last AM retry
Assert.assertEquals(JobState.RUNNING, job.getState());
when(mockContext.hasSuccessfullyUnregistered()).thenReturn(true);
Assert.assertEquals(JobState.ERROR, job.getState());
dispatcher.stop();
commitHandler.stop();
}
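  // Verifies that a kill received while the committer is blocked in
  // setupJob() moves the job to KILLED.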
@Test(timeout=20000)
public void testKilledDuringSetup() throws Exception {
Configuration conf = new Configuration();
conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
AsyncDispatcher dispatcher = new AsyncDispatcher();
dispatcher.init(conf);
dispatcher.start();
OutputCommitter committer = new StubbedOutputCommitter() {
@Override
public synchronized void setupJob(JobContext jobContext)
throws IOException {
while (!Thread.interrupted()) {
try {
wait();
} catch (InterruptedException e) {
}
}
}
};
CommitterEventHandler commitHandler =
createCommitterEventHandler(dispatcher, committer);
commitHandler.init(conf);
commitHandler.start();
JobImpl job = createStubbedJob(conf, dispatcher, 2, null);
JobId jobId = job.getID();
job.handle(new JobEvent(jobId, JobEventType.JOB_INIT));
assertJobState(job, JobStateInternal.INITED);
job.handle(new JobStartEvent(jobId));
assertJobState(job, JobStateInternal.SETUP);
job.handle(new JobEvent(job.getID(), JobEventType.JOB_KILL));
assertJobState(job, JobStateInternal.KILLED);
dispatcher.stop();
commitHandler.stop();
}
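  // Verifies that a kill received while commitJob() is still in progress
  // moves the job to KILLED.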
@Test(timeout=20000)
public void testKilledDuringCommit() throws Exception {
Configuration conf = new Configuration();
conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
AsyncDispatcher dispatcher = new AsyncDispatcher();
dispatcher.init(conf);
dispatcher.start();
CyclicBarrier syncBarrier = new CyclicBarrier(2);
OutputCommitter committer = new WaitingOutputCommitter(syncBarrier, true);
CommitterEventHandler commitHandler =
createCommitterEventHandler(dispatcher, committer);
commitHandler.init(conf);
commitHandler.start();
JobImpl job = createRunningStubbedJob(conf, dispatcher, 2, null);
completeJobTasks(job);
assertJobState(job, JobStateInternal.COMMITTING);
syncBarrier.await();
job.handle(new JobEvent(job.getID(), JobEventType.JOB_KILL));
assertJobState(job, JobStateInternal.KILLED);
dispatcher.stop();
commitHandler.stop();
}
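  // Verifies that abortJob() is not invoked while the job is in FAIL_WAIT
  // waiting for its tasks to be killed, and is invoked exactly once when the
  // job finally moves to FAILED.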
@Test
public void testAbortJobCalledAfterKillingTasks() throws IOException {
Configuration conf = new Configuration();
conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
conf.set(MRJobConfig.MR_AM_COMMITTER_CANCEL_TIMEOUT_MS, "1000");
InlineDispatcher dispatcher = new InlineDispatcher();
dispatcher.init(conf);
dispatcher.start();
OutputCommitter committer = Mockito.mock(OutputCommitter.class);
CommitterEventHandler commitHandler =
createCommitterEventHandler(dispatcher, committer);
commitHandler.init(conf);
commitHandler.start();
JobImpl job = createRunningStubbedJob(conf, dispatcher, 2, null);
//Fail one task. This should land the JobImpl in the FAIL_WAIT state
job.handle(new JobTaskEvent(
MRBuilderUtils.newTaskId(job.getID(), 1, TaskType.MAP),
TaskState.FAILED));
//Verify abort job hasn't been called
Mockito.verify(committer, Mockito.never())
.abortJob((JobContext) Mockito.any(), (State) Mockito.any());
assertJobState(job, JobStateInternal.FAIL_WAIT);
//Verify abortJob is called once and the job failed
Mockito.verify(committer, Mockito.timeout(2000).times(1))
.abortJob((JobContext) Mockito.any(), (State) Mockito.any());
assertJobState(job, JobStateInternal.FAILED);
dispatcher.stop();
}
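  // Verifies that failing the only task attempt drives the job through
  // FAIL_ABORT to FAILED without hanging (guarded by the test timeout).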
@Test (timeout=10000)
public void testFailAbortDoesntHang() throws IOException {
Configuration conf = new Configuration();
conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
conf.set(MRJobConfig.MR_AM_COMMITTER_CANCEL_TIMEOUT_MS, "1000");
DrainDispatcher dispatcher = new DrainDispatcher();
dispatcher.init(conf);
dispatcher.start();
OutputCommitter committer = Mockito.mock(OutputCommitter.class);
CommitterEventHandler commitHandler =
createCommitterEventHandler(dispatcher, committer);
commitHandler.init(conf);
commitHandler.start();
//Job has only 1 mapper task. No reducers
conf.setInt(MRJobConfig.NUM_REDUCES, 0);
conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, 1);
JobImpl job = createRunningStubbedJob(conf, dispatcher, 1, null);
//Fail / finish all the tasks. This should land the JobImpl directly in the
//FAIL_ABORT state
for(Task t: job.tasks.values()) {
TaskImpl task = (TaskImpl) t;
task.handle(new TaskEvent(task.getID(), TaskEventType.T_SCHEDULE));
for(TaskAttempt ta: task.getAttempts().values()) {
task.handle(new TaskTAttemptEvent(ta.getID(),
TaskEventType.T_ATTEMPT_FAILED));
}
}
dispatcher.await();
//Verify abortJob is called once and the job failed
Mockito.verify(committer, Mockito.timeout(2000).times(1))
.abortJob((JobContext) Mockito.any(), (State) Mockito.any());
assertJobState(job, JobStateInternal.FAILED);
dispatcher.stop();
}
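  // Verifies that a kill received while abortJob() is blocked (after a failed
  // setupJob()) moves the job from FAIL_ABORT to KILLED.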
@Test(timeout=20000)
public void testKilledDuringFailAbort() throws Exception {
Configuration conf = new Configuration();
conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
AsyncDispatcher dispatcher = new AsyncDispatcher();
dispatcher.init(conf);
dispatcher.start();
OutputCommitter committer = new StubbedOutputCommitter() {
@Override
public void setupJob(JobContext jobContext) throws IOException {
throw new IOException("forced failure");
}
@Override
public synchronized void abortJob(JobContext jobContext, State state)
throws IOException {
while (!Thread.interrupted()) {
try {
wait();
} catch (InterruptedException e) {
}
}
}
};
CommitterEventHandler commitHandler =
createCommitterEventHandler(dispatcher, committer);
commitHandler.init(conf);
commitHandler.start();
JobImpl job = createStubbedJob(conf, dispatcher, 2, null);
JobId jobId = job.getID();
job.handle(new JobEvent(jobId, JobEventType.JOB_INIT));
assertJobState(job, JobStateInternal.INITED);
job.handle(new JobStartEvent(jobId));
assertJobState(job, JobStateInternal.FAIL_ABORT);
job.handle(new JobEvent(jobId, JobEventType.JOB_KILL));
assertJobState(job, JobStateInternal.KILLED);
dispatcher.stop();
commitHandler.stop();
}
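  // Verifies that a second kill received while the first kill's abortJob() is
  // still blocked moves the job from KILL_ABORT to KILLED.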
@Test(timeout=20000)
public void testKilledDuringKillAbort() throws Exception {
Configuration conf = new Configuration();
conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
AsyncDispatcher dispatcher = new AsyncDispatcher();
dispatcher.init(conf);
dispatcher.start();
OutputCommitter committer = new StubbedOutputCommitter() {
@Override
public synchronized void abortJob(JobContext jobContext, State state)
throws IOException {
while (!Thread.interrupted()) {
try {
wait();
} catch (InterruptedException e) {
}
}
}
};
CommitterEventHandler commitHandler =
createCommitterEventHandler(dispatcher, committer);
commitHandler.init(conf);
commitHandler.start();
JobImpl job = createStubbedJob(conf, dispatcher, 2, null);
JobId jobId = job.getID();
job.handle(new JobEvent(jobId, JobEventType.JOB_INIT));
assertJobState(job, JobStateInternal.INITED);
job.handle(new JobStartEvent(jobId));
assertJobState(job, JobStateInternal.SETUP);
job.handle(new JobEvent(jobId, JobEventType.JOB_KILL));
assertJobState(job, JobStateInternal.KILL_ABORT);
job.handle(new JobEvent(jobId, JobEventType.JOB_KILL));
assertJobState(job, JobStateInternal.KILLED);
dispatcher.stop();
commitHandler.stop();
}
public static void main(String[] args) throws Exception {
TestJobImpl t = new TestJobImpl();
t.testJobNoTasks();
t.testCheckJobCompleteSuccess();
t.testCheckAccess();
t.testReportDiagnostics();
t.testUberDecision();
}
@Test
public void testCheckAccess() {
// Create two unique users
String user1 = System.getProperty("user.name");
String user2 = user1 + "1234";
UserGroupInformation ugi1 = UserGroupInformation.createRemoteUser(user1);
UserGroupInformation ugi2 = UserGroupInformation.createRemoteUser(user2);
// Create the job
JobID jobID = JobID.forName("job_1234567890000_0001");
JobId jobId = TypeConverter.toYarn(jobID);
// Setup configuration access only to user1 (owner)
Configuration conf1 = new Configuration();
conf1.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
conf1.set(MRJobConfig.JOB_ACL_VIEW_JOB, "");
// Verify access
JobImpl job1 = new JobImpl(jobId, null, conf1, null, null, null, null, null,
null, null, null, true, user1, 0, null, null, null, null);
Assert.assertTrue(job1.checkAccess(ugi1, JobACL.VIEW_JOB));
Assert.assertFalse(job1.checkAccess(ugi2, JobACL.VIEW_JOB));
// Setup configuration access to the user1 (owner) and user2
Configuration conf2 = new Configuration();
conf2.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
conf2.set(MRJobConfig.JOB_ACL_VIEW_JOB, user2);
// Verify access
JobImpl job2 = new JobImpl(jobId, null, conf2, null, null, null, null, null,
null, null, null, true, user1, 0, null, null, null, null);
Assert.assertTrue(job2.checkAccess(ugi1, JobACL.VIEW_JOB));
Assert.assertTrue(job2.checkAccess(ugi2, JobACL.VIEW_JOB));
// Setup configuration access with security enabled and access to all
Configuration conf3 = new Configuration();
conf3.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
conf3.set(MRJobConfig.JOB_ACL_VIEW_JOB, "*");
// Verify access
JobImpl job3 = new JobImpl(jobId, null, conf3, null, null, null, null, null,
null, null, null, true, user1, 0, null, null, null, null);
Assert.assertTrue(job3.checkAccess(ugi1, JobACL.VIEW_JOB));
Assert.assertTrue(job3.checkAccess(ugi2, JobACL.VIEW_JOB));
// Setup configuration access without security enabled
Configuration conf4 = new Configuration();
conf4.setBoolean(MRConfig.MR_ACLS_ENABLED, false);
conf4.set(MRJobConfig.JOB_ACL_VIEW_JOB, "");
// Verify access
JobImpl job4 = new JobImpl(jobId, null, conf4, null, null, null, null, null,
null, null, null, true, user1, 0, null, null, null, null);
Assert.assertTrue(job4.checkAccess(ugi1, JobACL.VIEW_JOB));
Assert.assertTrue(job4.checkAccess(ugi2, JobACL.VIEW_JOB));
    // Setup configuration with ACLs enabled; a null JobACL operation should
    // be permitted for any user
Configuration conf5 = new Configuration();
conf5.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
conf5.set(MRJobConfig.JOB_ACL_VIEW_JOB, "");
// Verify access
JobImpl job5 = new JobImpl(jobId, null, conf5, null, null, null, null, null,
null, null, null, true, user1, 0, null, null, null, null);
Assert.assertTrue(job5.checkAccess(ugi1, null));
Assert.assertTrue(job5.checkAccess(ugi2, null));
}
@Test
public void testReportDiagnostics() throws Exception {
JobID jobID = JobID.forName("job_1234567890000_0001");
JobId jobId = TypeConverter.toYarn(jobID);
final String diagMsg = "some diagnostic message";
final JobDiagnosticsUpdateEvent diagUpdateEvent =
new JobDiagnosticsUpdateEvent(jobId, diagMsg);
MRAppMetrics mrAppMetrics = MRAppMetrics.create();
AppContext mockContext = mock(AppContext.class);
when(mockContext.hasSuccessfullyUnregistered()).thenReturn(true);
JobImpl job = new JobImpl(jobId, Records
.newRecord(ApplicationAttemptId.class), new Configuration(),
mock(EventHandler.class),
null, mock(JobTokenSecretManager.class), null,
new SystemClock(), null,
mrAppMetrics, null, true, null, 0, null, mockContext, null, null);
job.handle(diagUpdateEvent);
String diagnostics = job.getReport().getDiagnostics();
Assert.assertNotNull(diagnostics);
Assert.assertTrue(diagnostics.contains(diagMsg));
job = new JobImpl(jobId, Records
.newRecord(ApplicationAttemptId.class), new Configuration(),
mock(EventHandler.class),
null, mock(JobTokenSecretManager.class), null,
new SystemClock(), null,
mrAppMetrics, null, true, null, 0, null, mockContext, null, null);
job.handle(new JobEvent(jobId, JobEventType.JOB_KILL));
job.handle(diagUpdateEvent);
diagnostics = job.getReport().getDiagnostics();
Assert.assertNotNull(diagnostics);
Assert.assertTrue(diagnostics.contains(diagMsg));
}
@Test
public void testUberDecision() throws Exception {
// with default values, no of maps is 2
Configuration conf = new Configuration();
boolean isUber = testUberDecision(conf);
Assert.assertFalse(isUber);
// enable uber mode, no of maps is 2
conf = new Configuration();
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, true);
isUber = testUberDecision(conf);
Assert.assertTrue(isUber);
// enable uber mode, no of maps is 2, no of reduces is 1 and uber task max
// reduces is 0
conf = new Configuration();
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, true);
conf.setInt(MRJobConfig.JOB_UBERTASK_MAXREDUCES, 0);
conf.setInt(MRJobConfig.NUM_REDUCES, 1);
isUber = testUberDecision(conf);
Assert.assertFalse(isUber);
// enable uber mode, no of maps is 2, no of reduces is 1 and uber task max
// reduces is 1
conf = new Configuration();
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, true);
conf.setInt(MRJobConfig.JOB_UBERTASK_MAXREDUCES, 1);
conf.setInt(MRJobConfig.NUM_REDUCES, 1);
isUber = testUberDecision(conf);
Assert.assertTrue(isUber);
    // enable uber mode, no of maps is 2 but uber task max maps is only 1
conf = new Configuration();
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, true);
conf.setInt(MRJobConfig.JOB_UBERTASK_MAXMAPS, 1);
isUber = testUberDecision(conf);
Assert.assertFalse(isUber);
    // enable uber mode with 0 reducers; reducer memory and vcore settings
    // should not affect the uber decision
conf = new Configuration();
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, true);
conf.setInt(MRJobConfig.NUM_REDUCES, 0);
conf.setInt(MRJobConfig.REDUCE_MEMORY_MB, 2048);
conf.setInt(MRJobConfig.REDUCE_CPU_VCORES, 10);
isUber = testUberDecision(conf);
Assert.assertTrue(isUber);
}
private boolean testUberDecision(Configuration conf) {
JobID jobID = JobID.forName("job_1234567890000_0001");
JobId jobId = TypeConverter.toYarn(jobID);
MRAppMetrics mrAppMetrics = MRAppMetrics.create();
JobImpl job =
new JobImpl(jobId, ApplicationAttemptId.newInstance(
ApplicationId.newInstance(0, 0), 0), conf, mock(EventHandler.class),
null, new JobTokenSecretManager(), new Credentials(), null, null,
mrAppMetrics, null, true, null, 0, null, null, null, null);
InitTransition initTransition = getInitTransition(2);
JobEvent mockJobEvent = mock(JobEvent.class);
initTransition.transition(job, mockJobEvent);
boolean isUber = job.isUber();
return isUber;
}
private static InitTransition getInitTransition(final int numSplits) {
InitTransition initTransition = new InitTransition() {
@Override
protected TaskSplitMetaInfo[] createSplits(JobImpl job, JobId jobId) {
TaskSplitMetaInfo[] splits = new TaskSplitMetaInfo[numSplits];
for (int i = 0; i < numSplits; ++i) {
splits[i] = new TaskSplitMetaInfo();
}
return splits;
}
};
return initTransition;
}
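  // Verifies that a job that fails during setup stays FAILED regardless of
  // late task events, and reports FAILED externally only after the AM has
  // successfully unregistered.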
@Test
public void testTransitionsAtFailed() throws IOException {
Configuration conf = new Configuration();
AsyncDispatcher dispatcher = new AsyncDispatcher();
dispatcher.init(conf);
dispatcher.start();
OutputCommitter committer = mock(OutputCommitter.class);
doThrow(new IOException("forcefail"))
.when(committer).setupJob(any(JobContext.class));
CommitterEventHandler commitHandler =
createCommitterEventHandler(dispatcher, committer);
commitHandler.init(conf);
commitHandler.start();
AppContext mockContext = mock(AppContext.class);
when(mockContext.hasSuccessfullyUnregistered()).thenReturn(false);
JobImpl job = createStubbedJob(conf, dispatcher, 2, mockContext);
JobId jobId = job.getID();
job.handle(new JobEvent(jobId, JobEventType.JOB_INIT));
assertJobState(job, JobStateInternal.INITED);
job.handle(new JobStartEvent(jobId));
assertJobState(job, JobStateInternal.FAILED);
job.handle(new JobEvent(jobId, JobEventType.JOB_TASK_COMPLETED));
assertJobState(job, JobStateInternal.FAILED);
job.handle(new JobEvent(jobId, JobEventType.JOB_TASK_ATTEMPT_COMPLETED));
assertJobState(job, JobStateInternal.FAILED);
job.handle(new JobEvent(jobId, JobEventType.JOB_MAP_TASK_RESCHEDULED));
assertJobState(job, JobStateInternal.FAILED);
job.handle(new JobEvent(jobId, JobEventType.JOB_TASK_ATTEMPT_FETCH_FAILURE));
assertJobState(job, JobStateInternal.FAILED);
Assert.assertEquals(JobState.RUNNING, job.getState());
when(mockContext.hasSuccessfullyUnregistered()).thenReturn(true);
Assert.assertEquals(JobState.FAILED, job.getState());
dispatcher.stop();
commitHandler.stop();
}
static final String EXCEPTIONMSG = "Splits max exceeded";
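  // Verifies that an exception thrown while creating splits during init
  // leaves the job in NEW and records the failure in the job diagnostics.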
@Test
public void testMetaInfoSizeOverMax() throws Exception {
Configuration conf = new Configuration();
JobID jobID = JobID.forName("job_1234567890000_0001");
JobId jobId = TypeConverter.toYarn(jobID);
MRAppMetrics mrAppMetrics = MRAppMetrics.create();
JobImpl job =
new JobImpl(jobId, ApplicationAttemptId.newInstance(
ApplicationId.newInstance(0, 0), 0), conf, mock(EventHandler.class),
null, new JobTokenSecretManager(), new Credentials(), null, null,
mrAppMetrics, null, true, null, 0, null, null, null, null);
InitTransition initTransition = new InitTransition() {
@Override
protected TaskSplitMetaInfo[] createSplits(JobImpl job, JobId jobId) {
throw new YarnRuntimeException(EXCEPTIONMSG);
}
};
JobEvent mockJobEvent = mock(JobEvent.class);
JobStateInternal jobSI = initTransition.transition(job, mockJobEvent);
Assert.assertTrue("When init fails, return value from InitTransition.transition should equal NEW.",
jobSI.equals(JobStateInternal.NEW));
Assert.assertTrue("Job diagnostics should contain YarnRuntimeException",
job.getDiagnostics().toString().contains("YarnRuntimeException"));
Assert.assertTrue("Job diagnostics should contain " + EXCEPTIONMSG,
job.getDiagnostics().toString().contains(EXCEPTIONMSG));
}
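  // Builds a CommitterEventHandler wired to the given dispatcher, using a
  // mocked AppContext and an RMHeartbeatHandler whose heartbeat is always
  // current and which runs callbacks immediately.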
private static CommitterEventHandler createCommitterEventHandler(
Dispatcher dispatcher, OutputCommitter committer) {
final SystemClock clock = new SystemClock();
AppContext appContext = mock(AppContext.class);
when(appContext.getEventHandler()).thenReturn(
dispatcher.getEventHandler());
when(appContext.getClock()).thenReturn(clock);
RMHeartbeatHandler heartbeatHandler = new RMHeartbeatHandler() {
@Override
public long getLastHeartbeatTime() {
return clock.getTime();
}
@Override
public void runOnNextHeartbeat(Runnable callback) {
callback.run();
}
};
ApplicationAttemptId id =
ConverterUtils.toApplicationAttemptId("appattempt_1234567890000_0001_0");
when(appContext.getApplicationID()).thenReturn(id.getApplicationId());
when(appContext.getApplicationAttemptId()).thenReturn(id);
CommitterEventHandler handler =
new CommitterEventHandler(appContext, committer, heartbeatHandler);
dispatcher.register(CommitterEventType.class, handler);
return handler;
}
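  // Builds a StubbedJob registered with the dispatcher; task, history, and
  // job-finish events are routed to a mock handler so they are ignored.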
private static StubbedJob createStubbedJob(Configuration conf,
Dispatcher dispatcher, int numSplits, AppContext appContext) {
JobID jobID = JobID.forName("job_1234567890000_0001");
JobId jobId = TypeConverter.toYarn(jobID);
if (appContext == null) {
appContext = mock(AppContext.class);
when(appContext.hasSuccessfullyUnregistered()).thenReturn(true);
}
StubbedJob job = new StubbedJob(jobId,
ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 0), 0),
conf,dispatcher.getEventHandler(), true, "somebody", numSplits, appContext);
dispatcher.register(JobEventType.class, job);
EventHandler mockHandler = mock(EventHandler.class);
dispatcher.register(TaskEventType.class, mockHandler);
dispatcher.register(org.apache.hadoop.mapreduce.jobhistory.EventType.class,
mockHandler);
dispatcher.register(JobFinishEvent.Type.class, mockHandler);
return job;
}
private static StubbedJob createRunningStubbedJob(Configuration conf,
Dispatcher dispatcher, int numSplits, AppContext appContext) {
StubbedJob job = createStubbedJob(conf, dispatcher, numSplits, appContext);
job.handle(new JobEvent(job.getID(), JobEventType.JOB_INIT));
assertJobState(job, JobStateInternal.INITED);
job.handle(new JobStartEvent(job.getID()));
assertJobState(job, JobStateInternal.RUNNING);
return job;
}
private static void completeJobTasks(JobImpl job) {
// complete the map tasks and the reduce tasks so we start committing
int numMaps = job.getTotalMaps();
for (int i = 0; i < numMaps; ++i) {
job.handle(new JobTaskEvent(
MRBuilderUtils.newTaskId(job.getID(), 1, TaskType.MAP),
TaskState.SUCCEEDED));
Assert.assertEquals(JobState.RUNNING, job.getState());
}
int numReduces = job.getTotalReduces();
for (int i = 0; i < numReduces; ++i) {
job.handle(new JobTaskEvent(
MRBuilderUtils.newTaskId(job.getID(), 1, TaskType.MAP),
TaskState.SUCCEEDED));
Assert.assertEquals(JobState.RUNNING, job.getState());
}
}
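  // Polls the job's internal state for up to five seconds, then asserts that
  // it matches the expected state.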
private static void assertJobState(JobImpl job, JobStateInternal state) {
int timeToWaitMsec = 5 * 1000;
while (timeToWaitMsec > 0 && job.getInternalState() != state) {
try {
Thread.sleep(10);
timeToWaitMsec -= 10;
} catch (InterruptedException e) {
break;
}
}
Assert.assertEquals(state, job.getInternalState());
}
private static class JobSubmittedEventHandler implements
EventHandler<JobHistoryEvent> {
private String workflowId;
private String workflowName;
private String workflowNodeName;
private String workflowAdjacencies;
private String workflowTags;
private Boolean assertBoolean;
public JobSubmittedEventHandler(String workflowId, String workflowName,
String workflowNodeName, String workflowAdjacencies,
String workflowTags) {
this.workflowId = workflowId;
this.workflowName = workflowName;
this.workflowNodeName = workflowNodeName;
this.workflowAdjacencies = workflowAdjacencies;
this.workflowTags = workflowTags;
assertBoolean = null;
}
@Override
public void handle(JobHistoryEvent jhEvent) {
if (jhEvent.getType() != EventType.JOB_SUBMITTED) {
return;
}
JobSubmittedEvent jsEvent = (JobSubmittedEvent) jhEvent.getHistoryEvent();
if (!workflowId.equals(jsEvent.getWorkflowId())) {
setAssertValue(false);
return;
}
if (!workflowName.equals(jsEvent.getWorkflowName())) {
setAssertValue(false);
return;
}
if (!workflowNodeName.equals(jsEvent.getWorkflowNodeName())) {
setAssertValue(false);
return;
}
String[] wrkflowAdj = workflowAdjacencies.split(" ");
String[] jswrkflowAdj = jsEvent.getWorkflowAdjacencies().split(" ");
Arrays.sort(wrkflowAdj);
Arrays.sort(jswrkflowAdj);
if (!Arrays.equals(wrkflowAdj, jswrkflowAdj)) {
setAssertValue(false);
return;
}
if (!workflowTags.equals(jsEvent.getWorkflowTags())) {
setAssertValue(false);
return;
}
setAssertValue(true);
}
private synchronized void setAssertValue(Boolean bool) {
assertBoolean = bool;
notify();
}
public synchronized boolean getAssertValue() throws InterruptedException {
while (assertBoolean == null) {
wait();
}
return assertBoolean;
}
}
private static class StubbedJob extends JobImpl {
//override the init transition
private final InitTransition initTransition;
StateMachineFactory<JobImpl, JobStateInternal, JobEventType, JobEvent>
localFactory;
private final StateMachine<JobStateInternal, JobEventType, JobEvent>
localStateMachine;
@Override
protected StateMachine<JobStateInternal, JobEventType, JobEvent> getStateMachine() {
return localStateMachine;
}
public StubbedJob(JobId jobId, ApplicationAttemptId applicationAttemptId,
Configuration conf, EventHandler eventHandler, boolean newApiCommitter,
String user, int numSplits, AppContext appContext) {
super(jobId, applicationAttemptId, conf, eventHandler,
null, new JobTokenSecretManager(), new Credentials(),
new SystemClock(), Collections.<TaskId, TaskInfo> emptyMap(),
MRAppMetrics.create(), null, newApiCommitter, user,
System.currentTimeMillis(), null, appContext, null, null);
initTransition = getInitTransition(numSplits);
localFactory = stateMachineFactory.addTransition(JobStateInternal.NEW,
EnumSet.of(JobStateInternal.INITED, JobStateInternal.FAILED),
JobEventType.JOB_INIT,
// This is abusive.
initTransition);
// This "this leak" is okay because the retained pointer is in an
// instance variable.
localStateMachine = localFactory.make(this);
}
}
private static class StubbedOutputCommitter extends OutputCommitter {
public StubbedOutputCommitter() {
super();
}
@Override
public void setupJob(JobContext jobContext) throws IOException {
}
@Override
public void setupTask(TaskAttemptContext taskContext) throws IOException {
}
@Override
public boolean needsTaskCommit(TaskAttemptContext taskContext)
throws IOException {
return false;
}
@Override
public void commitTask(TaskAttemptContext taskContext) throws IOException {
}
@Override
public void abortTask(TaskAttemptContext taskContext) throws IOException {
}
}
private static class TestingOutputCommitter extends StubbedOutputCommitter {
CyclicBarrier syncBarrier;
boolean shouldSucceed;
public TestingOutputCommitter(CyclicBarrier syncBarrier,
boolean shouldSucceed) {
super();
this.syncBarrier = syncBarrier;
this.shouldSucceed = shouldSucceed;
}
@Override
public void commitJob(JobContext jobContext) throws IOException {
try {
syncBarrier.await();
} catch (BrokenBarrierException e) {
} catch (InterruptedException e) {
}
if (!shouldSucceed) {
throw new IOException("forced failure");
}
}
}
private static class WaitingOutputCommitter extends TestingOutputCommitter {
public WaitingOutputCommitter(CyclicBarrier syncBarrier,
boolean shouldSucceed) {
super(syncBarrier, shouldSucceed);
}
@Override
public void commitJob(JobContext jobContext) throws IOException {
try {
syncBarrier.await();
} catch (BrokenBarrierException e) {
} catch (InterruptedException e) {
}
while (!Thread.interrupted()) {
try {
synchronized (this) {
wait();
}
} catch (InterruptedException e) {
break;
}
}
}
}
}
| 39,886
| 37.060115
| 103
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestMapReduceChildJVM.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job.impl;
import java.util.Map;
import org.junit.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.MRApp;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncher;
import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherEvent;
import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerRemoteLaunchEvent;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.junit.Test;
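/**
 * Tests the child JVM launch command line and environment that the MR AM
 * builds for map and reduce tasks under various logging, shuffle, and
 * environment configurations.
 */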
public class TestMapReduceChildJVM {
private static final Log LOG = LogFactory.getLog(TestMapReduceChildJVM.class);
@Test (timeout = 30000)
public void testCommandLine() throws Exception {
MyMRApp app = new MyMRApp(1, 0, true, this.getClass().getName(), true);
Configuration conf = new Configuration();
conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM, true);
Job job = app.submit(conf);
app.waitForState(job, JobState.SUCCEEDED);
app.verifyCompleted();
Assert.assertEquals(
"[" + MRApps.crossPlatformify("JAVA_HOME") + "/bin/java" +
" -Djava.net.preferIPv4Stack=true" +
" -Dhadoop.metrics.log.level=WARN" +
" -Xmx200m -Djava.io.tmpdir=" + MRApps.crossPlatformify("PWD") + "/tmp" +
" -Dlog4j.configuration=container-log4j.properties" +
" -Dyarn.app.container.log.dir=<LOG_DIR>" +
" -Dyarn.app.container.log.filesize=0" +
" -Dhadoop.root.logger=INFO,CLA -Dhadoop.root.logfile=syslog" +
" org.apache.hadoop.mapred.YarnChild 127.0.0.1" +
" 54321" +
" attempt_0_0000_m_000000_0" +
" 0" +
" 1><LOG_DIR>/stdout" +
" 2><LOG_DIR>/stderr ]", app.myCommandLine);
Assert.assertTrue("HADOOP_ROOT_LOGGER not set for job",
app.cmdEnvironment.containsKey("HADOOP_ROOT_LOGGER"));
Assert.assertEquals("INFO,console",
app.cmdEnvironment.get("HADOOP_ROOT_LOGGER"));
Assert.assertTrue("HADOOP_CLIENT_OPTS not set for job",
app.cmdEnvironment.containsKey("HADOOP_CLIENT_OPTS"));
Assert.assertEquals("", app.cmdEnvironment.get("HADOOP_CLIENT_OPTS"));
}
@Test (timeout = 30000)
public void testReduceCommandLineWithSeparateShuffle() throws Exception {
final Configuration conf = new Configuration();
conf.setBoolean(MRJobConfig.REDUCE_SEPARATE_SHUFFLE_LOG, true);
testReduceCommandLine(conf);
}
@Test (timeout = 30000)
public void testReduceCommandLineWithSeparateCRLAShuffle() throws Exception {
final Configuration conf = new Configuration();
conf.setBoolean(MRJobConfig.REDUCE_SEPARATE_SHUFFLE_LOG, true);
conf.setLong(MRJobConfig.SHUFFLE_LOG_KB, 1L);
conf.setInt(MRJobConfig.SHUFFLE_LOG_BACKUPS, 3);
testReduceCommandLine(conf);
}
@Test (timeout = 30000)
public void testReduceCommandLine() throws Exception {
final Configuration conf = new Configuration();
testReduceCommandLine(conf);
}
private void testReduceCommandLine(Configuration conf)
throws Exception {
MyMRApp app = new MyMRApp(0, 1, true, this.getClass().getName(), true);
conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM, true);
Job job = app.submit(conf);
app.waitForState(job, JobState.SUCCEEDED);
app.verifyCompleted();
final long shuffleLogSize =
conf.getLong(MRJobConfig.SHUFFLE_LOG_KB, 0L) * 1024L;
final int shuffleBackups = conf.getInt(MRJobConfig.SHUFFLE_LOG_BACKUPS, 0);
final String appenderName = shuffleLogSize > 0L && shuffleBackups > 0
? "shuffleCRLA"
: "shuffleCLA";
Assert.assertEquals(
"[" + MRApps.crossPlatformify("JAVA_HOME") + "/bin/java" +
" -Djava.net.preferIPv4Stack=true" +
" -Dhadoop.metrics.log.level=WARN" +
" -Xmx200m -Djava.io.tmpdir=" + MRApps.crossPlatformify("PWD") + "/tmp" +
" -Dlog4j.configuration=container-log4j.properties" +
" -Dyarn.app.container.log.dir=<LOG_DIR>" +
" -Dyarn.app.container.log.filesize=0" +
" -Dhadoop.root.logger=INFO,CLA -Dhadoop.root.logfile=syslog" +
" -Dyarn.app.mapreduce.shuffle.logger=INFO," + appenderName +
" -Dyarn.app.mapreduce.shuffle.logfile=syslog.shuffle" +
" -Dyarn.app.mapreduce.shuffle.log.filesize=" + shuffleLogSize +
" -Dyarn.app.mapreduce.shuffle.log.backups=" + shuffleBackups +
" org.apache.hadoop.mapred.YarnChild 127.0.0.1" +
" 54321" +
" attempt_0_0000_r_000000_0" +
" 0" +
" 1><LOG_DIR>/stdout" +
" 2><LOG_DIR>/stderr ]", app.myCommandLine);
Assert.assertTrue("HADOOP_ROOT_LOGGER not set for job",
app.cmdEnvironment.containsKey("HADOOP_ROOT_LOGGER"));
Assert.assertEquals("INFO,console",
app.cmdEnvironment.get("HADOOP_ROOT_LOGGER"));
Assert.assertTrue("HADOOP_CLIENT_OPTS not set for job",
app.cmdEnvironment.containsKey("HADOOP_CLIENT_OPTS"));
Assert.assertEquals("", app.cmdEnvironment.get("HADOOP_CLIENT_OPTS"));
}
@Test (timeout = 30000)
  public void testCommandLineWithLog4JConfig() throws Exception {
MyMRApp app = new MyMRApp(1, 0, true, this.getClass().getName(), true);
Configuration conf = new Configuration();
conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM, true);
    String testLogPropertiesFile = "test-log4j.properties";
    String testLogPropertiesPath = "../" + testLogPropertiesFile;
    conf.set(MRJobConfig.MAPREDUCE_JOB_LOG4J_PROPERTIES_FILE, testLogPropertiesPath);
Job job = app.submit(conf);
app.waitForState(job, JobState.SUCCEEDED);
app.verifyCompleted();
Assert.assertEquals(
"[" + MRApps.crossPlatformify("JAVA_HOME") + "/bin/java" +
" -Djava.net.preferIPv4Stack=true" +
" -Dhadoop.metrics.log.level=WARN" +
" -Xmx200m -Djava.io.tmpdir=" + MRApps.crossPlatformify("PWD") + "/tmp" +
" -Dlog4j.configuration=" + testLogPropertieFile +
" -Dyarn.app.container.log.dir=<LOG_DIR>" +
" -Dyarn.app.container.log.filesize=0" +
" -Dhadoop.root.logger=INFO,CLA -Dhadoop.root.logfile=syslog" +
" org.apache.hadoop.mapred.YarnChild 127.0.0.1" +
" 54321" +
" attempt_0_0000_m_000000_0" +
" 0" +
" 1><LOG_DIR>/stdout" +
" 2><LOG_DIR>/stderr ]", app.myCommandLine);
}
private static final class MyMRApp extends MRApp {
private String myCommandLine;
private Map<String, String> cmdEnvironment;
public MyMRApp(int maps, int reduces, boolean autoComplete,
String testName, boolean cleanOnStart) {
super(maps, reduces, autoComplete, testName, cleanOnStart);
}
@Override
protected ContainerLauncher createContainerLauncher(AppContext context) {
return new MockContainerLauncher() {
@Override
public void handle(ContainerLauncherEvent event) {
if (event.getType() == EventType.CONTAINER_REMOTE_LAUNCH) {
ContainerRemoteLaunchEvent launchEvent = (ContainerRemoteLaunchEvent) event;
ContainerLaunchContext launchContext =
launchEvent.getContainerLaunchContext();
String cmdString = launchContext.getCommands().toString();
LOG.info("launchContext " + cmdString);
myCommandLine = cmdString;
cmdEnvironment = launchContext.getEnvironment();
}
super.handle(event);
}
};
}
}
@Test
public void testEnvironmentVariables() throws Exception {
MyMRApp app = new MyMRApp(1, 0, true, this.getClass().getName(), true);
Configuration conf = new Configuration();
conf.set(JobConf.MAPRED_MAP_TASK_ENV, "HADOOP_CLIENT_OPTS=test");
conf.setStrings(MRJobConfig.MAP_LOG_LEVEL, "WARN");
conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM, false);
Job job = app.submit(conf);
app.waitForState(job, JobState.SUCCEEDED);
app.verifyCompleted();
Assert.assertTrue("HADOOP_ROOT_LOGGER not set for job",
app.cmdEnvironment.containsKey("HADOOP_ROOT_LOGGER"));
Assert.assertEquals("WARN,console",
app.cmdEnvironment.get("HADOOP_ROOT_LOGGER"));
Assert.assertTrue("HADOOP_CLIENT_OPTS not set for job",
app.cmdEnvironment.containsKey("HADOOP_CLIENT_OPTS"));
Assert.assertEquals("test", app.cmdEnvironment.get("HADOOP_CLIENT_OPTS"));
// Try one more.
app = new MyMRApp(1, 0, true, this.getClass().getName(), true);
conf = new Configuration();
conf.set(JobConf.MAPRED_MAP_TASK_ENV, "HADOOP_ROOT_LOGGER=trace");
job = app.submit(conf);
app.waitForState(job, JobState.SUCCEEDED);
app.verifyCompleted();
Assert.assertTrue("HADOOP_ROOT_LOGGER not set for job",
app.cmdEnvironment.containsKey("HADOOP_ROOT_LOGGER"));
Assert.assertEquals("trace",
app.cmdEnvironment.get("HADOOP_ROOT_LOGGER"));
}
}
| 10,124
| 41.012448
| 88
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/commit/TestCommitterEventHandler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.commit;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import java.util.concurrent.ConcurrentLinkedQueue;
import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;
import org.apache.hadoop.mapreduce.v2.app.rm.RMHeartbeatHandler;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.EventHandler;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
import java.io.File;
import java.io.IOException;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobCommitCompletedEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobCommitFailedEvent;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.Event;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.util.Clock;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.SystemClock;
import org.junit.After;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestCommitterEventHandler {
public static class WaitForItHandler implements EventHandler {
private Event event = null;
@Override
public synchronized void handle(Event event) {
this.event = event;
notifyAll();
}
public synchronized Event getAndClearEvent() throws InterruptedException {
if (event == null) {
final long waitTime = 5000;
long waitStartTime = Time.monotonicNow();
while(event == null && Time.monotonicNow() - waitStartTime < waitTime) {
//Wait for at most 5 sec
wait(waitTime);
}
}
Event e = event;
event = null;
return e;
}
}
static String stagingDir = "target/test-staging/";
@BeforeClass
public static void setup() {
File dir = new File(stagingDir);
stagingDir = dir.getAbsolutePath();
}
@Before
public void cleanup() throws IOException {
File dir = new File(stagingDir);
if(dir.exists()) {
FileUtils.deleteDirectory(dir);
}
dir.mkdirs();
}
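  // Verifies that the committer holds back a job commit while the RM
  // heartbeat is stale and proceeds once a fresh heartbeat arrives.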
@Test
public void testCommitWindow() throws Exception {
Configuration conf = new Configuration();
conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
AsyncDispatcher dispatcher = new AsyncDispatcher();
dispatcher.init(conf);
dispatcher.start();
TestingJobEventHandler jeh = new TestingJobEventHandler();
dispatcher.register(JobEventType.class, jeh);
SystemClock clock = new SystemClock();
AppContext appContext = mock(AppContext.class);
ApplicationAttemptId attemptid =
ConverterUtils.toApplicationAttemptId("appattempt_1234567890000_0001_0");
when(appContext.getApplicationID()).thenReturn(attemptid.getApplicationId());
when(appContext.getApplicationAttemptId()).thenReturn(attemptid);
when(appContext.getEventHandler()).thenReturn(
dispatcher.getEventHandler());
when(appContext.getClock()).thenReturn(clock);
OutputCommitter committer = mock(OutputCommitter.class);
TestingRMHeartbeatHandler rmhh =
new TestingRMHeartbeatHandler();
CommitterEventHandler ceh = new CommitterEventHandler(appContext,
committer, rmhh);
ceh.init(conf);
ceh.start();
// verify trying to commit when RM heartbeats are stale does not commit
ceh.handle(new CommitterJobCommitEvent(null, null));
long timeToWaitMs = 5000;
while (rmhh.getNumCallbacks() != 1 && timeToWaitMs > 0) {
Thread.sleep(10);
timeToWaitMs -= 10;
}
Assert.assertEquals("committer did not register a heartbeat callback",
1, rmhh.getNumCallbacks());
verify(committer, never()).commitJob(any(JobContext.class));
Assert.assertEquals("committer should not have committed",
0, jeh.numCommitCompletedEvents);
// set a fresh heartbeat and verify commit completes
rmhh.setLastHeartbeatTime(clock.getTime());
timeToWaitMs = 5000;
while (jeh.numCommitCompletedEvents != 1 && timeToWaitMs > 0) {
Thread.sleep(10);
timeToWaitMs -= 10;
}
Assert.assertEquals("committer did not complete commit after RM hearbeat",
1, jeh.numCommitCompletedEvents);
verify(committer, times(1)).commitJob(any(JobContext.class));
//Clean up so we can try to commit again (Don't do this at home)
cleanup();
// try to commit again and verify it goes through since the heartbeat
// is still fresh
ceh.handle(new CommitterJobCommitEvent(null, null));
timeToWaitMs = 5000;
while (jeh.numCommitCompletedEvents != 2 && timeToWaitMs > 0) {
Thread.sleep(10);
timeToWaitMs -= 10;
}
Assert.assertEquals("committer did not commit",
2, jeh.numCommitCompletedEvents);
verify(committer, times(2)).commitJob(any(JobContext.class));
ceh.stop();
dispatcher.stop();
}
private static class TestingRMHeartbeatHandler
implements RMHeartbeatHandler {
private long lastHeartbeatTime = 0;
private ConcurrentLinkedQueue<Runnable> callbacks =
new ConcurrentLinkedQueue<Runnable>();
@Override
public long getLastHeartbeatTime() {
return lastHeartbeatTime;
}
@Override
public void runOnNextHeartbeat(Runnable callback) {
callbacks.add(callback);
}
public void setLastHeartbeatTime(long timestamp) {
lastHeartbeatTime = timestamp;
Runnable callback = null;
while ((callback = callbacks.poll()) != null) {
callback.run();
}
}
public int getNumCallbacks() {
return callbacks.size();
}
}
private static class TestingJobEventHandler
implements EventHandler<JobEvent> {
int numCommitCompletedEvents = 0;
@Override
public void handle(JobEvent event) {
if (event.getType() == JobEventType.JOB_COMMIT_COMPLETED) {
++numCommitCompletedEvents;
}
}
}
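  // Verifies the normal commit path: the start and success marker files are
  // written, the failure marker is not, and commitJob() is invoked.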
@Test
public void testBasic() throws Exception {
AppContext mockContext = mock(AppContext.class);
OutputCommitter mockCommitter = mock(OutputCommitter.class);
Clock mockClock = mock(Clock.class);
CommitterEventHandler handler = new CommitterEventHandler(mockContext,
mockCommitter, new TestingRMHeartbeatHandler());
YarnConfiguration conf = new YarnConfiguration();
conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
JobContext mockJobContext = mock(JobContext.class);
ApplicationAttemptId attemptid =
ConverterUtils.toApplicationAttemptId("appattempt_1234567890000_0001_0");
JobId jobId = TypeConverter.toYarn(
TypeConverter.fromYarn(attemptid.getApplicationId()));
WaitForItHandler waitForItHandler = new WaitForItHandler();
when(mockContext.getApplicationID()).thenReturn(attemptid.getApplicationId());
when(mockContext.getApplicationAttemptId()).thenReturn(attemptid);
when(mockContext.getEventHandler()).thenReturn(waitForItHandler);
when(mockContext.getClock()).thenReturn(mockClock);
handler.init(conf);
handler.start();
try {
handler.handle(new CommitterJobCommitEvent(jobId, mockJobContext));
String user = UserGroupInformation.getCurrentUser().getShortUserName();
Path startCommitFile = MRApps.getStartJobCommitFile(conf, user, jobId);
Path endCommitSuccessFile = MRApps.getEndJobCommitSuccessFile(conf, user,
jobId);
Path endCommitFailureFile = MRApps.getEndJobCommitFailureFile(conf, user,
jobId);
Event e = waitForItHandler.getAndClearEvent();
assertNotNull(e);
assertTrue(e instanceof JobCommitCompletedEvent);
FileSystem fs = FileSystem.get(conf);
assertTrue(startCommitFile.toString(), fs.exists(startCommitFile));
assertTrue(endCommitSuccessFile.toString(), fs.exists(endCommitSuccessFile));
assertFalse(endCommitFailureFile.toString(), fs.exists(endCommitFailureFile));
verify(mockCommitter).commitJob(any(JobContext.class));
} finally {
handler.stop();
}
}
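  // Verifies the failing commit path: commitJob() throws, the failure marker
  // file is written, and a JobCommitFailedEvent is dispatched.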
@Test
public void testFailure() throws Exception {
AppContext mockContext = mock(AppContext.class);
OutputCommitter mockCommitter = mock(OutputCommitter.class);
Clock mockClock = mock(Clock.class);
CommitterEventHandler handler = new CommitterEventHandler(mockContext,
mockCommitter, new TestingRMHeartbeatHandler());
YarnConfiguration conf = new YarnConfiguration();
conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
JobContext mockJobContext = mock(JobContext.class);
ApplicationAttemptId attemptid =
ConverterUtils.toApplicationAttemptId("appattempt_1234567890000_0001_0");
JobId jobId = TypeConverter.toYarn(
TypeConverter.fromYarn(attemptid.getApplicationId()));
WaitForItHandler waitForItHandler = new WaitForItHandler();
when(mockContext.getApplicationID()).thenReturn(attemptid.getApplicationId());
when(mockContext.getApplicationAttemptId()).thenReturn(attemptid);
when(mockContext.getEventHandler()).thenReturn(waitForItHandler);
when(mockContext.getClock()).thenReturn(mockClock);
doThrow(new YarnRuntimeException("Intentional Failure")).when(mockCommitter)
.commitJob(any(JobContext.class));
handler.init(conf);
handler.start();
try {
handler.handle(new CommitterJobCommitEvent(jobId, mockJobContext));
String user = UserGroupInformation.getCurrentUser().getShortUserName();
Path startCommitFile = MRApps.getStartJobCommitFile(conf, user, jobId);
Path endCommitSuccessFile = MRApps.getEndJobCommitSuccessFile(conf, user,
jobId);
Path endCommitFailureFile = MRApps.getEndJobCommitFailureFile(conf, user,
jobId);
Event e = waitForItHandler.getAndClearEvent();
assertNotNull(e);
assertTrue(e instanceof JobCommitFailedEvent);
FileSystem fs = FileSystem.get(conf);
assertTrue(fs.exists(startCommitFile));
assertFalse(fs.exists(endCommitSuccessFile));
assertTrue(fs.exists(endCommitFailureFile));
verify(mockCommitter).commitJob(any(JobContext.class));
} finally {
handler.stop();
}
}
}
| 12,154
| 35.722054
| 84
|
java
|