| repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string) |
|---|---|---|---|---|---|---|
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/TaskAttemptStateInternal.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job;
import org.apache.hadoop.classification.InterfaceAudience.Private;
/**
 * TaskAttemptImpl internal state machine states.
 */
@Private
public enum TaskAttemptStateInternal {
NEW,
UNASSIGNED,
ASSIGNED,
RUNNING,
COMMIT_PENDING,
// Transition into SUCCESS_FINISHING_CONTAINER
// After the attempt finishes successfully from
// TaskUmbilicalProtocol's point of view, it will transition to
// SUCCESS_FINISHING_CONTAINER state. That will give a chance for the
// container to exit by itself. In the transition,
// the attempt will notify the task via T_ATTEMPT_SUCCEEDED so that
// from job point of view, the task is considered succeeded.
// Transition out of SUCCESS_FINISHING_CONTAINER
// The attempt will transition from SUCCESS_FINISHING_CONTAINER to
// SUCCESS_CONTAINER_CLEANUP if it doesn't receive container exit
// notification within TASK_EXIT_TIMEOUT;
// Or it will transition to SUCCEEDED if it receives container exit
// notification from YARN.
SUCCESS_FINISHING_CONTAINER,
// Transition into FAIL_FINISHING_CONTAINER
// After the attempt fails from
// TaskUmbilicalProtocol's point of view, it will transition to
// FAIL_FINISHING_CONTAINER state. That will give a chance for the container
// to exit by itself. In the transition,
// the attempt will notify the task via T_ATTEMPT_FAILED so that
// from job point of view, the task is considered failed.
// Transition out of FAIL_FINISHING_CONTAINER
// The attempt will transition from FAIL_FINISHING_CONTAINER to
// FAIL_CONTAINER_CLEANUP if it doesn't receive container exit
// notification within TASK_EXIT_TIMEOUT;
// Or it will transition to FAILED if it receives container exit
// notification from YARN.
FAIL_FINISHING_CONTAINER,
SUCCESS_CONTAINER_CLEANUP,
SUCCEEDED,
FAIL_CONTAINER_CLEANUP,
FAIL_TASK_CLEANUP,
FAILED,
KILL_CONTAINER_CLEANUP,
KILL_TASK_CLEANUP,
KILLED,
}
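
A minimal sketch (not part of the Hadoop source) of the two exits documented above for the FINISHING states; FinishingExitSketch and its method names are hypothetical:

import static org.apache.hadoop.mapreduce.v2.app.job.TaskAttemptStateInternal.*;

// Hypothetical helper mirroring the transition comments above.
final class FinishingExitSketch {
  // Case 1: the container exit notification arrives from YARN in time.
  static TaskAttemptStateInternal onContainerExit(TaskAttemptStateInternal s) {
    if (s == SUCCESS_FINISHING_CONTAINER) { return SUCCEEDED; }
    if (s == FAIL_FINISHING_CONTAINER) { return FAILED; }
    return s; // other states are not touched by this sketch
  }

  // Case 2: TASK_EXIT_TIMEOUT elapses without an exit notification.
  static TaskAttemptStateInternal onExitTimeout(TaskAttemptStateInternal s) {
    if (s == SUCCESS_FINISHING_CONTAINER) { return SUCCESS_CONTAINER_CLEANUP; }
    if (s == FAIL_FINISHING_CONTAINER) { return FAIL_CONTAINER_CLEANUP; }
    return s;
  }
}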
| 2,775 | 36.013333 | 78 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/TaskStateInternal.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job;
public enum TaskStateInternal {
NEW, SCHEDULED, RUNNING, SUCCEEDED, FAILED, KILL_WAIT, KILLED
}
| 955 | 38.833333 | 75 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/TaskAttempt.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job;
import java.util.List;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.v2.api.records.Phase;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
/**
 * Read-only view of TaskAttempt.
 */
public interface TaskAttempt {
TaskAttemptId getID();
TaskAttemptReport getReport();
List<String> getDiagnostics();
Counters getCounters();
float getProgress();
Phase getPhase();
TaskAttemptState getState();
/**
 * Has the attempt reached the final state or not.
 * @return true if it has finished, else false
 */
boolean isFinished();
/**
* @return the container ID if a container is assigned, otherwise null.
*/
ContainerId getAssignedContainerID();
/**
* @return container mgr address if a container is assigned, otherwise null.
*/
String getAssignedContainerMgrAddress();
/**
* @return node's id if a container is assigned, otherwise null.
*/
NodeId getNodeId();
/**
* @return node's http address if a container is assigned, otherwise null.
*/
String getNodeHttpAddress();
/**
* @return node's rack name if a container is assigned, otherwise null.
*/
String getNodeRackName();
/**
* @return time at which container is launched. If container is not launched
* yet, returns 0.
*/
long getLaunchTime();
/**
* @return attempt's finish time. If attempt is not finished
* yet, returns 0.
*/
long getFinishTime();
/**
* @return The attempt's shuffle finish time if the attempt is a reduce. If
* attempt is not finished yet, returns 0.
*/
long getShuffleFinishTime();
/**
* @return The attempt's sort or merge finish time if the attempt is a reduce.
* If attempt is not finished yet, returns 0.
*/
long getSortFinishTime();
/**
* @return the port shuffle is on.
*/
public int getShufflePort();
}
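
The Javadoc above fixes a null contract: every container-derived getter returns null (and the time getters return 0) until the corresponding milestone. A hypothetical consumer of the read-only view, guarding on that contract:

import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;

// Hypothetical consumer; AttemptViewSketch is not part of Hadoop.
final class AttemptViewSketch {
  static String describe(TaskAttempt attempt) {
    if (attempt.getAssignedContainerID() == null) {
      return attempt.getID() + ": no container assigned yet";
    }
    return attempt.getID() + " on node " + attempt.getNodeId()
        + " (shuffle port " + attempt.getShufflePort() + ")";
  }
}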
| 2,966 | 27.528846 | 81 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Task.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job;
import java.util.Map;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
/**
 * Read-only view of Task.
 */
public interface Task {
TaskId getID();
TaskReport getReport();
TaskState getState();
Counters getCounters();
float getProgress();
TaskType getType();
Map<TaskAttemptId, TaskAttempt> getAttempts();
TaskAttempt getAttempt(TaskAttemptId attemptID);
/** Has the Task reached the final state or not.
 */
boolean isFinished();
/**
* Can the output of the taskAttempt be committed. Note that once the task
* gives a go for a commit, further canCommit requests from any other attempts
* should return false.
*
* @param taskAttemptID the attempt that is asking to commit its output.
* @return whether the attempt's output can be committed or not.
*/
boolean canCommit(TaskAttemptId taskAttemptID);
}
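
The canCommit contract above (once one attempt is granted the commit, any other attempt must get false) amounts to a single compare-and-set; a standalone, hypothetical sketch of that semantics:

import java.util.concurrent.atomic.AtomicReference;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;

// Hypothetical arbiter: the first attempt to ask wins and keeps winning;
// every other attempt is refused. Not Hadoop's actual implementation.
final class CommitArbiterSketch {
  private final AtomicReference<TaskAttemptId> winner = new AtomicReference<>();

  boolean canCommit(TaskAttemptId attemptId) {
    winner.compareAndSet(null, attemptId); // claims only if still unclaimed
    return attemptId.equals(winner.get());
  }
}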
| 1,944 | 31.966102 | 80 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Job.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.TaskCompletionEvent;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
/**
* Main interface to interact with the job.
*/
public interface Job {
JobId getID();
String getName();
JobState getState();
JobReport getReport();
/**
* Get all the counters of this job. This includes job-counters aggregated
* together with the counters of each task. This creates a clone of the
* Counters, so use this judiciously.
* @return job-counters and aggregate task-counters
*/
Counters getAllCounters();
Map<TaskId,Task> getTasks();
Map<TaskId,Task> getTasks(TaskType taskType);
Task getTask(TaskId taskID);
List<String> getDiagnostics();
int getTotalMaps();
int getTotalReduces();
int getCompletedMaps();
int getCompletedReduces();
float getProgress();
boolean isUber();
String getUserName();
String getQueueName();
/**
* @return a path to where the config file for this job is located.
*/
Path getConfFile();
/**
* @return a parsed version of the config files pointed to by
* {@link #getConfFile()}.
* @throws IOException on any error trying to load the conf file.
*/
Configuration loadConfFile() throws IOException;
/**
* @return the ACLs for this job for each type of JobACL given.
*/
Map<JobACL, AccessControlList> getJobACLs();
TaskAttemptCompletionEvent[]
getTaskAttemptCompletionEvents(int fromEventId, int maxEvents);
TaskCompletionEvent[]
getMapAttemptCompletionEvents(int startIndex, int maxEvents);
/**
* @return information for MR AppMasters (previously failed and current)
*/
List<AMInfo> getAMInfos();
boolean checkAccess(UserGroupInformation callerUGI, JobACL jobOperation);
public void setQueueName(String queueName);
}
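
getTaskAttemptCompletionEvents exposes a (fromEventId, maxEvents) window; a hypothetical caller pages through it like this (CompletionEventPager is illustrative only):

import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent;
import org.apache.hadoop.mapreduce.v2.app.job.Job;

// Hypothetical pager over the completion-event window declared above.
final class CompletionEventPager {
  // batchSize must be positive or the loop would never advance.
  static int countAll(Job job, int batchSize) {
    int from = 0;
    while (true) {
      TaskAttemptCompletionEvent[] batch =
          job.getTaskAttemptCompletionEvents(from, batchSize);
      if (batch.length == 0) {
        return from; // no more events; 'from' equals the total seen
      }
      from += batch.length; // slide the window past this batch
    }
  }
}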
| 3,383 | 31.538462 | 77 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/package-info.java |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
package org.apache.hadoop.mapreduce.v2.app.job.event;
import org.apache.hadoop.classification.InterfaceAudience;
| 946 | 44.095238 | 75 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptDiagnosticsUpdateEvent.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job.event;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
public class TaskAttemptDiagnosticsUpdateEvent extends TaskAttemptEvent {
private String diagnosticInfo;
public TaskAttemptDiagnosticsUpdateEvent(TaskAttemptId attemptID,
String diagnosticInfo) {
super(attemptID, TaskAttemptEventType.TA_DIAGNOSTICS_UPDATE);
this.diagnosticInfo = diagnosticInfo;
}
public String getDiagnosticInfo() {
return diagnosticInfo;
}
}
| 1,306 | 33.394737 | 74 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobDiagnosticsUpdateEvent.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job.event;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
public class JobDiagnosticsUpdateEvent extends JobEvent {
private String diagnosticUpdate;
public JobDiagnosticsUpdateEvent(JobId jobID, String diagnostic) {
super(jobID, JobEventType.JOB_DIAGNOSTIC_UPDATE);
this.diagnosticUpdate = diagnostic;
}
public String getDiagnosticUpdate() {
return this.diagnosticUpdate;
}
}
| 1,249 | 32.783784 | 74 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobSetupCompletedEvent.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job.event;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
public class JobSetupCompletedEvent extends JobEvent {
public JobSetupCompletedEvent(JobId jobID) {
super(jobID, JobEventType.JOB_SETUP_COMPLETED);
}
}
| 1,082 | 36.344828 | 75 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskEvent.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job.event;
import org.apache.hadoop.yarn.event.AbstractEvent;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
/**
 * This class encapsulates task-related events.
 */
public class TaskEvent extends AbstractEvent<TaskEventType> {
private TaskId taskID;
public TaskEvent(TaskId taskID, TaskEventType type) {
super(type);
this.taskID = taskID;
}
public TaskId getTaskID() {
return taskID;
}
}
| 1,264 | 29.853659 | 74 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobFinishEvent.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job.event;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.yarn.event.AbstractEvent;
public class JobFinishEvent
extends AbstractEvent<JobFinishEvent.Type> {
public enum Type {
STATE_CHANGED
}
private JobId jobID;
public JobFinishEvent(JobId jobID) {
super(Type.STATE_CHANGED);
this.jobID = jobID;
}
public JobId getJobId() {
return jobID;
}
}
| 1,261 | 28.348837 | 74 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskEventType.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job.event;
/**
* Event types handled by Task.
*/
public enum TaskEventType {
//Producer:Client, Job
T_KILL,
//Producer:Job
T_SCHEDULE,
T_RECOVER,
//Producer:Speculator
T_ADD_SPEC_ATTEMPT,
//Producer:TaskAttempt
T_ATTEMPT_LAUNCHED,
T_ATTEMPT_COMMIT_PENDING,
T_ATTEMPT_FAILED,
T_ATTEMPT_SUCCEEDED,
T_ATTEMPT_KILLED
}
| 1,183 | 26.534884 | 74 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobTaskAttemptFetchFailureEvent.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job.event;
import java.util.List;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
public class JobTaskAttemptFetchFailureEvent extends JobEvent {
private final TaskAttemptId reduce;
private final List<TaskAttemptId> maps;
private final String hostname;
public JobTaskAttemptFetchFailureEvent(TaskAttemptId reduce,
List<TaskAttemptId> maps, String host) {
super(reduce.getTaskId().getJobId(),
JobEventType.JOB_TASK_ATTEMPT_FETCH_FAILURE);
this.reduce = reduce;
this.maps = maps;
this.hostname = host;
}
public List<TaskAttemptId> getMaps() {
return maps;
}
public TaskAttemptId getReduce() {
return reduce;
}
public String getHost() {
return hostname;
}
}
| 1,582 | 28.314815 | 74 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptKillEvent.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job.event;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
public class TaskAttemptKillEvent extends TaskAttemptEvent {
private final String message;
public TaskAttemptKillEvent(TaskAttemptId attemptID,
String message) {
super(attemptID, TaskAttemptEventType.TA_KILL);
this.message = message;
}
public String getMessage() {
return message;
}
}
| 1,230 | 31.394737 | 74 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobSetupFailedEvent.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job.event;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
public class JobSetupFailedEvent extends JobEvent {
private String message;
public JobSetupFailedEvent(JobId jobID, String message) {
super(jobID, JobEventType.JOB_SETUP_FAILED);
this.message = message;
}
public String getMessage() {
return message;
}
}
| 1,200 | 32.361111 | 75 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobUpdatedNodesEvent.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job.event;
import java.util.List;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.yarn.api.records.NodeReport;
public class JobUpdatedNodesEvent extends JobEvent {
private final List<NodeReport> updatedNodes;
public JobUpdatedNodesEvent(JobId jobId, List<NodeReport> updatedNodes) {
super(jobId, JobEventType.JOB_UPDATED_NODES);
this.updatedNodes = updatedNodes;
}
public List<NodeReport> getUpdatedNodes() {
return updatedNodes;
}
}
| 1,333 | 31.536585 | 75 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobEventType.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job.event;
/**
* Event types handled by Job.
*/
public enum JobEventType {
//Producer:Client
JOB_KILL,
//Producer:MRAppMaster
JOB_INIT,
JOB_INIT_FAILED,
JOB_START,
//Producer:Task
JOB_TASK_COMPLETED,
JOB_MAP_TASK_RESCHEDULED,
JOB_TASK_ATTEMPT_COMPLETED,
//Producer:CommitterEventHandler
JOB_SETUP_COMPLETED,
JOB_SETUP_FAILED,
JOB_COMMIT_COMPLETED,
JOB_COMMIT_FAILED,
JOB_ABORT_COMPLETED,
//Producer:Job
JOB_COMPLETED,
JOB_FAIL_WAIT_TIMEDOUT,
//Producer:Any component
JOB_DIAGNOSTIC_UPDATE,
INTERNAL_ERROR,
JOB_COUNTER_UPDATE,
//Producer:TaskAttemptListener
JOB_TASK_ATTEMPT_FETCH_FAILURE,
//Producer:RMContainerAllocator
JOB_UPDATED_NODES,
JOB_AM_REBOOT
}
| 1,562 | 24.209677 | 74 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptRecoverEvent.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job.event;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
public class TaskAttemptRecoverEvent extends TaskAttemptEvent {
private TaskAttemptInfo taInfo;
private OutputCommitter committer;
private boolean recoverAttemptOutput;
public TaskAttemptRecoverEvent(TaskAttemptId id, TaskAttemptInfo taInfo,
OutputCommitter committer, boolean recoverOutput) {
super(id, TaskAttemptEventType.TA_RECOVER);
this.taInfo = taInfo;
this.committer = committer;
this.recoverAttemptOutput = recoverOutput;
}
public TaskAttemptInfo getTaskAttemptInfo() {
return taInfo;
}
public OutputCommitter getCommitter() {
return committer;
}
public boolean getRecoverOutput() {
return recoverAttemptOutput;
}
}
| 1,747 | 33.27451 | 79 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobMapTaskRescheduledEvent.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job.event;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
public class JobMapTaskRescheduledEvent extends JobEvent {
private TaskId taskID;
public JobMapTaskRescheduledEvent(TaskId taskID) {
super(taskID.getJobId(), JobEventType.JOB_MAP_TASK_RESCHEDULED);
this.taskID = taskID;
}
public TaskId getTaskID() {
return taskID;
}
}
| 1,203 | 29.871795 | 74 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobCounterUpdateEvent.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job.event;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
public class JobCounterUpdateEvent extends JobEvent {
List<CounterIncrementalUpdate> counterUpdates = null;
public JobCounterUpdateEvent(JobId jobId) {
super(jobId, JobEventType.JOB_COUNTER_UPDATE);
counterUpdates = new ArrayList<JobCounterUpdateEvent.CounterIncrementalUpdate>();
}
public void addCounterUpdate(Enum<?> key, long incrValue) {
counterUpdates.add(new CounterIncrementalUpdate(key, incrValue));
}
public List<CounterIncrementalUpdate> getCounterUpdates() {
return counterUpdates;
}
public static class CounterIncrementalUpdate {
Enum<?> key;
long incrValue;
public CounterIncrementalUpdate(Enum<?> key, long incrValue) {
this.key = key;
this.incrValue = incrValue;
}
public Enum<?> getCounterKey() {
return key;
}
public long getIncrementValue() {
return incrValue;
}
}
}
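
A single JobCounterUpdateEvent can batch several incremental updates before it is dispatched; a hypothetical producer, assuming the standard org.apache.hadoop.mapreduce.JobCounter enum for keys:

import org.apache.hadoop.mapreduce.JobCounter;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent;

// Hypothetical producer batching two increments onto one event.
final class CounterUpdateSketch {
  static JobCounterUpdateEvent onMapLaunched(JobId jobId, boolean dataLocal) {
    JobCounterUpdateEvent evt = new JobCounterUpdateEvent(jobId);
    evt.addCounterUpdate(JobCounter.TOTAL_LAUNCHED_MAPS, 1);
    if (dataLocal) {
      evt.addCounterUpdate(JobCounter.DATA_LOCAL_MAPS, 1);
    }
    return evt; // dispatch via the job's event handler
  }
}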
| 1,871 | 29.688525 | 85 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobTaskEvent.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job.event;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
public class JobTaskEvent extends JobEvent {
private TaskId taskID;
private TaskState taskState;
public JobTaskEvent(TaskId taskID, TaskState taskState) {
super(taskID.getJobId(), JobEventType.JOB_TASK_COMPLETED);
this.taskID = taskID;
this.taskState = taskState;
}
public TaskId getTaskID() {
return taskID;
}
public TaskState getState() {
return taskState;
}
}
| 1,371 | 30.181818 | 74 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobStartEvent.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job.event;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
public class JobStartEvent extends JobEvent {
long recoveredJobStartTime;
public JobStartEvent(JobId jobID) {
this(jobID, -1L);
}
public JobStartEvent(JobId jobID, long recoveredJobStartTime) {
super(jobID, JobEventType.JOB_START);
this.recoveredJobStartTime = recoveredJobStartTime;
}
public long getRecoveredJobStartTime() {
return recoveredJobStartTime;
}
}
| 1,316 | 31.925 | 75 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobTaskAttemptCompletedEvent.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job.event;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent;
public class JobTaskAttemptCompletedEvent extends JobEvent {
private TaskAttemptCompletionEvent completionEvent;
public JobTaskAttemptCompletedEvent(TaskAttemptCompletionEvent completionEvent) {
super(completionEvent.getAttemptId().getTaskId().getJobId(),
JobEventType.JOB_TASK_ATTEMPT_COMPLETED);
this.completionEvent = completionEvent;
}
public TaskAttemptCompletionEvent getCompletionEvent() {
return completionEvent;
}
}
| 1,386 | 35.5 | 83 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskTAttemptEvent.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job.event;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
public class TaskTAttemptEvent extends TaskEvent {
private TaskAttemptId attemptID;
public TaskTAttemptEvent(TaskAttemptId id, TaskEventType type) {
super(id.getTaskId(), type);
this.attemptID = id;
}
public TaskAttemptId getTaskAttemptID() {
return attemptID;
}
}
| 1,205 | 30.736842 | 74 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptTooManyFetchFailureEvent.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job.event;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
/**
* TaskAttemptTooManyFetchFailureEvent is used for TA_TOO_MANY_FETCH_FAILURE.
*/
public class TaskAttemptTooManyFetchFailureEvent extends TaskAttemptEvent {
private TaskAttemptId reduceID;
private String reduceHostname;
/**
* Create a new TaskAttemptTooManyFetchFailureEvent.
* @param attemptId the id of the mapper task attempt
* @param reduceId the id of the reporting reduce task attempt.
* @param reduceHost the hostname of the reporting reduce task attempt.
*/
public TaskAttemptTooManyFetchFailureEvent(TaskAttemptId attemptId,
TaskAttemptId reduceId, String reduceHost) {
super(attemptId, TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE);
this.reduceID = reduceId;
this.reduceHostname = reduceHost;
}
public TaskAttemptId getReduceId() {
return reduceID;
}
public String getReduceHost() {
return reduceHostname;
}
}
| 1,807 | 34.45098 | 77 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptContainerAssignedEvent.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job.event;
import java.util.Map;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.Container;
public class TaskAttemptContainerAssignedEvent extends TaskAttemptEvent {
private final Container container;
private final Map<ApplicationAccessType, String> applicationACLs;
public TaskAttemptContainerAssignedEvent(TaskAttemptId id,
Container container, Map<ApplicationAccessType, String> applicationACLs) {
super(id, TaskAttemptEventType.TA_ASSIGNED);
this.container = container;
this.applicationACLs = applicationACLs;
}
public Container getContainer() {
return this.container;
}
public Map<ApplicationAccessType, String> getApplicationACLs() {
return this.applicationACLs;
}
}
| 1,681 | 34.787234 | 80 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobCommitCompletedEvent.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job.event;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
public class JobCommitCompletedEvent extends JobEvent {
public JobCommitCompletedEvent(JobId jobID) {
super(jobID, JobEventType.JOB_COMMIT_COMPLETED);
}
}
| 1,084 | 37.75 | 75 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobAbortCompletedEvent.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job.event;
import org.apache.hadoop.mapreduce.JobStatus;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
public class JobAbortCompletedEvent extends JobEvent {
private JobStatus.State finalState;
public JobAbortCompletedEvent(JobId jobID, JobStatus.State finalState) {
super(jobID, JobEventType.JOB_ABORT_COMPLETED);
this.finalState = finalState;
}
public JobStatus.State getFinalState() {
return finalState;
}
}
| 1,300 | 34.162162 | 75 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptEventType.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job.event;
/**
* Event types handled by TaskAttempt.
*/
public enum TaskAttemptEventType {
//Producer:Task
TA_SCHEDULE,
TA_RESCHEDULE,
TA_RECOVER,
//Producer:Client, Task
TA_KILL,
//Producer:ContainerAllocator
TA_ASSIGNED,
TA_CONTAINER_COMPLETED,
//Producer:ContainerLauncher
TA_CONTAINER_LAUNCHED,
TA_CONTAINER_LAUNCH_FAILED,
TA_CONTAINER_CLEANED,
//Producer:TaskAttemptListener
TA_DIAGNOSTICS_UPDATE,
TA_COMMIT_PENDING,
TA_DONE,
TA_FAILMSG,
TA_UPDATE,
TA_TIMED_OUT,
//Producer:Client
TA_FAILMSG_BY_CLIENT,
//Producer:TaskCleaner
TA_CLEANUP_DONE,
//Producer:Job
TA_TOO_MANY_FETCH_FAILURE,
}
| 1,491 | 23.866667 | 74 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptEvent.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job.event;
import org.apache.hadoop.yarn.event.AbstractEvent;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
/**
 * This class encapsulates task-attempt-related events.
 */
public class TaskAttemptEvent extends AbstractEvent<TaskAttemptEventType> {
private TaskAttemptId attemptID;
/**
* Create a new TaskAttemptEvent.
* @param id the id of the task attempt
* @param type the type of event that happened.
*/
public TaskAttemptEvent(TaskAttemptId id, TaskAttemptEventType type) {
super(type);
this.attemptID = id;
}
public TaskAttemptId getTaskAttemptID() {
return attemptID;
}
}
| 1,478 | 31.152174 | 75 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptStatusUpdateEvent.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job.event;
import java.util.List;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.v2.api.records.Phase;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
public class TaskAttemptStatusUpdateEvent extends TaskAttemptEvent {
private TaskAttemptStatus reportedTaskAttemptStatus;
public TaskAttemptStatusUpdateEvent(TaskAttemptId id,
TaskAttemptStatus taskAttemptStatus) {
super(id, TaskAttemptEventType.TA_UPDATE);
this.reportedTaskAttemptStatus = taskAttemptStatus;
}
public TaskAttemptStatus getReportedTaskAttemptStatus() {
return reportedTaskAttemptStatus;
}
/**
 * The internal TaskAttemptStatus object corresponding to remote Task status.
 */
public static class TaskAttemptStatus {
public TaskAttemptId id;
public float progress;
public Counters counters;
public String stateString;
public Phase phase;
public List<TaskAttemptId> fetchFailedMaps;
public long mapFinishTime;
public long shuffleFinishTime;
public long sortFinishTime;
public TaskAttemptState taskState;
}
}
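
TaskAttemptStatus is a plain public-field bean; a hypothetical reporter fills it and wraps it in a TA_UPDATE event (StatusReportSketch and the chosen field values are illustrative):

import org.apache.hadoop.mapreduce.v2.api.records.Phase;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus;

// Hypothetical reporter: fill the bean, then wrap it in the event.
final class StatusReportSketch {
  static TaskAttemptStatusUpdateEvent running(TaskAttemptId id, float progress) {
    TaskAttemptStatus status = new TaskAttemptStatus();
    status.id = id;
    status.progress = progress;           // expected in [0.0f, 1.0f]
    status.phase = Phase.MAP;             // assumes a map-side attempt
    status.taskState = TaskAttemptState.RUNNING;
    status.stateString = "RUNNING";
    return new TaskAttemptStatusUpdateEvent(id, status);
  }
}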
| 2,017 | 33.20339 | 79 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptContainerLaunchedEvent.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job.event;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
public class TaskAttemptContainerLaunchedEvent extends TaskAttemptEvent {
private int shufflePort;
/**
* Create a new TaskAttemptEvent.
* @param id the id of the task attempt
* @param shufflePort the port that shuffle is listening on.
*/
public TaskAttemptContainerLaunchedEvent(TaskAttemptId id, int shufflePort) {
super(id, TaskAttemptEventType.TA_CONTAINER_LAUNCHED);
this.shufflePort = shufflePort;
}
/**
* Get the port that the shuffle handler is listening on. This is only
* valid if the type of the event is TA_CONTAINER_LAUNCHED
* @return the port the shuffle handler is listening on.
*/
public int getShufflePort() {
return shufflePort;
}
}
| 1,618 | 34.195652 | 79 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobEvent.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job.event;
import org.apache.hadoop.yarn.event.AbstractEvent;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
/**
 * This class encapsulates job-related events.
 */
public class JobEvent extends AbstractEvent<JobEventType> {
private JobId jobID;
public JobEvent(JobId jobID, JobEventType type) {
super(type);
this.jobID = jobID;
}
public JobId getJobId() {
return jobID;
}
}
| 1,250 | 28.785714 | 74 | java |
| hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskRecoverEvent.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job.event;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
public class TaskRecoverEvent extends TaskEvent {
private TaskInfo taskInfo;
private OutputCommitter committer;
private boolean recoverTaskOutput;
public TaskRecoverEvent(TaskId taskID, TaskInfo taskInfo,
OutputCommitter committer, boolean recoverTaskOutput) {
super(taskID, TaskEventType.T_RECOVER);
this.taskInfo = taskInfo;
this.committer = committer;
this.recoverTaskOutput = recoverTaskOutput;
}
public TaskInfo getTaskInfo() {
return taskInfo;
}
public OutputCommitter getOutputCommitter() {
return committer;
}
public boolean getRecoverTaskOutput() {
return recoverTaskOutput;
}
}
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/JobCommitFailedEvent.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job.event;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
public class JobCommitFailedEvent extends JobEvent {
private String message;
public JobCommitFailedEvent(JobId jobID, String message) {
super(jobID, JobEventType.JOB_COMMIT_FAILED);
this.message = message;
}
public String getMessage() {
return this.message;
}
}
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/ReduceTaskImpl.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job.impl;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.ReduceTaskAttemptImpl;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener;
import org.apache.hadoop.mapreduce.v2.app.metrics.MRAppMetrics;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.util.Clock;
@SuppressWarnings({ "rawtypes" })
public class ReduceTaskImpl extends TaskImpl {
private final int numMapTasks;
public ReduceTaskImpl(JobId jobId, int partition,
EventHandler eventHandler, Path jobFile, JobConf conf,
int numMapTasks, TaskAttemptListener taskAttemptListener,
Token<JobTokenIdentifier> jobToken,
Credentials credentials, Clock clock,
int appAttemptId, MRAppMetrics metrics, AppContext appContext) {
super(jobId, TaskType.REDUCE, partition, eventHandler, jobFile, conf,
taskAttemptListener, jobToken, credentials, clock,
appAttemptId, metrics, appContext);
this.numMapTasks = numMapTasks;
}
@Override
protected int getMaxAttempts() {
return conf.getInt(MRJobConfig.REDUCE_MAX_ATTEMPTS, 4);
}
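// MRJobConfig.REDUCE_MAX_ATTEMPTS is the "mapreduce.reduce.maxattempts"
// configuration key; 4 is the fallback when the key is unset.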
@Override
protected TaskAttemptImpl createAttempt() {
return new ReduceTaskAttemptImpl(getID(), nextAttemptNumber,
eventHandler, jobFile,
partition, numMapTasks, conf, taskAttemptListener,
jobToken, credentials, clock, appContext);
}
@Override
public TaskType getType() {
return TaskType.REDUCE;
}
}
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/package-info.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
package org.apache.hadoop.mapreduce.v2.app.job.impl;
import org.apache.hadoop.classification.InterfaceAudience;
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job.impl;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo;
import org.apache.hadoop.mapreduce.jobhistory.TaskFailedEvent;
import org.apache.hadoop.mapreduce.jobhistory.TaskFinishedEvent;
import org.apache.hadoop.mapreduce.jobhistory.TaskStartedEvent;
import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
import org.apache.hadoop.mapreduce.v2.api.records.Avataar;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEventStatus;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.app.job.TaskStateInternal;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobDiagnosticsUpdateEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobMapTaskRescheduledEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskAttemptCompletedEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptKillEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptRecoverEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskRecoverEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptEvent;
import org.apache.hadoop.mapreduce.v2.app.metrics.MRAppMetrics;
import org.apache.hadoop.mapreduce.v2.app.rm.ContainerFailedEvent;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.StringInterner;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.state.InvalidStateTransitionException;
import org.apache.hadoop.yarn.state.MultipleArcTransition;
import org.apache.hadoop.yarn.state.SingleArcTransition;
import org.apache.hadoop.yarn.state.StateMachine;
import org.apache.hadoop.yarn.state.StateMachineFactory;
import org.apache.hadoop.yarn.util.Clock;
import com.google.common.annotations.VisibleForTesting;
/**
* Implementation of Task interface.
*/
@SuppressWarnings({ "rawtypes", "unchecked" })
public abstract class TaskImpl implements Task, EventHandler<TaskEvent> {
private static final Log LOG = LogFactory.getLog(TaskImpl.class);
private static final String SPECULATION = "Speculation: ";
protected final JobConf conf;
protected final Path jobFile;
protected final int partition;
protected final TaskAttemptListener taskAttemptListener;
protected final EventHandler eventHandler;
private final TaskId taskId;
private Map<TaskAttemptId, TaskAttempt> attempts;
private final int maxAttempts;
protected final Clock clock;
private final Lock readLock;
private final Lock writeLock;
private final MRAppMetrics metrics;
protected final AppContext appContext;
private long scheduledTime;
private final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
protected boolean encryptedShuffle;
protected Credentials credentials;
protected Token<JobTokenIdentifier> jobToken;
//should be set to the attempt which comes first,
//i.e. the first one to report COMMIT_PENDING
private TaskAttemptId commitAttempt;
private TaskAttemptId successfulAttempt;
private final Set<TaskAttemptId> failedAttempts;
// Track the finished attempts - successful, failed and killed
private final Set<TaskAttemptId> finishedAttempts;
// counts the number of attempts that are either running or in a state where
// they will come to be running when they get a Container
private final Set<TaskAttemptId> inProgressAttempts;
private boolean historyTaskStartGenerated = false;
private static final SingleArcTransition<TaskImpl, TaskEvent>
ATTEMPT_KILLED_TRANSITION = new AttemptKilledTransition();
private static final SingleArcTransition<TaskImpl, TaskEvent>
KILL_TRANSITION = new KillTransition();
private static final StateMachineFactory
<TaskImpl, TaskStateInternal, TaskEventType, TaskEvent>
stateMachineFactory
= new StateMachineFactory<TaskImpl, TaskStateInternal, TaskEventType, TaskEvent>
(TaskStateInternal.NEW)
// define the state machine of Task
// Transitions from NEW state
.addTransition(TaskStateInternal.NEW, TaskStateInternal.SCHEDULED,
TaskEventType.T_SCHEDULE, new InitialScheduleTransition())
.addTransition(TaskStateInternal.NEW, TaskStateInternal.KILLED,
TaskEventType.T_KILL, new KillNewTransition())
.addTransition(TaskStateInternal.NEW,
EnumSet.of(TaskStateInternal.FAILED,
TaskStateInternal.KILLED,
TaskStateInternal.RUNNING,
TaskStateInternal.SUCCEEDED),
TaskEventType.T_RECOVER, new RecoverTransition())
// Transitions from SCHEDULED state
//when the first attempt is launched, the task state is set to RUNNING
.addTransition(TaskStateInternal.SCHEDULED, TaskStateInternal.RUNNING,
TaskEventType.T_ATTEMPT_LAUNCHED, new LaunchTransition())
.addTransition(TaskStateInternal.SCHEDULED, TaskStateInternal.KILL_WAIT,
TaskEventType.T_KILL, KILL_TRANSITION)
.addTransition(TaskStateInternal.SCHEDULED, TaskStateInternal.SCHEDULED,
TaskEventType.T_ATTEMPT_KILLED, ATTEMPT_KILLED_TRANSITION)
.addTransition(TaskStateInternal.SCHEDULED,
EnumSet.of(TaskStateInternal.SCHEDULED, TaskStateInternal.FAILED),
TaskEventType.T_ATTEMPT_FAILED,
new AttemptFailedTransition())
// Transitions from RUNNING state
.addTransition(TaskStateInternal.RUNNING, TaskStateInternal.RUNNING,
TaskEventType.T_ATTEMPT_LAUNCHED) //more attempts may start later
.addTransition(TaskStateInternal.RUNNING, TaskStateInternal.RUNNING,
TaskEventType.T_ATTEMPT_COMMIT_PENDING,
new AttemptCommitPendingTransition())
.addTransition(TaskStateInternal.RUNNING, TaskStateInternal.RUNNING,
TaskEventType.T_ADD_SPEC_ATTEMPT, new RedundantScheduleTransition())
.addTransition(TaskStateInternal.RUNNING, TaskStateInternal.SUCCEEDED,
TaskEventType.T_ATTEMPT_SUCCEEDED,
new AttemptSucceededTransition())
.addTransition(TaskStateInternal.RUNNING, TaskStateInternal.RUNNING,
TaskEventType.T_ATTEMPT_KILLED,
ATTEMPT_KILLED_TRANSITION)
.addTransition(TaskStateInternal.RUNNING,
EnumSet.of(TaskStateInternal.RUNNING, TaskStateInternal.FAILED),
TaskEventType.T_ATTEMPT_FAILED,
new AttemptFailedTransition())
.addTransition(TaskStateInternal.RUNNING, TaskStateInternal.KILL_WAIT,
TaskEventType.T_KILL, KILL_TRANSITION)
// Transitions from KILL_WAIT state
.addTransition(TaskStateInternal.KILL_WAIT,
EnumSet.of(TaskStateInternal.KILL_WAIT, TaskStateInternal.KILLED),
TaskEventType.T_ATTEMPT_KILLED,
new KillWaitAttemptKilledTransition())
.addTransition(TaskStateInternal.KILL_WAIT,
EnumSet.of(TaskStateInternal.KILL_WAIT, TaskStateInternal.KILLED),
TaskEventType.T_ATTEMPT_SUCCEEDED,
new KillWaitAttemptSucceededTransition())
.addTransition(TaskStateInternal.KILL_WAIT,
EnumSet.of(TaskStateInternal.KILL_WAIT, TaskStateInternal.KILLED),
TaskEventType.T_ATTEMPT_FAILED,
new KillWaitAttemptFailedTransition())
// Ignore-able transitions.
.addTransition(
TaskStateInternal.KILL_WAIT,
TaskStateInternal.KILL_WAIT,
EnumSet.of(TaskEventType.T_KILL,
TaskEventType.T_ATTEMPT_LAUNCHED,
TaskEventType.T_ATTEMPT_COMMIT_PENDING,
TaskEventType.T_ADD_SPEC_ATTEMPT))
// Transitions from SUCCEEDED state
.addTransition(TaskStateInternal.SUCCEEDED,
EnumSet.of(TaskStateInternal.SCHEDULED, TaskStateInternal.SUCCEEDED, TaskStateInternal.FAILED),
TaskEventType.T_ATTEMPT_FAILED, new RetroactiveFailureTransition())
.addTransition(TaskStateInternal.SUCCEEDED,
EnumSet.of(TaskStateInternal.SCHEDULED, TaskStateInternal.SUCCEEDED),
TaskEventType.T_ATTEMPT_KILLED, new RetroactiveKilledTransition())
.addTransition(TaskStateInternal.SUCCEEDED, TaskStateInternal.SUCCEEDED,
TaskEventType.T_ATTEMPT_SUCCEEDED,
new AttemptSucceededAtSucceededTransition())
// Ignore-able transitions.
.addTransition(
TaskStateInternal.SUCCEEDED, TaskStateInternal.SUCCEEDED,
EnumSet.of(TaskEventType.T_ADD_SPEC_ATTEMPT,
TaskEventType.T_ATTEMPT_COMMIT_PENDING,
TaskEventType.T_ATTEMPT_LAUNCHED,
TaskEventType.T_KILL))
// Transitions from FAILED state
.addTransition(TaskStateInternal.FAILED, TaskStateInternal.FAILED,
EnumSet.of(TaskEventType.T_KILL,
TaskEventType.T_ADD_SPEC_ATTEMPT,
TaskEventType.T_ATTEMPT_COMMIT_PENDING,
TaskEventType.T_ATTEMPT_FAILED,
TaskEventType.T_ATTEMPT_KILLED,
TaskEventType.T_ATTEMPT_LAUNCHED,
TaskEventType.T_ATTEMPT_SUCCEEDED))
// Transitions from KILLED state
// There could be a race condition where TaskImpl might receive
// T_ATTEMPT_SUCCEEDED followed by T_ATTEMPT_KILLED for the same attempt.
// a. The task is in KILL_WAIT.
// b. Before TA transitions to SUCCEEDED state, Task sends TA_KILL event.
// c. TA transitions to SUCCEEDED state and thus send T_ATTEMPT_SUCCEEDED
// to the task. The task transitions to KILLED state.
// d. TA processes TA_KILL event and sends T_ATTEMPT_KILLED to the task.
.addTransition(TaskStateInternal.KILLED, TaskStateInternal.KILLED,
EnumSet.of(TaskEventType.T_KILL,
TaskEventType.T_ATTEMPT_KILLED,
TaskEventType.T_ADD_SPEC_ATTEMPT))
// create the topology tables
.installTopology();
private final StateMachine<TaskStateInternal, TaskEventType, TaskEvent>
stateMachine;
// By default, the next TaskAttempt number is zero; it changes during recovery
protected int nextAttemptNumber = 0;
// For sorting task attempts by completion time
private static final Comparator<TaskAttemptInfo> TA_INFO_COMPARATOR =
new Comparator<TaskAttemptInfo>() {
@Override
public int compare(TaskAttemptInfo a, TaskAttemptInfo b) {
long diff = a.getFinishTime() - b.getFinishTime();
return diff == 0 ? 0 : (diff < 0 ? -1 : 1);
}
};
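// The comparison above is effectively Long.compare(a.getFinishTime(),
// b.getFinishTime()); it avoids the overflow that simply casting the long
// difference to an int could introduce.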
@Override
public TaskState getState() {
readLock.lock();
try {
return getExternalState(getInternalState());
} finally {
readLock.unlock();
}
}
public TaskImpl(JobId jobId, TaskType taskType, int partition,
EventHandler eventHandler, Path remoteJobConfFile, JobConf conf,
TaskAttemptListener taskAttemptListener,
Token<JobTokenIdentifier> jobToken,
Credentials credentials, Clock clock,
int appAttemptId, MRAppMetrics metrics, AppContext appContext) {
this.conf = conf;
this.clock = clock;
this.jobFile = remoteJobConfFile;
ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
readLock = readWriteLock.readLock();
writeLock = readWriteLock.writeLock();
this.attempts = Collections.emptyMap();
this.finishedAttempts = new HashSet<TaskAttemptId>(2);
this.failedAttempts = new HashSet<TaskAttemptId>(2);
this.inProgressAttempts = new HashSet<TaskAttemptId>(2);
// This overridable method call is okay in a constructor because we
// have a convention that none of the overrides depends on any
// fields that need initialization.
maxAttempts = getMaxAttempts();
taskId = MRBuilderUtils.newTaskId(jobId, partition, taskType);
this.partition = partition;
this.taskAttemptListener = taskAttemptListener;
this.eventHandler = eventHandler;
this.credentials = credentials;
this.jobToken = jobToken;
this.metrics = metrics;
this.appContext = appContext;
this.encryptedShuffle = conf.getBoolean(MRConfig.SHUFFLE_SSL_ENABLED_KEY,
MRConfig.SHUFFLE_SSL_ENABLED_DEFAULT);
// This "this leak" is okay because the retained pointer is in an
// instance variable.
stateMachine = stateMachineFactory.make(this);
// All the new TaskAttemptIDs are generated based on MR
// ApplicationAttemptID so that attempts from previous lives don't
// over-step the current one. This assumes that a task won't have more
// than 1000 attempts in its single generation, which is very reasonable.
nextAttemptNumber = (appAttemptId - 1) * 1000;
}
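// For example, with appAttemptId == 2 the first attempt number generated
// here is (2 - 1) * 1000 == 1000, so attempts created by a restarted
// ApplicationMaster can never collide with attempts 0..999 of the first
// one, as long as each generation stays under 1000 attempts.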
@Override
public Map<TaskAttemptId, TaskAttempt> getAttempts() {
readLock.lock();
try {
if (attempts.size() <= 1) {
return attempts;
}
Map<TaskAttemptId, TaskAttempt> result
= new LinkedHashMap<TaskAttemptId, TaskAttempt>();
result.putAll(attempts);
return result;
} finally {
readLock.unlock();
}
}
@Override
public TaskAttempt getAttempt(TaskAttemptId attemptID) {
readLock.lock();
try {
return attempts.get(attemptID);
} finally {
readLock.unlock();
}
}
@Override
public TaskId getID() {
return taskId;
}
@Override
public boolean isFinished() {
readLock.lock();
try {
// TODO: Use stateMachine level method?
return (getInternalState() == TaskStateInternal.SUCCEEDED ||
getInternalState() == TaskStateInternal.FAILED ||
getInternalState() == TaskStateInternal.KILLED);
} finally {
readLock.unlock();
}
}
@Override
public TaskReport getReport() {
TaskReport report = recordFactory.newRecordInstance(TaskReport.class);
readLock.lock();
try {
TaskAttempt bestAttempt = selectBestAttempt();
report.setTaskId(taskId);
report.setStartTime(getLaunchTime());
report.setFinishTime(getFinishTime());
report.setTaskState(getState());
report.setProgress(bestAttempt == null ? 0f : bestAttempt.getProgress());
report.setStatus(bestAttempt == null
? ""
: bestAttempt.getReport().getStateString());
for (TaskAttempt attempt : attempts.values()) {
if (TaskAttemptState.RUNNING.equals(attempt.getState())) {
report.addRunningAttempt(attempt.getID());
}
}
report.setSuccessfulAttempt(successfulAttempt);
for (TaskAttempt att : attempts.values()) {
String prefix = "AttemptID:" + att.getID() + " Info:";
for (CharSequence cs : att.getDiagnostics()) {
report.addDiagnostics(prefix + cs);
}
}
// Add a copy of counters as the last step so that their lifetime on heap
// is as small as possible.
report.setCounters(TypeConverter.toYarn(bestAttempt == null
? TaskAttemptImpl.EMPTY_COUNTERS
: bestAttempt.getCounters()));
return report;
} finally {
readLock.unlock();
}
}
@Override
public Counters getCounters() {
Counters counters = null;
readLock.lock();
try {
TaskAttempt bestAttempt = selectBestAttempt();
if (bestAttempt != null) {
counters = bestAttempt.getCounters();
} else {
counters = TaskAttemptImpl.EMPTY_COUNTERS;
// counters.groups = new HashMap<CharSequence, CounterGroup>();
}
return counters;
} finally {
readLock.unlock();
}
}
@Override
public float getProgress() {
readLock.lock();
try {
TaskAttempt bestAttempt = selectBestAttempt();
if (bestAttempt == null) {
return 0f;
}
return bestAttempt.getProgress();
} finally {
readLock.unlock();
}
}
@VisibleForTesting
public TaskStateInternal getInternalState() {
readLock.lock();
try {
return stateMachine.getCurrentState();
} finally {
readLock.unlock();
}
}
private static TaskState getExternalState(TaskStateInternal smState) {
if (smState == TaskStateInternal.KILL_WAIT) {
return TaskState.KILLED;
} else {
return TaskState.valueOf(smState.name());
}
}
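// KILL_WAIT is purely internal bookkeeping (waiting for attempts to die),
// so it is reported to clients as KILLED; every other internal state maps
// to the same-named external TaskState.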
//this is always called in read/write lock
private long getLaunchTime() {
long taskLaunchTime = 0;
boolean launchTimeSet = false;
for (TaskAttempt at : attempts.values()) {
// select the least launch time of all attempts
long attemptLaunchTime = at.getLaunchTime();
if (attemptLaunchTime != 0 && !launchTimeSet) {
// For the first non-zero launch time
launchTimeSet = true;
taskLaunchTime = attemptLaunchTime;
} else if (attemptLaunchTime != 0 && taskLaunchTime > attemptLaunchTime) {
taskLaunchTime = attemptLaunchTime;
}
}
if (!launchTimeSet) {
return this.scheduledTime;
}
return taskLaunchTime;
}
//this is always called in read/write lock
//TODO Verify behaviour if Task is killed (no finished attempt)
private long getFinishTime() {
if (!isFinished()) {
return 0;
}
long finishTime = 0;
for (TaskAttempt at : attempts.values()) {
//select the max finish time of all attempts
if (finishTime < at.getFinishTime()) {
finishTime = at.getFinishTime();
}
}
return finishTime;
}
private long getFinishTime(TaskAttemptId taId) {
if (taId == null) {
return clock.getTime();
}
long finishTime = 0;
for (TaskAttempt at : attempts.values()) {
//return the finish time of the attempt matching taId
if (at.getID().equals(taId)) {
return at.getFinishTime();
}
}
return finishTime;
}
private TaskStateInternal finished(TaskStateInternal finalState) {
if (getInternalState() == TaskStateInternal.RUNNING) {
metrics.endRunningTask(this);
}
return finalState;
}
//select the attempt with the best progress
// always called inside the Read Lock
private TaskAttempt selectBestAttempt() {
if (successfulAttempt != null) {
return attempts.get(successfulAttempt);
}
float progress = 0f;
TaskAttempt result = null;
for (TaskAttempt at : attempts.values()) {
switch (at.getState()) {
// ignore all failed task attempts
case FAILED:
case KILLED:
continue;
}
if (result == null) {
result = at; //The first time around
}
// calculate the best progress
float attemptProgress = at.getProgress();
if (attemptProgress > progress) {
result = at;
progress = attemptProgress;
}
}
return result;
}
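// canCommit() is reached via the TaskAttemptListener's implementation of
// TaskUmbilicalProtocol when an attempt asks the AM for permission to
// commit its output; only the attempt recorded in commitAttempt gets true.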
@Override
public boolean canCommit(TaskAttemptId taskAttemptID) {
readLock.lock();
boolean canCommit = false;
try {
if (commitAttempt != null) {
canCommit = taskAttemptID.equals(commitAttempt);
LOG.info("Result of canCommit for " + taskAttemptID + ":" + canCommit);
}
} finally {
readLock.unlock();
}
return canCommit;
}
protected abstract TaskAttemptImpl createAttempt();
// No override of this method may require that the subclass be initialized.
protected abstract int getMaxAttempts();
protected TaskAttempt getSuccessfulAttempt() {
readLock.lock();
try {
if (null == successfulAttempt) {
return null;
}
return attempts.get(successfulAttempt);
} finally {
readLock.unlock();
}
}
// This is always called in the Write Lock
private void addAndScheduleAttempt(Avataar avataar) {
TaskAttempt attempt = addAttempt(avataar);
inProgressAttempts.add(attempt.getID());
//schedule the new attempt
if (failedAttempts.size() > 0) {
eventHandler.handle(new TaskAttemptEvent(attempt.getID(),
TaskAttemptEventType.TA_RESCHEDULE));
} else {
eventHandler.handle(new TaskAttemptEvent(attempt.getID(),
TaskAttemptEventType.TA_SCHEDULE));
}
}
private TaskAttemptImpl addAttempt(Avataar avataar) {
TaskAttemptImpl attempt = createAttempt();
attempt.setAvataar(avataar);
if (LOG.isDebugEnabled()) {
LOG.debug("Created attempt " + attempt.getID());
}
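// Grow the attempts map lazily: most tasks only ever have one attempt, so
// a singleton map is used first and replaced by a LinkedHashMap only when
// a second (e.g. speculative or retry) attempt is added.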
switch (attempts.size()) {
case 0:
attempts = Collections.singletonMap(attempt.getID(),
(TaskAttempt) attempt);
break;
case 1:
Map<TaskAttemptId, TaskAttempt> newAttempts
= new LinkedHashMap<TaskAttemptId, TaskAttempt>(maxAttempts);
newAttempts.putAll(attempts);
attempts = newAttempts;
attempts.put(attempt.getID(), attempt);
break;
default:
attempts.put(attempt.getID(), attempt);
break;
}
++nextAttemptNumber;
return attempt;
}
@Override
public void handle(TaskEvent event) {
if (LOG.isDebugEnabled()) {
LOG.debug("Processing " + event.getTaskID() + " of type "
+ event.getType());
}
try {
writeLock.lock();
TaskStateInternal oldState = getInternalState();
try {
stateMachine.doTransition(event.getType(), event);
} catch (InvalidStateTransitionException e) {
LOG.error("Can't handle this event at current state for "
+ this.taskId, e);
internalError(event.getType());
}
if (oldState != getInternalState()) {
LOG.info(taskId + " Task Transitioned from " + oldState + " to "
+ getInternalState());
}
} finally {
writeLock.unlock();
}
}
protected void internalError(TaskEventType type) {
LOG.error("Invalid event " + type + " on Task " + this.taskId);
eventHandler.handle(new JobDiagnosticsUpdateEvent(
this.taskId.getJobId(), "Invalid event " + type +
" on Task " + this.taskId));
eventHandler.handle(new JobEvent(this.taskId.getJobId(),
JobEventType.INTERNAL_ERROR));
}
// always called inside a transition, in turn inside the Write Lock
private void handleTaskAttemptCompletion(TaskAttemptId attemptId,
TaskAttemptCompletionEventStatus status) {
TaskAttempt attempt = attempts.get(attemptId);
//raise the completion event only if the attempt was
//actually assigned a container
if (attempt.getNodeHttpAddress() != null) {
TaskAttemptCompletionEvent tce = recordFactory
.newRecordInstance(TaskAttemptCompletionEvent.class);
tce.setEventId(-1);
String scheme = (encryptedShuffle) ? "https://" : "http://";
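// The resulting address has the form "<scheme><host>:<shufflePort>",
// e.g. "http://node1:13562"; reducers use it to fetch map output.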
tce.setMapOutputServerAddress(StringInterner.weakIntern(scheme
+ attempt.getNodeHttpAddress().split(":")[0] + ":"
+ attempt.getShufflePort()));
tce.setStatus(status);
tce.setAttemptId(attempt.getID());
int runTime = 0;
if (attempt.getFinishTime() != 0 && attempt.getLaunchTime() != 0)
runTime = (int)(attempt.getFinishTime() - attempt.getLaunchTime());
tce.setAttemptRunTime(runTime);
//raise the event to job so that it adds the completion event to its
//data structures
eventHandler.handle(new JobTaskAttemptCompletedEvent(tce));
}
}
private void sendTaskStartedEvent() {
TaskStartedEvent tse = new TaskStartedEvent(
TypeConverter.fromYarn(taskId), getLaunchTime(),
TypeConverter.fromYarn(taskId.getTaskType()),
getSplitsAsString());
eventHandler
.handle(new JobHistoryEvent(taskId.getJobId(), tse));
historyTaskStartGenerated = true;
}
private static TaskFinishedEvent createTaskFinishedEvent(TaskImpl task, TaskStateInternal taskState) {
TaskFinishedEvent tfe =
new TaskFinishedEvent(TypeConverter.fromYarn(task.taskId),
TypeConverter.fromYarn(task.successfulAttempt),
task.getFinishTime(task.successfulAttempt),
TypeConverter.fromYarn(task.taskId.getTaskType()),
taskState.toString(),
task.getCounters());
return tfe;
}
private static TaskFailedEvent createTaskFailedEvent(TaskImpl task, List<String> diag, TaskStateInternal taskState, TaskAttemptId taId) {
StringBuilder errorSb = new StringBuilder();
if (diag != null) {
for (String d : diag) {
errorSb.append(", ").append(d);
}
}
TaskFailedEvent taskFailedEvent = new TaskFailedEvent(
TypeConverter.fromYarn(task.taskId),
// Hack since getFinishTime needs isFinished to be true and that doesn't happen till after the transition.
task.getFinishTime(taId),
TypeConverter.fromYarn(task.getType()),
errorSb.toString(),
taskState.toString(),
taId == null ? null : TypeConverter.fromYarn(taId),
task.getCounters());
return taskFailedEvent;
}
private static void unSucceed(TaskImpl task) {
task.commitAttempt = null;
task.successfulAttempt = null;
}
private void sendTaskSucceededEvents() {
eventHandler.handle(new JobTaskEvent(taskId, TaskState.SUCCEEDED));
LOG.info("Task succeeded with attempt " + successfulAttempt);
if (historyTaskStartGenerated) {
TaskFinishedEvent tfe = createTaskFinishedEvent(this,
TaskStateInternal.SUCCEEDED);
eventHandler.handle(new JobHistoryEvent(taskId.getJobId(), tfe));
}
}
/**
* @return a String representation of the splits.
*
* Subclasses can override this method to provide their own representations
* of splits (if any).
*
*/
protected String getSplitsAsString(){
return "";
}
/**
* Recover a completed task from a previous application attempt
* @param taskInfo recovered info about the task
* @param recoverTaskOutput whether to recover task outputs
* @return state of the task after recovery
*/
private TaskStateInternal recover(TaskInfo taskInfo,
OutputCommitter committer, boolean recoverTaskOutput) {
LOG.info("Recovering task " + taskId
+ " from prior app attempt, status was " + taskInfo.getTaskStatus());
scheduledTime = taskInfo.getStartTime();
sendTaskStartedEvent();
Collection<TaskAttemptInfo> attemptInfos =
taskInfo.getAllTaskAttempts().values();
if (attemptInfos.size() > 0) {
metrics.launchedTask(this);
}
// recover the attempts for this task in the order they finished
// so task attempt completion events are ordered properly
int savedNextAttemptNumber = nextAttemptNumber;
ArrayList<TaskAttemptInfo> taInfos =
new ArrayList<TaskAttemptInfo>(taskInfo.getAllTaskAttempts().values());
Collections.sort(taInfos, TA_INFO_COMPARATOR);
for (TaskAttemptInfo taInfo : taInfos) {
nextAttemptNumber = taInfo.getAttemptId().getId();
TaskAttemptImpl attempt = addAttempt(Avataar.VIRGIN);
// handle the recovery inline so attempts complete before task does
attempt.handle(new TaskAttemptRecoverEvent(attempt.getID(), taInfo,
committer, recoverTaskOutput));
finishedAttempts.add(attempt.getID());
TaskAttemptCompletionEventStatus taces = null;
TaskAttemptState attemptState = attempt.getState();
switch (attemptState) {
case FAILED:
taces = TaskAttemptCompletionEventStatus.FAILED;
break;
case KILLED:
taces = TaskAttemptCompletionEventStatus.KILLED;
break;
case SUCCEEDED:
taces = TaskAttemptCompletionEventStatus.SUCCEEDED;
break;
default:
throw new IllegalStateException(
"Unexpected attempt state during recovery: " + attemptState);
}
if (attemptState == TaskAttemptState.FAILED) {
failedAttempts.add(attempt.getID());
if (failedAttempts.size() >= maxAttempts) {
taces = TaskAttemptCompletionEventStatus.TIPFAILED;
}
}
// don't clobber the successful attempt completion event
// TODO: this shouldn't be necessary after MAPREDUCE-4330
if (successfulAttempt == null) {
handleTaskAttemptCompletion(attempt.getID(), taces);
if (attemptState == TaskAttemptState.SUCCEEDED) {
successfulAttempt = attempt.getID();
}
}
}
nextAttemptNumber = savedNextAttemptNumber;
TaskStateInternal taskState = TaskStateInternal.valueOf(
taskInfo.getTaskStatus());
switch (taskState) {
case SUCCEEDED:
if (successfulAttempt != null) {
sendTaskSucceededEvents();
} else {
LOG.info("Missing successful attempt for task " + taskId
+ ", recovering as RUNNING");
// there must have been a fetch failure and the retry wasn't complete
taskState = TaskStateInternal.RUNNING;
metrics.runningTask(this);
addAndScheduleAttempt(Avataar.VIRGIN);
}
break;
case FAILED:
case KILLED:
{
if (taskState == TaskStateInternal.KILLED && attemptInfos.size() == 0) {
metrics.endWaitingTask(this);
}
TaskFailedEvent tfe = new TaskFailedEvent(taskInfo.getTaskId(),
taskInfo.getFinishTime(), taskInfo.getTaskType(),
taskInfo.getError(), taskInfo.getTaskStatus(),
taskInfo.getFailedDueToAttemptId(), taskInfo.getCounters());
eventHandler.handle(new JobHistoryEvent(taskId.getJobId(), tfe));
eventHandler.handle(
new JobTaskEvent(taskId, getExternalState(taskState)));
break;
}
default:
throw new java.lang.AssertionError("Unexpected recovered task state: "
+ taskState);
}
return taskState;
}
private static class RecoverTransition
implements MultipleArcTransition<TaskImpl, TaskEvent, TaskStateInternal> {
@Override
public TaskStateInternal transition(TaskImpl task, TaskEvent event) {
TaskRecoverEvent tre = (TaskRecoverEvent) event;
return task.recover(tre.getTaskInfo(), tre.getOutputCommitter(),
tre.getRecoverTaskOutput());
}
}
private static class InitialScheduleTransition
implements SingleArcTransition<TaskImpl, TaskEvent> {
@Override
public void transition(TaskImpl task, TaskEvent event) {
task.addAndScheduleAttempt(Avataar.VIRGIN);
task.scheduledTime = task.clock.getTime();
task.sendTaskStartedEvent();
}
}
// Used when creating a new attempt while one is already running.
// Currently we do this for speculation. In the future we may do this
// for tasks that failed in a way that might indicate application code
// problems, so we can take later failures in parallel and flush the
// job quickly when this happens.
private static class RedundantScheduleTransition
implements SingleArcTransition<TaskImpl, TaskEvent> {
@Override
public void transition(TaskImpl task, TaskEvent event) {
LOG.info("Scheduling a redundant attempt for task " + task.taskId);
task.addAndScheduleAttempt(Avataar.SPECULATIVE);
}
}
private static class AttemptCommitPendingTransition
implements SingleArcTransition<TaskImpl, TaskEvent> {
@Override
public void transition(TaskImpl task, TaskEvent event) {
TaskTAttemptEvent ev = (TaskTAttemptEvent) event;
// An attempt reports commit pending; decide whether to set it as the commitAttempt
TaskAttemptId attemptID = ev.getTaskAttemptID();
if (task.commitAttempt == null) {
// TODO: validate attemptID
task.commitAttempt = attemptID;
LOG.info(attemptID + " given a go for committing the task output.");
} else {
// Don't think this can be a pluggable decision, so simply raise an
// event for the TaskAttempt to delete its output.
LOG.info(task.commitAttempt
+ " already given a go for committing the task output, so killing "
+ attemptID);
task.eventHandler.handle(new TaskAttemptKillEvent(attemptID,
SPECULATION + task.commitAttempt + " committed first!"));
}
}
}
private static class AttemptSucceededTransition
implements SingleArcTransition<TaskImpl, TaskEvent> {
@Override
public void transition(TaskImpl task, TaskEvent event) {
TaskTAttemptEvent taskTAttemptEvent = (TaskTAttemptEvent) event;
TaskAttemptId taskAttemptId = taskTAttemptEvent.getTaskAttemptID();
task.handleTaskAttemptCompletion(
taskAttemptId,
TaskAttemptCompletionEventStatus.SUCCEEDED);
task.finishedAttempts.add(taskAttemptId);
task.inProgressAttempts.remove(taskAttemptId);
task.successfulAttempt = taskAttemptId;
task.sendTaskSucceededEvents();
for (TaskAttempt attempt : task.attempts.values()) {
if (attempt.getID() != task.successfulAttempt &&
// This is okay because it can only talk us out of sending a
// TA_KILL message to an attempt that doesn't need one for
// other reasons.
!attempt.isFinished()) {
LOG.info("Issuing kill to other attempt " + attempt.getID());
task.eventHandler.handle(new TaskAttemptKillEvent(attempt.getID(),
SPECULATION + task.successfulAttempt + " succeeded first!"));
}
}
task.finished(TaskStateInternal.SUCCEEDED);
}
}
private static class AttemptKilledTransition implements
SingleArcTransition<TaskImpl, TaskEvent> {
@Override
public void transition(TaskImpl task, TaskEvent event) {
TaskAttemptId taskAttemptId =
((TaskTAttemptEvent) event).getTaskAttemptID();
task.handleTaskAttemptCompletion(
taskAttemptId,
TaskAttemptCompletionEventStatus.KILLED);
task.finishedAttempts.add(taskAttemptId);
task.inProgressAttempts.remove(taskAttemptId);
if (task.successfulAttempt == null) {
task.addAndScheduleAttempt(Avataar.VIRGIN);
}
if ((task.commitAttempt != null) && (task.commitAttempt == taskAttemptId)) {
task.commitAttempt = null;
}
}
}
private static class KillWaitAttemptKilledTransition implements
MultipleArcTransition<TaskImpl, TaskEvent, TaskStateInternal> {
protected TaskStateInternal finalState = TaskStateInternal.KILLED;
protected final TaskAttemptCompletionEventStatus taCompletionEventStatus;
public KillWaitAttemptKilledTransition() {
this(TaskAttemptCompletionEventStatus.KILLED);
}
public KillWaitAttemptKilledTransition(
TaskAttemptCompletionEventStatus taCompletionEventStatus) {
this.taCompletionEventStatus = taCompletionEventStatus;
}
@Override
public TaskStateInternal transition(TaskImpl task, TaskEvent event) {
TaskAttemptId taskAttemptId =
((TaskTAttemptEvent) event).getTaskAttemptID();
task.handleTaskAttemptCompletion(taskAttemptId, taCompletionEventStatus);
task.finishedAttempts.add(taskAttemptId);
// check whether all attempts are finished
if (task.finishedAttempts.size() == task.attempts.size()) {
if (task.historyTaskStartGenerated) {
TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task, null,
finalState, null); // TODO JH verify failedAttempt null
task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
taskFailedEvent));
} else {
LOG.debug("Not generating HistoryFinish event since start event not" +
" generated for task: " + task.getID());
}
task.eventHandler.handle(
new JobTaskEvent(task.taskId, getExternalState(finalState)));
return finalState;
}
return task.getInternalState();
}
}
private static class KillWaitAttemptSucceededTransition extends
KillWaitAttemptKilledTransition {
public KillWaitAttemptSucceededTransition() {
super(TaskAttemptCompletionEventStatus.SUCCEEDED);
}
}
private static class KillWaitAttemptFailedTransition extends
KillWaitAttemptKilledTransition {
public KillWaitAttemptFailedTransition() {
super(TaskAttemptCompletionEventStatus.FAILED);
}
}
private static class AttemptFailedTransition implements
MultipleArcTransition<TaskImpl, TaskEvent, TaskStateInternal> {
@Override
public TaskStateInternal transition(TaskImpl task, TaskEvent event) {
TaskTAttemptEvent castEvent = (TaskTAttemptEvent) event;
TaskAttemptId taskAttemptId = castEvent.getTaskAttemptID();
task.failedAttempts.add(taskAttemptId);
if (taskAttemptId.equals(task.commitAttempt)) {
task.commitAttempt = null;
}
TaskAttempt attempt = task.attempts.get(taskAttemptId);
if (attempt.getAssignedContainerMgrAddress() != null) {
//container was assigned
task.eventHandler.handle(new ContainerFailedEvent(attempt.getID(),
attempt.getAssignedContainerMgrAddress()));
}
task.finishedAttempts.add(taskAttemptId);
if (task.failedAttempts.size() < task.maxAttempts) {
task.handleTaskAttemptCompletion(
taskAttemptId,
TaskAttemptCompletionEventStatus.FAILED);
// we don't need a new event if we already have a spare
task.inProgressAttempts.remove(taskAttemptId);
if (task.inProgressAttempts.size() == 0
&& task.successfulAttempt == null) {
task.addAndScheduleAttempt(Avataar.VIRGIN);
}
} else {
task.handleTaskAttemptCompletion(
taskAttemptId,
TaskAttemptCompletionEventStatus.TIPFAILED);
// issue kill to all non finished attempts
for (TaskAttempt taskAttempt : task.attempts.values()) {
task.killUnfinishedAttempt
(taskAttempt, "Task has failed. Killing attempt!");
}
task.inProgressAttempts.clear();
if (task.historyTaskStartGenerated) {
TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task, attempt.getDiagnostics(),
TaskStateInternal.FAILED, taskAttemptId);
task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
taskFailedEvent));
} else {
LOG.debug("Not generating HistoryFinish event since start event not" +
" generated for task: " + task.getID());
}
task.eventHandler.handle(
new JobTaskEvent(task.taskId, TaskState.FAILED));
return task.finished(TaskStateInternal.FAILED);
}
return getDefaultState(task);
}
protected TaskStateInternal getDefaultState(TaskImpl task) {
return task.getInternalState();
}
}
private static class RetroactiveFailureTransition
extends AttemptFailedTransition {
@Override
public TaskStateInternal transition(TaskImpl task, TaskEvent event) {
TaskTAttemptEvent castEvent = (TaskTAttemptEvent) event;
if (task.getInternalState() == TaskStateInternal.SUCCEEDED &&
!castEvent.getTaskAttemptID().equals(task.successfulAttempt)) {
// don't allow a different task attempt to override a previous
// succeeded state
task.finishedAttempts.add(castEvent.getTaskAttemptID());
task.inProgressAttempts.remove(castEvent.getTaskAttemptID());
return TaskStateInternal.SUCCEEDED;
}
// a successful REDUCE task should not be overridden
//TODO: consider moving it to MapTaskImpl
if (!TaskType.MAP.equals(task.getType())) {
LOG.error("Unexpected event for REDUCE task " + event.getType());
task.internalError(event.getType());
}
// tell the job about the rescheduling
task.eventHandler.handle(
new JobMapTaskRescheduledEvent(task.taskId));
// super.transition is mostly coded for the case where an
// UNcompleted task failed. When a COMPLETED task retroactively
// fails, we have to let AttemptFailedTransition.transition
// believe that there's no redundancy.
unSucceed(task);
// fake increase in Uncomplete attempts for super.transition
task.inProgressAttempts.add(castEvent.getTaskAttemptID());
return super.transition(task, event);
}
@Override
protected TaskStateInternal getDefaultState(TaskImpl task) {
return TaskStateInternal.SCHEDULED;
}
}
private static class RetroactiveKilledTransition implements
MultipleArcTransition<TaskImpl, TaskEvent, TaskStateInternal> {
@Override
public TaskStateInternal transition(TaskImpl task, TaskEvent event) {
TaskAttemptId attemptId = null;
if (event instanceof TaskTAttemptEvent) {
TaskTAttemptEvent castEvent = (TaskTAttemptEvent) event;
attemptId = castEvent.getTaskAttemptID();
if (task.getInternalState() == TaskStateInternal.SUCCEEDED &&
!attemptId.equals(task.successfulAttempt)) {
// don't allow a different task attempt to override a previous
// succeeded state
task.finishedAttempts.add(castEvent.getTaskAttemptID());
task.inProgressAttempts.remove(castEvent.getTaskAttemptID());
return TaskStateInternal.SUCCEEDED;
}
}
// a successful REDUCE task should not be overridden
// TODO: consider moving it to MapTaskImpl
if (!TaskType.MAP.equals(task.getType())) {
LOG.error("Unexpected event for REDUCE task " + event.getType());
task.internalError(event.getType());
}
// successful attempt is now killed. reschedule
// tell the job about the rescheduling
unSucceed(task);
task.handleTaskAttemptCompletion(attemptId,
TaskAttemptCompletionEventStatus.KILLED);
task.eventHandler.handle(new JobMapTaskRescheduledEvent(task.taskId));
// typically we are here because this map task was run on a bad node and
// we want to reschedule it on a different node.
// Depending on whether there are previous failed attempts or not this
// can SCHEDULE or RESCHEDULE the container allocate request. If this
// SCHEDULE's then the dataLocal hosts of this taskAttempt will be used
// from the map splitInfo. So the bad node might be sent as a location
// to the RM. But the RM would ignore that just like it would ignore
// currently pending container requests affinitized to bad nodes.
task.addAndScheduleAttempt(Avataar.VIRGIN);
return TaskStateInternal.SCHEDULED;
}
}
private static class AttemptSucceededAtSucceededTransition
implements SingleArcTransition<TaskImpl, TaskEvent> {
@Override
public void transition(TaskImpl task, TaskEvent event) {
TaskTAttemptEvent castEvent = (TaskTAttemptEvent) event;
task.finishedAttempts.add(castEvent.getTaskAttemptID());
task.inProgressAttempts.remove(castEvent.getTaskAttemptID());
}
}
private static class KillNewTransition
implements SingleArcTransition<TaskImpl, TaskEvent> {
@Override
public void transition(TaskImpl task, TaskEvent event) {
if (task.historyTaskStartGenerated) {
TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task, null,
TaskStateInternal.KILLED, null); // TODO Verify failedAttemptId is null
task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
taskFailedEvent));
} else {
LOG.debug("Not generating HistoryFinish event since start event not" +
" generated for task: " + task.getID());
}
task.eventHandler.handle(new JobTaskEvent(task.taskId,
getExternalState(TaskStateInternal.KILLED)));
task.metrics.endWaitingTask(task);
}
}
private void killUnfinishedAttempt(TaskAttempt attempt, String logMsg) {
if (attempt != null && !attempt.isFinished()) {
eventHandler.handle(
new TaskAttemptKillEvent(attempt.getID(), logMsg));
}
}
private static class KillTransition
implements SingleArcTransition<TaskImpl, TaskEvent> {
@Override
public void transition(TaskImpl task, TaskEvent event) {
// issue kill to all non finished attempts
for (TaskAttempt attempt : task.attempts.values()) {
task.killUnfinishedAttempt
(attempt, "Task KILL is received. Killing attempt!");
}
task.inProgressAttempts.clear();
}
}
static class LaunchTransition
implements SingleArcTransition<TaskImpl, TaskEvent> {
@Override
public void transition(TaskImpl task, TaskEvent event) {
task.metrics.launchedTask(task);
task.metrics.runningTask(task);
}
}
}
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/MapTaskImpl.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job.impl;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapTaskAttemptImpl;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener;
import org.apache.hadoop.mapreduce.v2.app.metrics.MRAppMetrics;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.util.Clock;
@SuppressWarnings({ "rawtypes" })
public class MapTaskImpl extends TaskImpl {
private final TaskSplitMetaInfo taskSplitMetaInfo;
public MapTaskImpl(JobId jobId, int partition, EventHandler eventHandler,
Path remoteJobConfFile, JobConf conf,
TaskSplitMetaInfo taskSplitMetaInfo,
TaskAttemptListener taskAttemptListener,
Token<JobTokenIdentifier> jobToken,
Credentials credentials, Clock clock,
int appAttemptId, MRAppMetrics metrics, AppContext appContext) {
super(jobId, TaskType.MAP, partition, eventHandler, remoteJobConfFile,
conf, taskAttemptListener, jobToken, credentials, clock,
appAttemptId, metrics, appContext);
this.taskSplitMetaInfo = taskSplitMetaInfo;
}
@Override
protected int getMaxAttempts() {
return conf.getInt(MRJobConfig.MAP_MAX_ATTEMPTS, 4);
}
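// MRJobConfig.MAP_MAX_ATTEMPTS is the "mapreduce.map.maxattempts"
// configuration key; 4 is the fallback when the key is unset.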
@Override
protected TaskAttemptImpl createAttempt() {
return new MapTaskAttemptImpl(getID(), nextAttemptNumber,
eventHandler, jobFile,
partition, taskSplitMetaInfo, conf, taskAttemptListener,
jobToken, credentials, clock, appContext);
}
@Override
public TaskType getType() {
return TaskType.MAP;
}
protected TaskSplitMetaInfo getTaskSplitMetaInfo() {
return this.taskSplitMetaInfo;
}
/**
* @return a String formatted as a comma-separated list of splits.
*/
@Override
protected String getSplitsAsString() {
String[] splits = getTaskSplitMetaInfo().getLocations();
if (splits == null || splits.length == 0)
return "";
StringBuilder sb = new StringBuilder();
for (int i = 0; i < splits.length; i++) {
if (i != 0) sb.append(",");
sb.append(splits[i]);
}
return sb.toString();
}
}
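// --- Equivalent sketch (not part of the original source): the manual loop
// in getSplitsAsString() above produces the same result as String.join,
// which is only available from Java 8 onward; that may be why the loop
// form is used in the original.
class SplitsAsStringExample {
  static String splitsAsString(String[] splits) {
    // Null or empty input yields "", mirroring the method above.
    return (splits == null || splits.length == 0)
        ? "" : String.join(",", splits);
  }
}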
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job.impl;
import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collection;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobContext;
import org.apache.hadoop.mapred.MapReduceChildJVM;
import org.apache.hadoop.mapred.ShuffleHandler;
import org.apache.hadoop.mapred.Task;
import org.apache.hadoop.mapred.TaskAttemptContextImpl;
import org.apache.hadoop.mapred.WrappedJvmID;
import org.apache.hadoop.mapred.WrappedProgressSplitsBlock;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.JobCounter;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskCounter;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo;
import org.apache.hadoop.mapreduce.jobhistory.MapAttemptFinishedEvent;
import org.apache.hadoop.mapreduce.jobhistory.ReduceAttemptFinishedEvent;
import org.apache.hadoop.mapreduce.jobhistory.TaskAttemptStartedEvent;
import org.apache.hadoop.mapreduce.jobhistory.TaskAttemptUnsuccessfulCompletionEvent;
import org.apache.hadoop.mapreduce.security.TokenCache;
import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
import org.apache.hadoop.mapreduce.v2.api.records.Avataar;
import org.apache.hadoop.mapreduce.v2.api.records.Locality;
import org.apache.hadoop.mapreduce.v2.api.records.Phase;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener;
import org.apache.hadoop.mapreduce.v2.app.commit.CommitterTaskAbortEvent;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttemptStateInternal;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobDiagnosticsUpdateEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskAttemptFetchFailureEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerLaunchedEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptDiagnosticsUpdateEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptKillEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptRecoverEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptTooManyFetchFailureEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptEvent;
import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncher;
import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherEvent;
import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerRemoteLaunchEvent;
import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent;
import org.apache.hadoop.mapreduce.v2.app.rm.ContainerRequestEvent;
import org.apache.hadoop.mapreduce.v2.app.speculate.SpeculatorEvent;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.StringInterner;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.URL;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.state.InvalidStateTransitionException;
import org.apache.hadoop.yarn.state.MultipleArcTransition;
import org.apache.hadoop.yarn.state.SingleArcTransition;
import org.apache.hadoop.yarn.state.StateMachine;
import org.apache.hadoop.yarn.state.StateMachineFactory;
import org.apache.hadoop.yarn.util.Clock;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.RackResolver;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
/**
* Implementation of the TaskAttempt interface.
*/
@SuppressWarnings({ "rawtypes" })
public abstract class TaskAttemptImpl implements
org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt,
EventHandler<TaskAttemptEvent> {
static final Counters EMPTY_COUNTERS = new Counters();
private static final Log LOG = LogFactory.getLog(TaskAttemptImpl.class);
private static final long MEMORY_SPLITS_RESOLUTION = 1024; //TODO Make configurable?
private static final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
protected final JobConf conf;
protected final Path jobFile;
protected final int partition;
protected EventHandler eventHandler;
private final TaskAttemptId attemptId;
private final Clock clock;
private final org.apache.hadoop.mapred.JobID oldJobId;
private final TaskAttemptListener taskAttemptListener;
private final Resource resourceCapability;
protected Set<String> dataLocalHosts;
protected Set<String> dataLocalRacks;
private final List<String> diagnostics = new ArrayList<String>();
private final Lock readLock;
private final Lock writeLock;
private final AppContext appContext;
private Credentials credentials;
private Token<JobTokenIdentifier> jobToken;
private static AtomicBoolean initialClasspathFlag = new AtomicBoolean();
private static String initialClasspath = null;
private static String initialAppClasspath = null;
private static Object commonContainerSpecLock = new Object();
private static ContainerLaunchContext commonContainerSpec = null;
private static final Object classpathLock = new Object();
private long launchTime;
private long finishTime;
private WrappedProgressSplitsBlock progressSplitBlock;
private int shufflePort = -1;
private String trackerName;
private int httpPort;
private Locality locality;
private Avataar avataar;
private static final CleanupContainerTransition
CLEANUP_CONTAINER_TRANSITION = new CleanupContainerTransition();
private static final MoveContainerToSucceededFinishingTransition
SUCCEEDED_FINISHING_TRANSITION =
new MoveContainerToSucceededFinishingTransition();
private static final MoveContainerToFailedFinishingTransition
FAILED_FINISHING_TRANSITION =
new MoveContainerToFailedFinishingTransition();
private static final ExitFinishingOnTimeoutTransition
FINISHING_ON_TIMEOUT_TRANSITION =
new ExitFinishingOnTimeoutTransition();
private static final FinalizeFailedTransition FINALIZE_FAILED_TRANSITION =
new FinalizeFailedTransition();
private static final DiagnosticInformationUpdater
DIAGNOSTIC_INFORMATION_UPDATE_TRANSITION
= new DiagnosticInformationUpdater();
private static final EnumSet<TaskAttemptEventType>
FAILED_KILLED_STATE_IGNORED_EVENTS = EnumSet.of(
TaskAttemptEventType.TA_KILL,
TaskAttemptEventType.TA_ASSIGNED,
TaskAttemptEventType.TA_CONTAINER_COMPLETED,
TaskAttemptEventType.TA_UPDATE,
// Container launch events can arrive late
TaskAttemptEventType.TA_CONTAINER_LAUNCHED,
TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED,
TaskAttemptEventType.TA_CONTAINER_CLEANED,
TaskAttemptEventType.TA_COMMIT_PENDING,
TaskAttemptEventType.TA_DONE,
TaskAttemptEventType.TA_FAILMSG,
TaskAttemptEventType.TA_FAILMSG_BY_CLIENT,
TaskAttemptEventType.TA_TIMED_OUT,
TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE);
private static final StateMachineFactory
<TaskAttemptImpl, TaskAttemptStateInternal, TaskAttemptEventType, TaskAttemptEvent>
stateMachineFactory
= new StateMachineFactory
<TaskAttemptImpl, TaskAttemptStateInternal, TaskAttemptEventType, TaskAttemptEvent>
(TaskAttemptStateInternal.NEW)
// Transitions from the NEW state.
.addTransition(TaskAttemptStateInternal.NEW, TaskAttemptStateInternal.UNASSIGNED,
TaskAttemptEventType.TA_SCHEDULE, new RequestContainerTransition(false))
.addTransition(TaskAttemptStateInternal.NEW, TaskAttemptStateInternal.UNASSIGNED,
TaskAttemptEventType.TA_RESCHEDULE, new RequestContainerTransition(true))
.addTransition(TaskAttemptStateInternal.NEW, TaskAttemptStateInternal.KILLED,
TaskAttemptEventType.TA_KILL, new KilledTransition())
.addTransition(TaskAttemptStateInternal.NEW, TaskAttemptStateInternal.FAILED,
TaskAttemptEventType.TA_FAILMSG_BY_CLIENT, new FailedTransition())
.addTransition(TaskAttemptStateInternal.NEW,
EnumSet.of(TaskAttemptStateInternal.FAILED,
TaskAttemptStateInternal.KILLED,
TaskAttemptStateInternal.SUCCEEDED),
TaskAttemptEventType.TA_RECOVER, new RecoverTransition())
.addTransition(TaskAttemptStateInternal.NEW,
TaskAttemptStateInternal.NEW,
TaskAttemptEventType.TA_DIAGNOSTICS_UPDATE,
DIAGNOSTIC_INFORMATION_UPDATE_TRANSITION)
// Transitions from the UNASSIGNED state.
.addTransition(TaskAttemptStateInternal.UNASSIGNED,
TaskAttemptStateInternal.ASSIGNED, TaskAttemptEventType.TA_ASSIGNED,
new ContainerAssignedTransition())
.addTransition(TaskAttemptStateInternal.UNASSIGNED, TaskAttemptStateInternal.KILLED,
TaskAttemptEventType.TA_KILL, new DeallocateContainerTransition(
TaskAttemptStateInternal.KILLED, true))
.addTransition(TaskAttemptStateInternal.UNASSIGNED, TaskAttemptStateInternal.FAILED,
TaskAttemptEventType.TA_FAILMSG_BY_CLIENT, new DeallocateContainerTransition(
TaskAttemptStateInternal.FAILED, true))
.addTransition(TaskAttemptStateInternal.UNASSIGNED,
TaskAttemptStateInternal.UNASSIGNED,
TaskAttemptEventType.TA_DIAGNOSTICS_UPDATE,
DIAGNOSTIC_INFORMATION_UPDATE_TRANSITION)
// Transitions from the ASSIGNED state.
.addTransition(TaskAttemptStateInternal.ASSIGNED, TaskAttemptStateInternal.RUNNING,
TaskAttemptEventType.TA_CONTAINER_LAUNCHED,
new LaunchedContainerTransition())
.addTransition(TaskAttemptStateInternal.ASSIGNED, TaskAttemptStateInternal.ASSIGNED,
TaskAttemptEventType.TA_DIAGNOSTICS_UPDATE,
DIAGNOSTIC_INFORMATION_UPDATE_TRANSITION)
.addTransition(TaskAttemptStateInternal.ASSIGNED, TaskAttemptStateInternal.FAILED,
TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED,
new DeallocateContainerTransition(TaskAttemptStateInternal.FAILED, false))
.addTransition(TaskAttemptStateInternal.ASSIGNED,
TaskAttemptStateInternal.FAILED,
TaskAttemptEventType.TA_CONTAINER_COMPLETED,
FINALIZE_FAILED_TRANSITION)
.addTransition(TaskAttemptStateInternal.ASSIGNED,
TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP,
TaskAttemptEventType.TA_KILL, CLEANUP_CONTAINER_TRANSITION)
.addTransition(TaskAttemptStateInternal.ASSIGNED,
TaskAttemptStateInternal.FAIL_FINISHING_CONTAINER,
TaskAttemptEventType.TA_FAILMSG, FAILED_FINISHING_TRANSITION)
.addTransition(TaskAttemptStateInternal.ASSIGNED,
TaskAttemptStateInternal.FAIL_CONTAINER_CLEANUP,
TaskAttemptEventType.TA_FAILMSG_BY_CLIENT,
CLEANUP_CONTAINER_TRANSITION)
// Transitions from RUNNING state.
.addTransition(TaskAttemptStateInternal.RUNNING, TaskAttemptStateInternal.RUNNING,
TaskAttemptEventType.TA_UPDATE, new StatusUpdater())
.addTransition(TaskAttemptStateInternal.RUNNING, TaskAttemptStateInternal.RUNNING,
TaskAttemptEventType.TA_DIAGNOSTICS_UPDATE,
DIAGNOSTIC_INFORMATION_UPDATE_TRANSITION)
// If no commit is required, task goes to finishing state
// This will give a chance for the container to exit by itself
.addTransition(TaskAttemptStateInternal.RUNNING,
TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER,
TaskAttemptEventType.TA_DONE, SUCCEEDED_FINISHING_TRANSITION)
// If commit is required, task goes through commit pending state.
.addTransition(TaskAttemptStateInternal.RUNNING,
TaskAttemptStateInternal.COMMIT_PENDING,
TaskAttemptEventType.TA_COMMIT_PENDING, new CommitPendingTransition())
// Failure handling while RUNNING
.addTransition(TaskAttemptStateInternal.RUNNING,
TaskAttemptStateInternal.FAIL_FINISHING_CONTAINER,
TaskAttemptEventType.TA_FAILMSG, FAILED_FINISHING_TRANSITION)
.addTransition(TaskAttemptStateInternal.RUNNING,
TaskAttemptStateInternal.FAIL_CONTAINER_CLEANUP,
TaskAttemptEventType.TA_FAILMSG_BY_CLIENT, CLEANUP_CONTAINER_TRANSITION)
//for handling container exit without sending the done or fail message
.addTransition(TaskAttemptStateInternal.RUNNING,
TaskAttemptStateInternal.FAILED,
TaskAttemptEventType.TA_CONTAINER_COMPLETED,
FINALIZE_FAILED_TRANSITION)
// Timeout handling while RUNNING
.addTransition(TaskAttemptStateInternal.RUNNING,
TaskAttemptStateInternal.FAIL_CONTAINER_CLEANUP,
TaskAttemptEventType.TA_TIMED_OUT, CLEANUP_CONTAINER_TRANSITION)
// if container killed by AM shutting down
.addTransition(TaskAttemptStateInternal.RUNNING,
TaskAttemptStateInternal.KILLED,
TaskAttemptEventType.TA_CONTAINER_CLEANED, new KilledTransition())
// Kill handling
.addTransition(TaskAttemptStateInternal.RUNNING,
TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP,
TaskAttemptEventType.TA_KILL,
CLEANUP_CONTAINER_TRANSITION)
// Transitions from SUCCESS_FINISHING_CONTAINER state
// When the container exits by itself, the notification of container
// completed event will be routed via NM -> RM -> AM.
// After MRAppMaster gets notification from RM, it will generate
// TA_CONTAINER_COMPLETED event.
.addTransition(TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER,
TaskAttemptStateInternal.SUCCEEDED,
TaskAttemptEventType.TA_CONTAINER_COMPLETED,
new ExitFinishingOnContainerCompletedTransition())
// Given TA notifies task T_ATTEMPT_SUCCEEDED when it transitions to
// SUCCESS_FINISHING_CONTAINER, it is possible to receive the event
// TA_CONTAINER_CLEANED in the following scenario.
// 1. It is the last task for the job.
// 2. After the task receives T_ATTEMPT_SUCCEEDED, it will notify job.
// 3. Job will be marked completed.
// 4. As part of MRAppMaster's shutdown, all containers will be killed.
.addTransition(TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER,
TaskAttemptStateInternal.SUCCEEDED,
TaskAttemptEventType.TA_CONTAINER_CLEANED,
new ExitFinishingOnContainerCleanedupTransition())
// The client wants to kill the task. Given the task is in finishing
// state, it could go to succeeded state or killed state. If it is a
// reducer, it will go to succeeded state;
// otherwise, it goes to killed state.
.addTransition(TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER,
EnumSet.of(TaskAttemptStateInternal.SUCCESS_CONTAINER_CLEANUP,
TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP),
TaskAttemptEventType.TA_KILL,
new KilledAfterSucceededFinishingTransition())
// The attempt stays in finishing state for too long
// Let us clean up the container
.addTransition(TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER,
TaskAttemptStateInternal.SUCCESS_CONTAINER_CLEANUP,
TaskAttemptEventType.TA_TIMED_OUT, FINISHING_ON_TIMEOUT_TRANSITION)
.addTransition(TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER,
TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER,
TaskAttemptEventType.TA_DIAGNOSTICS_UPDATE,
DIAGNOSTIC_INFORMATION_UPDATE_TRANSITION)
// Ignore-able events
.addTransition(TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER,
TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER,
EnumSet.of(TaskAttemptEventType.TA_UPDATE,
TaskAttemptEventType.TA_DONE,
TaskAttemptEventType.TA_COMMIT_PENDING,
TaskAttemptEventType.TA_FAILMSG,
TaskAttemptEventType.TA_FAILMSG_BY_CLIENT))
// Transitions from FAIL_FINISHING_CONTAINER state
// When the container exits by itself, the notification of container
// completed event will be routed via NM -> RM -> AM.
// After MRAppMaster gets notification from RM, it will generate
// TA_CONTAINER_COMPLETED event.
.addTransition(TaskAttemptStateInternal.FAIL_FINISHING_CONTAINER,
TaskAttemptStateInternal.FAILED,
TaskAttemptEventType.TA_CONTAINER_COMPLETED,
new ExitFinishingOnContainerCompletedTransition())
// Given TA notifies task T_ATTEMPT_FAILED when it transitions to
// FAIL_FINISHING_CONTAINER, it is possible to receive the event
// TA_CONTAINER_CLEANED in the following scenario.
// 1. It is the last task attempt for the task.
// 2. After the task receives T_ATTEMPT_FAILED, it will notify job.
// 3. Job will be marked failed.
// 4. As part of MRAppMaster's shutdown, all containers will be killed.
.addTransition(TaskAttemptStateInternal.FAIL_FINISHING_CONTAINER,
TaskAttemptStateInternal.FAILED,
TaskAttemptEventType.TA_CONTAINER_CLEANED,
new ExitFinishingOnContainerCleanedupTransition())
.addTransition(TaskAttemptStateInternal.FAIL_FINISHING_CONTAINER,
TaskAttemptStateInternal.FAIL_CONTAINER_CLEANUP,
TaskAttemptEventType.TA_TIMED_OUT, FINISHING_ON_TIMEOUT_TRANSITION)
.addTransition(TaskAttemptStateInternal.FAIL_FINISHING_CONTAINER,
TaskAttemptStateInternal.FAIL_FINISHING_CONTAINER,
TaskAttemptEventType.TA_DIAGNOSTICS_UPDATE,
DIAGNOSTIC_INFORMATION_UPDATE_TRANSITION)
// Ignore-able events
.addTransition(TaskAttemptStateInternal.FAIL_FINISHING_CONTAINER,
TaskAttemptStateInternal.FAIL_FINISHING_CONTAINER,
EnumSet.of(TaskAttemptEventType.TA_KILL,
TaskAttemptEventType.TA_UPDATE,
TaskAttemptEventType.TA_DONE,
TaskAttemptEventType.TA_COMMIT_PENDING,
TaskAttemptEventType.TA_FAILMSG,
TaskAttemptEventType.TA_FAILMSG_BY_CLIENT))
// Transitions from COMMIT_PENDING state
.addTransition(TaskAttemptStateInternal.COMMIT_PENDING,
TaskAttemptStateInternal.COMMIT_PENDING, TaskAttemptEventType.TA_UPDATE,
new StatusUpdater())
.addTransition(TaskAttemptStateInternal.COMMIT_PENDING,
TaskAttemptStateInternal.COMMIT_PENDING,
TaskAttemptEventType.TA_DIAGNOSTICS_UPDATE,
DIAGNOSTIC_INFORMATION_UPDATE_TRANSITION)
.addTransition(TaskAttemptStateInternal.COMMIT_PENDING,
TaskAttemptStateInternal.SUCCESS_FINISHING_CONTAINER,
TaskAttemptEventType.TA_DONE, SUCCEEDED_FINISHING_TRANSITION)
.addTransition(TaskAttemptStateInternal.COMMIT_PENDING,
TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP,
TaskAttemptEventType.TA_KILL,
CLEANUP_CONTAINER_TRANSITION)
// if container killed by AM shutting down
.addTransition(TaskAttemptStateInternal.COMMIT_PENDING,
TaskAttemptStateInternal.KILLED,
TaskAttemptEventType.TA_CONTAINER_CLEANED, new KilledTransition())
.addTransition(TaskAttemptStateInternal.COMMIT_PENDING,
TaskAttemptStateInternal.FAIL_FINISHING_CONTAINER,
TaskAttemptEventType.TA_FAILMSG, FAILED_FINISHING_TRANSITION)
.addTransition(TaskAttemptStateInternal.COMMIT_PENDING,
TaskAttemptStateInternal.FAIL_CONTAINER_CLEANUP,
TaskAttemptEventType.TA_FAILMSG_BY_CLIENT,
CLEANUP_CONTAINER_TRANSITION)
.addTransition(TaskAttemptStateInternal.COMMIT_PENDING,
TaskAttemptStateInternal.FAILED,
TaskAttemptEventType.TA_CONTAINER_COMPLETED,
FINALIZE_FAILED_TRANSITION)
.addTransition(TaskAttemptStateInternal.COMMIT_PENDING,
TaskAttemptStateInternal.FAIL_CONTAINER_CLEANUP,
TaskAttemptEventType.TA_TIMED_OUT, CLEANUP_CONTAINER_TRANSITION)
// AM is likely to receive duplicate TA_COMMIT_PENDINGs as the task attempt
// will re-send the commit message until it succeeds in delivering it
// without encountering an IOException.
// Ignoring the duplicate commit message is a short-term fix. In long term,
// we need to make use of retry cache to help this and other MR protocol
// APIs that can be considered as @AtMostOnce.
.addTransition(TaskAttemptStateInternal.COMMIT_PENDING,
TaskAttemptStateInternal.COMMIT_PENDING,
TaskAttemptEventType.TA_COMMIT_PENDING)
// Transitions from SUCCESS_CONTAINER_CLEANUP state
// kill and cleanup the container
.addTransition(TaskAttemptStateInternal.SUCCESS_CONTAINER_CLEANUP,
TaskAttemptStateInternal.SUCCEEDED,
TaskAttemptEventType.TA_CONTAINER_CLEANED)
.addTransition(
TaskAttemptStateInternal.SUCCESS_CONTAINER_CLEANUP,
TaskAttemptStateInternal.SUCCESS_CONTAINER_CLEANUP,
TaskAttemptEventType.TA_DIAGNOSTICS_UPDATE,
DIAGNOSTIC_INFORMATION_UPDATE_TRANSITION)
// Ignore-able events
.addTransition(TaskAttemptStateInternal.SUCCESS_CONTAINER_CLEANUP,
TaskAttemptStateInternal.SUCCESS_CONTAINER_CLEANUP,
EnumSet.of(TaskAttemptEventType.TA_KILL,
TaskAttemptEventType.TA_FAILMSG,
TaskAttemptEventType.TA_FAILMSG_BY_CLIENT,
TaskAttemptEventType.TA_TIMED_OUT,
TaskAttemptEventType.TA_CONTAINER_COMPLETED))
// Transitions from FAIL_CONTAINER_CLEANUP state.
.addTransition(TaskAttemptStateInternal.FAIL_CONTAINER_CLEANUP,
TaskAttemptStateInternal.FAIL_TASK_CLEANUP,
TaskAttemptEventType.TA_CONTAINER_CLEANED, new TaskCleanupTransition())
.addTransition(TaskAttemptStateInternal.FAIL_CONTAINER_CLEANUP,
TaskAttemptStateInternal.FAIL_CONTAINER_CLEANUP,
TaskAttemptEventType.TA_DIAGNOSTICS_UPDATE,
DIAGNOSTIC_INFORMATION_UPDATE_TRANSITION)
// Ignore-able events
.addTransition(TaskAttemptStateInternal.FAIL_CONTAINER_CLEANUP,
TaskAttemptStateInternal.FAIL_CONTAINER_CLEANUP,
EnumSet.of(TaskAttemptEventType.TA_KILL,
TaskAttemptEventType.TA_CONTAINER_COMPLETED,
TaskAttemptEventType.TA_UPDATE,
TaskAttemptEventType.TA_COMMIT_PENDING,
// Container launch events can arrive late
TaskAttemptEventType.TA_CONTAINER_LAUNCHED,
TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED,
TaskAttemptEventType.TA_DONE,
TaskAttemptEventType.TA_FAILMSG,
TaskAttemptEventType.TA_FAILMSG_BY_CLIENT,
TaskAttemptEventType.TA_TIMED_OUT))
// Transitions from KILL_CONTAINER_CLEANUP
.addTransition(TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP,
TaskAttemptStateInternal.KILL_TASK_CLEANUP,
TaskAttemptEventType.TA_CONTAINER_CLEANED, new TaskCleanupTransition())
.addTransition(TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP,
TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP,
TaskAttemptEventType.TA_DIAGNOSTICS_UPDATE,
DIAGNOSTIC_INFORMATION_UPDATE_TRANSITION)
// Ignore-able events
.addTransition(
TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP,
TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP,
EnumSet.of(TaskAttemptEventType.TA_KILL,
TaskAttemptEventType.TA_CONTAINER_COMPLETED,
TaskAttemptEventType.TA_UPDATE,
TaskAttemptEventType.TA_COMMIT_PENDING,
TaskAttemptEventType.TA_CONTAINER_LAUNCHED,
TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED,
TaskAttemptEventType.TA_DONE,
TaskAttemptEventType.TA_FAILMSG,
TaskAttemptEventType.TA_FAILMSG_BY_CLIENT,
TaskAttemptEventType.TA_TIMED_OUT))
// Transitions from FAIL_TASK_CLEANUP
// run the task cleanup
.addTransition(TaskAttemptStateInternal.FAIL_TASK_CLEANUP,
TaskAttemptStateInternal.FAILED, TaskAttemptEventType.TA_CLEANUP_DONE,
new FailedTransition())
.addTransition(TaskAttemptStateInternal.FAIL_TASK_CLEANUP,
TaskAttemptStateInternal.FAIL_TASK_CLEANUP,
TaskAttemptEventType.TA_DIAGNOSTICS_UPDATE,
DIAGNOSTIC_INFORMATION_UPDATE_TRANSITION)
// Ignore-able events
.addTransition(TaskAttemptStateInternal.FAIL_TASK_CLEANUP,
TaskAttemptStateInternal.FAIL_TASK_CLEANUP,
EnumSet.of(TaskAttemptEventType.TA_KILL,
TaskAttemptEventType.TA_CONTAINER_COMPLETED,
TaskAttemptEventType.TA_UPDATE,
TaskAttemptEventType.TA_COMMIT_PENDING,
TaskAttemptEventType.TA_DONE,
TaskAttemptEventType.TA_FAILMSG,
TaskAttemptEventType.TA_FAILMSG_BY_CLIENT,
TaskAttemptEventType.TA_CONTAINER_CLEANED,
// Container launch events can arrive late
TaskAttemptEventType.TA_CONTAINER_LAUNCHED,
TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED))
// Transitions from KILL_TASK_CLEANUP
.addTransition(TaskAttemptStateInternal.KILL_TASK_CLEANUP,
TaskAttemptStateInternal.KILLED, TaskAttemptEventType.TA_CLEANUP_DONE,
new KilledTransition())
.addTransition(TaskAttemptStateInternal.KILL_TASK_CLEANUP,
TaskAttemptStateInternal.KILL_TASK_CLEANUP,
TaskAttemptEventType.TA_DIAGNOSTICS_UPDATE,
DIAGNOSTIC_INFORMATION_UPDATE_TRANSITION)
// Ignore-able events
.addTransition(TaskAttemptStateInternal.KILL_TASK_CLEANUP,
TaskAttemptStateInternal.KILL_TASK_CLEANUP,
EnumSet.of(TaskAttemptEventType.TA_KILL,
TaskAttemptEventType.TA_CONTAINER_COMPLETED,
TaskAttemptEventType.TA_UPDATE,
TaskAttemptEventType.TA_COMMIT_PENDING,
TaskAttemptEventType.TA_DONE,
TaskAttemptEventType.TA_FAILMSG,
TaskAttemptEventType.TA_FAILMSG_BY_CLIENT,
TaskAttemptEventType.TA_CONTAINER_CLEANED,
// Container launch events can arrive late
TaskAttemptEventType.TA_CONTAINER_LAUNCHED,
TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED))
// Transitions from SUCCEEDED
.addTransition(TaskAttemptStateInternal.SUCCEEDED, //only possible for map attempts
TaskAttemptStateInternal.FAILED,
TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE,
new TooManyFetchFailureTransition())
.addTransition(TaskAttemptStateInternal.SUCCEEDED,
EnumSet.of(TaskAttemptStateInternal.SUCCEEDED, TaskAttemptStateInternal.KILLED),
TaskAttemptEventType.TA_KILL,
new KilledAfterSuccessTransition())
.addTransition(
TaskAttemptStateInternal.SUCCEEDED, TaskAttemptStateInternal.SUCCEEDED,
TaskAttemptEventType.TA_DIAGNOSTICS_UPDATE,
DIAGNOSTIC_INFORMATION_UPDATE_TRANSITION)
// Ignore-able events for SUCCEEDED state
.addTransition(TaskAttemptStateInternal.SUCCEEDED,
TaskAttemptStateInternal.SUCCEEDED,
EnumSet.of(TaskAttemptEventType.TA_FAILMSG,
TaskAttemptEventType.TA_FAILMSG_BY_CLIENT,
// TaskAttemptFinishingMonitor might time out the attempt right
// after the attempt receives TA_CONTAINER_COMPLETED.
TaskAttemptEventType.TA_TIMED_OUT,
TaskAttemptEventType.TA_CONTAINER_CLEANED,
TaskAttemptEventType.TA_CONTAINER_COMPLETED))
// Transitions from FAILED state
.addTransition(TaskAttemptStateInternal.FAILED, TaskAttemptStateInternal.FAILED,
TaskAttemptEventType.TA_DIAGNOSTICS_UPDATE,
DIAGNOSTIC_INFORMATION_UPDATE_TRANSITION)
// Ignore-able events for FAILED state
.addTransition(TaskAttemptStateInternal.FAILED, TaskAttemptStateInternal.FAILED,
FAILED_KILLED_STATE_IGNORED_EVENTS)
// Transitions from KILLED state
.addTransition(TaskAttemptStateInternal.KILLED, TaskAttemptStateInternal.KILLED,
TaskAttemptEventType.TA_DIAGNOSTICS_UPDATE,
DIAGNOSTIC_INFORMATION_UPDATE_TRANSITION)
// Ignore-able events for KILLED state
.addTransition(TaskAttemptStateInternal.KILLED, TaskAttemptStateInternal.KILLED,
FAILED_KILLED_STATE_IGNORED_EVENTS)
// create the topology tables
.installTopology();
private final StateMachine
<TaskAttemptStateInternal, TaskAttemptEventType, TaskAttemptEvent>
stateMachine;
@VisibleForTesting
public Container container;
private String nodeRackName;
private WrappedJvmID jvmID;
//this takes a good amount of memory (~30KB). Instantiate it lazily
//and set it to null once the task is launched.
private org.apache.hadoop.mapred.Task remoteTask;
//this is the last status reported by the REMOTE running attempt
private TaskAttemptStatus reportedStatus;
private static final String LINE_SEPARATOR = System
.getProperty("line.separator");
public TaskAttemptImpl(TaskId taskId, int i,
EventHandler eventHandler,
TaskAttemptListener taskAttemptListener, Path jobFile, int partition,
JobConf conf, String[] dataLocalHosts,
Token<JobTokenIdentifier> jobToken,
Credentials credentials, Clock clock,
AppContext appContext) {
oldJobId = TypeConverter.fromYarn(taskId.getJobId());
this.conf = conf;
this.clock = clock;
attemptId = recordFactory.newRecordInstance(TaskAttemptId.class);
attemptId.setTaskId(taskId);
attemptId.setId(i);
this.taskAttemptListener = taskAttemptListener;
this.appContext = appContext;
// Initialize reportedStatus
reportedStatus = new TaskAttemptStatus();
initTaskAttemptStatus(reportedStatus);
ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
readLock = readWriteLock.readLock();
writeLock = readWriteLock.writeLock();
this.credentials = credentials;
this.jobToken = jobToken;
this.eventHandler = eventHandler;
this.jobFile = jobFile;
this.partition = partition;
//TODO: create the resource request for this task attempt
this.resourceCapability = recordFactory.newRecordInstance(Resource.class);
this.resourceCapability.setMemory(
getMemoryRequired(conf, taskId.getTaskType()));
this.resourceCapability.setVirtualCores(
getCpuRequired(conf, taskId.getTaskType()));
this.dataLocalHosts = resolveHosts(dataLocalHosts);
RackResolver.init(conf);
this.dataLocalRacks = new HashSet<String>();
for (String host : this.dataLocalHosts) {
this.dataLocalRacks.add(RackResolver.resolve(host).getNetworkLocation());
}
locality = Locality.OFF_SWITCH;
avataar = Avataar.VIRGIN;
// This "this leak" is okay because the retained pointer is in an
// instance variable.
stateMachine = stateMachineFactory.make(this);
}
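/**
 * Returns the container memory (in MB) to request for the given task type,
 * read from MRJobConfig.MAP_MEMORY_MB or MRJobConfig.REDUCE_MEMORY_MB;
 * defaults to 1024 MB for other task types.
 */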
private int getMemoryRequired(Configuration conf, TaskType taskType) {
int memory = 1024;
if (taskType == TaskType.MAP) {
memory =
conf.getInt(MRJobConfig.MAP_MEMORY_MB,
MRJobConfig.DEFAULT_MAP_MEMORY_MB);
} else if (taskType == TaskType.REDUCE) {
memory =
conf.getInt(MRJobConfig.REDUCE_MEMORY_MB,
MRJobConfig.DEFAULT_REDUCE_MEMORY_MB);
}
return memory;
}
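/**
 * Returns the number of virtual cores to request for the given task type,
 * read from MRJobConfig.MAP_CPU_VCORES or MRJobConfig.REDUCE_CPU_VCORES;
 * defaults to 1 vcore for other task types.
 */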
private int getCpuRequired(Configuration conf, TaskType taskType) {
int vcores = 1;
if (taskType == TaskType.MAP) {
vcores =
conf.getInt(MRJobConfig.MAP_CPU_VCORES,
MRJobConfig.DEFAULT_MAP_CPU_VCORES);
} else if (taskType == TaskType.REDUCE) {
vcores =
conf.getInt(MRJobConfig.REDUCE_CPU_VCORES,
MRJobConfig.DEFAULT_REDUCE_CPU_VCORES);
}
return vcores;
}
/**
* Create a {@link LocalResource} record with all the given parameters.
*/
private static LocalResource createLocalResource(FileSystem fc, Path file,
LocalResourceType type, LocalResourceVisibility visibility)
throws IOException {
FileStatus fstat = fc.getFileStatus(file);
URL resourceURL = ConverterUtils.getYarnUrlFromPath(fc.resolvePath(fstat
.getPath()));
long resourceSize = fstat.getLen();
long resourceModificationTime = fstat.getModificationTime();
return LocalResource.newInstance(resourceURL, type, visibility,
resourceSize, resourceModificationTime);
}
/**
 * Synchronize on classpathLock so that the initial class-path is computed
 * only once in the AM. TODO: We already construct
 * a parent CLC and use it for all the containers, so this should go away
 * once the mr-generated-classpath stuff is gone.
 */
private static String getInitialClasspath(Configuration conf) throws IOException {
synchronized (classpathLock) {
if (initialClasspathFlag.get()) {
return initialClasspath;
}
Map<String, String> env = new HashMap<String, String>();
MRApps.setClasspath(env, conf);
initialClasspath = env.get(Environment.CLASSPATH.name());
initialAppClasspath = env.get(Environment.APP_CLASSPATH.name());
initialClasspathFlag.set(true);
return initialClasspath;
}
}
/**
 * Create the common {@link ContainerLaunchContext} shared by all attempts.
 * @param applicationACLs the application ACLs to set on each container
 */
private static ContainerLaunchContext createCommonContainerLaunchContext(
Map<ApplicationAccessType, String> applicationACLs, Configuration conf,
Token<JobTokenIdentifier> jobToken,
final org.apache.hadoop.mapred.JobID oldJobId,
Credentials credentials) {
// Application resources
Map<String, LocalResource> localResources =
new HashMap<String, LocalResource>();
// Application environment
Map<String, String> environment = new HashMap<String, String>();
// Service data
Map<String, ByteBuffer> serviceData = new HashMap<String, ByteBuffer>();
// Tokens
ByteBuffer taskCredentialsBuffer = ByteBuffer.wrap(new byte[]{});
try {
FileSystem remoteFS = FileSystem.get(conf);
// //////////// Set up JobJar to be localized properly on the remote NM.
String jobJar = conf.get(MRJobConfig.JAR);
if (jobJar != null) {
final Path jobJarPath = new Path(jobJar);
final FileSystem jobJarFs = FileSystem.get(jobJarPath.toUri(), conf);
Path remoteJobJar = jobJarPath.makeQualified(jobJarFs.getUri(),
jobJarFs.getWorkingDirectory());
LocalResource rc = createLocalResource(jobJarFs, remoteJobJar,
LocalResourceType.PATTERN, LocalResourceVisibility.APPLICATION);
String pattern = conf.getPattern(JobContext.JAR_UNPACK_PATTERN,
JobConf.UNPACK_JAR_PATTERN_DEFAULT).pattern();
rc.setPattern(pattern);
localResources.put(MRJobConfig.JOB_JAR, rc);
LOG.info("The job-jar file on the remote FS is "
+ remoteJobJar.toUri().toASCIIString());
} else {
// Job jar may be null. E.g., for pipes, the job jar is the hadoop
// mapreduce jar itself, which is already on the classpath.
LOG.info("Job jar is not present. "
+ "Not adding any jar to the list of resources.");
}
// //////////// End of JobJar setup
// //////////// Set up JobConf to be localized properly on the remote NM.
Path path =
MRApps.getStagingAreaDir(conf, UserGroupInformation
.getCurrentUser().getShortUserName());
Path remoteJobSubmitDir =
new Path(path, oldJobId.toString());
Path remoteJobConfPath =
new Path(remoteJobSubmitDir, MRJobConfig.JOB_CONF_FILE);
localResources.put(
MRJobConfig.JOB_CONF_FILE,
createLocalResource(remoteFS, remoteJobConfPath,
LocalResourceType.FILE, LocalResourceVisibility.APPLICATION));
LOG.info("The job-conf file on the remote FS is "
+ remoteJobConfPath.toUri().toASCIIString());
// //////////// End of JobConf setup
// Setup DistributedCache
MRApps.setupDistributedCache(conf, localResources);
// Set up the task credentials buffer
LOG.info("Adding #" + credentials.numberOfTokens()
+ " tokens and #" + credentials.numberOfSecretKeys()
+ " secret keys for NM use for launching container");
Credentials taskCredentials = new Credentials(credentials);
// LocalStorageToken is needed irrespective of whether security is enabled
// or not.
TokenCache.setJobToken(jobToken, taskCredentials);
DataOutputBuffer containerTokens_dob = new DataOutputBuffer();
LOG.info("Size of containertokens_dob is "
+ taskCredentials.numberOfTokens());
taskCredentials.writeTokenStorageToStream(containerTokens_dob);
taskCredentialsBuffer =
ByteBuffer.wrap(containerTokens_dob.getData(), 0,
containerTokens_dob.getLength());
// Add shuffle secret key
// The secret key is converted to a JobToken to preserve backwards
// compatibility with an older ShuffleHandler running on an NM.
LOG.info("Putting shuffle token in serviceData");
byte[] shuffleSecret = TokenCache.getShuffleSecretKey(credentials);
if (shuffleSecret == null) {
LOG.warn("Cannot locate shuffle secret in credentials."
+ " Using job token as shuffle secret.");
shuffleSecret = jobToken.getPassword();
}
Token<JobTokenIdentifier> shuffleToken = new Token<JobTokenIdentifier>(
jobToken.getIdentifier(), shuffleSecret, jobToken.getKind(),
jobToken.getService());
serviceData.put(ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID,
ShuffleHandler.serializeServiceData(shuffleToken));
// add external shuffle-providers - if any
Collection<String> shuffleProviders = conf.getStringCollection(
MRJobConfig.MAPREDUCE_JOB_SHUFFLE_PROVIDER_SERVICES);
if (!shuffleProviders.isEmpty()) {
Collection<String> auxNames = conf.getStringCollection(
YarnConfiguration.NM_AUX_SERVICES);
for (final String shuffleProvider : shuffleProviders) {
if (shuffleProvider.equals(ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID)) {
continue; // skip built-in shuffle-provider that was already inserted with shuffle secret key
}
if (auxNames.contains(shuffleProvider)) {
LOG.info("Adding ShuffleProvider Service: " + shuffleProvider + " to serviceData");
// This only serves for INIT_APP notifications
// The shuffle service needs to be able to work with the host:port information provided by the AM
// (i.e. shuffle services which require custom location / other configuration are not supported)
serviceData.put(shuffleProvider, ByteBuffer.allocate(0));
} else {
throw new YarnRuntimeException("ShuffleProvider Service: " + shuffleProvider +
" was NOT found in the list of aux-services that are available in this NM." +
" You may need to specify this ShuffleProvider as an aux-service in your yarn-site.xml");
}
}
}
MRApps.addToEnvironment(
environment,
Environment.CLASSPATH.name(),
getInitialClasspath(conf), conf);
if (initialAppClasspath != null) {
MRApps.addToEnvironment(
environment,
Environment.APP_CLASSPATH.name(),
initialAppClasspath, conf);
}
} catch (IOException e) {
throw new YarnRuntimeException(e);
}
// Shell
environment.put(
Environment.SHELL.name(),
conf.get(
MRJobConfig.MAPRED_ADMIN_USER_SHELL,
MRJobConfig.DEFAULT_SHELL)
);
// Add pwd to LD_LIBRARY_PATH; do this before adding anything else
MRApps.addToEnvironment(
environment,
Environment.LD_LIBRARY_PATH.name(),
MRApps.crossPlatformifyMREnv(conf, Environment.PWD), conf);
// Add the env variables passed by the admin
MRApps.setEnvFromInputString(
environment,
conf.get(
MRJobConfig.MAPRED_ADMIN_USER_ENV,
MRJobConfig.DEFAULT_MAPRED_ADMIN_USER_ENV), conf
);
// Construct the actual Container
// The null fields are per-container and will be constructed for each
// container separately.
ContainerLaunchContext container =
ContainerLaunchContext.newInstance(localResources, environment, null,
serviceData, taskCredentialsBuffer, applicationACLs);
return container;
}
static ContainerLaunchContext createContainerLaunchContext(
Map<ApplicationAccessType, String> applicationACLs,
Configuration conf, Token<JobTokenIdentifier> jobToken, Task remoteTask,
final org.apache.hadoop.mapred.JobID oldJobId,
WrappedJvmID jvmID,
TaskAttemptListener taskAttemptListener,
Credentials credentials) {
synchronized (commonContainerSpecLock) {
if (commonContainerSpec == null) {
commonContainerSpec = createCommonContainerLaunchContext(
applicationACLs, conf, jobToken, oldJobId, credentials);
}
}
// Fill in the fields needed per-container that are missing in the common
// spec.
boolean userClassesTakesPrecedence =
conf.getBoolean(MRJobConfig.MAPREDUCE_JOB_USER_CLASSPATH_FIRST, false);
// Setup environment by cloning from common env.
Map<String, String> env = commonContainerSpec.getEnvironment();
Map<String, String> myEnv = new HashMap<String, String>(env.size());
myEnv.putAll(env);
if (userClassesTakesPrecedence) {
myEnv.put(Environment.CLASSPATH_PREPEND_DISTCACHE.name(), "true");
}
MapReduceChildJVM.setVMEnv(myEnv, remoteTask);
// Set up the launch command
List<String> commands = MapReduceChildJVM.getVMCommand(
taskAttemptListener.getAddress(), remoteTask, jvmID);
// Duplicate the ByteBuffers for access by multiple containers.
Map<String, ByteBuffer> myServiceData = new HashMap<String, ByteBuffer>();
for (Entry<String, ByteBuffer> entry : commonContainerSpec
.getServiceData().entrySet()) {
myServiceData.put(entry.getKey(), entry.getValue().duplicate());
}
// Construct the actual Container
ContainerLaunchContext container = ContainerLaunchContext.newInstance(
commonContainerSpec.getLocalResources(), myEnv, commands,
myServiceData, commonContainerSpec.getTokens().duplicate(),
applicationACLs);
return container;
}
@Override
public ContainerId getAssignedContainerID() {
readLock.lock();
try {
return container == null ? null : container.getId();
} finally {
readLock.unlock();
}
}
@Override
public String getAssignedContainerMgrAddress() {
readLock.lock();
try {
return container == null ? null : StringInterner.weakIntern(container
.getNodeId().toString());
} finally {
readLock.unlock();
}
}
@Override
public long getLaunchTime() {
readLock.lock();
try {
return launchTime;
} finally {
readLock.unlock();
}
}
@Override
public long getFinishTime() {
readLock.lock();
try {
return finishTime;
} finally {
readLock.unlock();
}
}
@Override
public long getShuffleFinishTime() {
readLock.lock();
try {
return this.reportedStatus.shuffleFinishTime;
} finally {
readLock.unlock();
}
}
@Override
public long getSortFinishTime() {
readLock.lock();
try {
return this.reportedStatus.sortFinishTime;
} finally {
readLock.unlock();
}
}
@Override
public int getShufflePort() {
readLock.lock();
try {
return shufflePort;
} finally {
readLock.unlock();
}
}
@Override
public NodeId getNodeId() {
readLock.lock();
try {
return container == null ? null : container.getNodeId();
} finally {
readLock.unlock();
}
}
/**
 * If a container is assigned, return the node's address; otherwise null.
 */
@Override
public String getNodeHttpAddress() {
readLock.lock();
try {
return container == null ? null : container.getNodeHttpAddress();
} finally {
readLock.unlock();
}
}
/**
 * If a container is assigned, return the node's rack name; otherwise null.
 */
@Override
public String getNodeRackName() {
this.readLock.lock();
try {
return this.nodeRackName;
} finally {
this.readLock.unlock();
}
}
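/**
 * Creates the classic {@link org.apache.hadoop.mapred.Task} (map or reduce)
 * that will be shipped to and executed in the launched container.
 */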
protected abstract org.apache.hadoop.mapred.Task createRemoteTask();
@Override
public TaskAttemptId getID() {
return attemptId;
}
@Override
public boolean isFinished() {
readLock.lock();
try {
// TODO: Use stateMachine level method?
return (getInternalState() == TaskAttemptStateInternal.SUCCEEDED ||
getInternalState() == TaskAttemptStateInternal.FAILED ||
getInternalState() == TaskAttemptStateInternal.KILLED);
} finally {
readLock.unlock();
}
}
@Override
public TaskAttemptReport getReport() {
TaskAttemptReport result = recordFactory.newRecordInstance(TaskAttemptReport.class);
readLock.lock();
try {
result.setTaskAttemptId(attemptId);
//take the LOCAL state of attempt
//DO NOT take from reportedStatus
result.setTaskAttemptState(getState());
result.setProgress(reportedStatus.progress);
result.setStartTime(launchTime);
result.setFinishTime(finishTime);
result.setShuffleFinishTime(this.reportedStatus.shuffleFinishTime);
result.setDiagnosticInfo(StringUtils.join(LINE_SEPARATOR, getDiagnostics()));
result.setPhase(reportedStatus.phase);
result.setStateString(reportedStatus.stateString);
result.setCounters(TypeConverter.toYarn(getCounters()));
result.setContainerId(this.getAssignedContainerID());
result.setNodeManagerHost(trackerName);
result.setNodeManagerHttpPort(httpPort);
if (this.container != null) {
result.setNodeManagerPort(this.container.getNodeId().getPort());
}
return result;
} finally {
readLock.unlock();
}
}
@Override
public List<String> getDiagnostics() {
List<String> result = new ArrayList<String>();
readLock.lock();
try {
result.addAll(diagnostics);
return result;
} finally {
readLock.unlock();
}
}
@Override
public Counters getCounters() {
readLock.lock();
try {
Counters counters = reportedStatus.counters;
if (counters == null) {
counters = EMPTY_COUNTERS;
}
return counters;
} finally {
readLock.unlock();
}
}
@Override
public float getProgress() {
readLock.lock();
try {
return reportedStatus.progress;
} finally {
readLock.unlock();
}
}
@Override
public Phase getPhase() {
readLock.lock();
try {
return reportedStatus.phase;
} finally {
readLock.unlock();
}
}
@Override
public TaskAttemptState getState() {
readLock.lock();
try {
return getExternalState(stateMachine.getCurrentState());
} finally {
readLock.unlock();
}
}
@SuppressWarnings("unchecked")
@Override
public void handle(TaskAttemptEvent event) {
if (LOG.isDebugEnabled()) {
LOG.debug("Processing " + event.getTaskAttemptID() + " of type "
+ event.getType());
}
writeLock.lock();
try {
final TaskAttemptStateInternal oldState = getInternalState();
try {
stateMachine.doTransition(event.getType(), event);
} catch (InvalidStateTransitionException e) {
LOG.error("Can't handle this event at current state for "
+ this.attemptId, e);
eventHandler.handle(new JobDiagnosticsUpdateEvent(
this.attemptId.getTaskId().getJobId(), "Invalid event " + event.getType() +
" on TaskAttempt " + this.attemptId));
eventHandler.handle(new JobEvent(this.attemptId.getTaskId().getJobId(),
JobEventType.INTERNAL_ERROR));
}
if (oldState != getInternalState()) {
LOG.info(attemptId + " TaskAttempt Transitioned from "
+ oldState + " to "
+ getInternalState());
}
} finally {
writeLock.unlock();
}
}
@VisibleForTesting
public TaskAttemptStateInternal getInternalState() {
readLock.lock();
try {
return stateMachine.getCurrentState();
} finally {
readLock.unlock();
}
}
public Locality getLocality() {
return locality;
}
public void setLocality(Locality locality) {
this.locality = locality;
}
public Avataar getAvataar() {
return avataar;
}
public void setAvataar(Avataar avataar) {
this.avataar = avataar;
}
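/**
 * Recover this attempt from a previous application attempt using its job
 * history record. If recoverOutput is set and the attempt succeeded, the
 * committer is asked to recover its output; if that fails, or the recovered
 * state is unexpected, the attempt is recovered as KILLED and its output is
 * aborted.
 */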
@SuppressWarnings("unchecked")
public TaskAttemptStateInternal recover(TaskAttemptInfo taInfo,
OutputCommitter committer, boolean recoverOutput) {
ContainerId containerId = taInfo.getContainerId();
NodeId containerNodeId = ConverterUtils.toNodeId(taInfo.getHostname() + ":"
+ taInfo.getPort());
String nodeHttpAddress = StringInterner.weakIntern(taInfo.getHostname() + ":"
+ taInfo.getHttpPort());
// Resource/Priority/Tokens are only needed while launching the container on
// an NM; these are already completed tasks, so set them to null
container =
Container.newInstance(containerId, containerNodeId,
nodeHttpAddress, null, null, null);
computeRackAndLocality();
launchTime = taInfo.getStartTime();
finishTime = (taInfo.getFinishTime() != -1) ?
taInfo.getFinishTime() : clock.getTime();
shufflePort = taInfo.getShufflePort();
trackerName = taInfo.getHostname();
httpPort = taInfo.getHttpPort();
sendLaunchedEvents();
reportedStatus.id = attemptId;
reportedStatus.progress = 1.0f;
reportedStatus.counters = taInfo.getCounters();
reportedStatus.stateString = taInfo.getState();
reportedStatus.phase = Phase.CLEANUP;
reportedStatus.mapFinishTime = taInfo.getMapFinishTime();
reportedStatus.shuffleFinishTime = taInfo.getShuffleFinishTime();
reportedStatus.sortFinishTime = taInfo.getSortFinishTime();
addDiagnosticInfo(taInfo.getError());
boolean needToClean = false;
String recoveredState = taInfo.getTaskStatus();
if (recoverOutput
&& TaskAttemptState.SUCCEEDED.toString().equals(recoveredState)) {
TaskAttemptContext tac = new TaskAttemptContextImpl(conf,
TypeConverter.fromYarn(attemptId));
try {
committer.recoverTask(tac);
LOG.info("Recovered output from task attempt " + attemptId);
} catch (Exception e) {
LOG.error("Unable to recover task attempt " + attemptId, e);
LOG.info("Task attempt " + attemptId + " will be recovered as KILLED");
recoveredState = TaskAttemptState.KILLED.toString();
needToClean = true;
}
}
TaskAttemptStateInternal attemptState;
if (TaskAttemptState.SUCCEEDED.toString().equals(recoveredState)) {
attemptState = TaskAttemptStateInternal.SUCCEEDED;
reportedStatus.taskState = TaskAttemptState.SUCCEEDED;
eventHandler.handle(createJobCounterUpdateEventTASucceeded(this));
logAttemptFinishedEvent(attemptState);
} else if (TaskAttemptState.FAILED.toString().equals(recoveredState)) {
attemptState = TaskAttemptStateInternal.FAILED;
reportedStatus.taskState = TaskAttemptState.FAILED;
eventHandler.handle(createJobCounterUpdateEventTAFailed(this, false));
TaskAttemptUnsuccessfulCompletionEvent tauce =
createTaskAttemptUnsuccessfulCompletionEvent(this,
TaskAttemptStateInternal.FAILED);
eventHandler.handle(
new JobHistoryEvent(attemptId.getTaskId().getJobId(), tauce));
} else {
if (!TaskAttemptState.KILLED.toString().equals(recoveredState)) {
if (String.valueOf(recoveredState).isEmpty()) {
LOG.info("TaskAttempt" + attemptId
+ " had not completed, recovering as KILLED");
} else {
LOG.warn("TaskAttempt " + attemptId + " found in unexpected state "
+ recoveredState + ", recovering as KILLED");
}
addDiagnosticInfo("Killed during application recovery");
needToClean = true;
}
attemptState = TaskAttemptStateInternal.KILLED;
reportedStatus.taskState = TaskAttemptState.KILLED;
eventHandler.handle(createJobCounterUpdateEventTAKilled(this, false));
TaskAttemptUnsuccessfulCompletionEvent tauce =
createTaskAttemptUnsuccessfulCompletionEvent(this,
TaskAttemptStateInternal.KILLED);
eventHandler.handle(
new JobHistoryEvent(attemptId.getTaskId().getJobId(), tauce));
}
if (needToClean) {
TaskAttemptContext tac = new TaskAttemptContextImpl(conf,
TypeConverter.fromYarn(attemptId));
try {
committer.abortTask(tac);
} catch (Exception e) {
LOG.warn("Task cleanup failed for attempt " + attemptId, e);
}
}
return attemptState;
}
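// Maps the fine-grained internal state-machine state to the coarse
// TaskAttemptState reported to clients.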
private static TaskAttemptState getExternalState(
TaskAttemptStateInternal smState) {
switch (smState) {
case ASSIGNED:
case UNASSIGNED:
return TaskAttemptState.STARTING;
case COMMIT_PENDING:
return TaskAttemptState.COMMIT_PENDING;
case FAIL_CONTAINER_CLEANUP:
case FAIL_TASK_CLEANUP:
case FAIL_FINISHING_CONTAINER:
case FAILED:
return TaskAttemptState.FAILED;
case KILL_CONTAINER_CLEANUP:
case KILL_TASK_CLEANUP:
case KILLED:
return TaskAttemptState.KILLED;
case RUNNING:
return TaskAttemptState.RUNNING;
case NEW:
return TaskAttemptState.NEW;
case SUCCESS_CONTAINER_CLEANUP:
case SUCCESS_FINISHING_CONTAINER:
case SUCCEEDED:
return TaskAttemptState.SUCCEEDED;
default:
throw new YarnRuntimeException("Attempt to convert invalid "
+ "stateMachineTaskAttemptState to externalTaskAttemptState: "
+ smState);
}
}
// Always called while holding the write lock.
private void setFinishTime() {
//set the finish time only if launch time is set
if (launchTime != 0) {
finishTime = clock.getTime();
}
}
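// Resolve the assigned container's rack and classify this attempt's
// locality as NODE_LOCAL, RACK_LOCAL, or OFF_SWITCH relative to the
// split's data-local hosts and racks.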
private void computeRackAndLocality() {
NodeId containerNodeId = container.getNodeId();
nodeRackName = RackResolver.resolve(
containerNodeId.getHost()).getNetworkLocation();
locality = Locality.OFF_SWITCH;
if (dataLocalHosts.size() > 0) {
String cHost = resolveHost(containerNodeId.getHost());
if (dataLocalHosts.contains(cHost)) {
locality = Locality.NODE_LOCAL;
}
}
if (locality == Locality.OFF_SWITCH) {
if (dataLocalRacks.contains(nodeRackName)) {
locality = Locality.RACK_LOCAL;
}
}
}
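// Accumulate the per-job resource-usage counters (slot-, MB-, vcore- and
// wall-clock milliseconds) from the attempt's run duration and the
// resources it requested.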
private static void updateMillisCounters(JobCounterUpdateEvent jce,
TaskAttemptImpl taskAttempt) {
TaskType taskType = taskAttempt.getID().getTaskId().getTaskType();
long duration = (taskAttempt.getFinishTime() - taskAttempt.getLaunchTime());
int mbRequired =
taskAttempt.getMemoryRequired(taskAttempt.conf, taskType);
int vcoresRequired = taskAttempt.getCpuRequired(taskAttempt.conf, taskType);
int minSlotMemSize = taskAttempt.conf.getInt(
YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB);
int simSlotsRequired =
minSlotMemSize == 0 ? 0 : (int) Math.ceil((float) mbRequired
/ minSlotMemSize);
if (taskType == TaskType.MAP) {
jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_MAPS, simSlotsRequired * duration);
jce.addCounterUpdate(JobCounter.MB_MILLIS_MAPS, duration * mbRequired);
jce.addCounterUpdate(JobCounter.VCORES_MILLIS_MAPS, duration * vcoresRequired);
jce.addCounterUpdate(JobCounter.MILLIS_MAPS, duration);
} else {
jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_REDUCES, simSlotsRequired * duration);
jce.addCounterUpdate(JobCounter.MB_MILLIS_REDUCES, duration * mbRequired);
jce.addCounterUpdate(JobCounter.VCORES_MILLIS_REDUCES, duration * vcoresRequired);
jce.addCounterUpdate(JobCounter.MILLIS_REDUCES, duration);
}
}
private static JobCounterUpdateEvent createJobCounterUpdateEventTASucceeded(
TaskAttemptImpl taskAttempt) {
TaskId taskId = taskAttempt.attemptId.getTaskId();
JobCounterUpdateEvent jce = new JobCounterUpdateEvent(taskId.getJobId());
updateMillisCounters(jce, taskAttempt);
return jce;
}
private static JobCounterUpdateEvent createJobCounterUpdateEventTAFailed(
TaskAttemptImpl taskAttempt, boolean taskAlreadyCompleted) {
TaskType taskType = taskAttempt.getID().getTaskId().getTaskType();
JobCounterUpdateEvent jce = new JobCounterUpdateEvent(taskAttempt.getID().getTaskId().getJobId());
if (taskType == TaskType.MAP) {
jce.addCounterUpdate(JobCounter.NUM_FAILED_MAPS, 1);
} else {
jce.addCounterUpdate(JobCounter.NUM_FAILED_REDUCES, 1);
}
if (!taskAlreadyCompleted) {
updateMillisCounters(jce, taskAttempt);
}
return jce;
}
private static JobCounterUpdateEvent createJobCounterUpdateEventTAKilled(
TaskAttemptImpl taskAttempt, boolean taskAlreadyCompleted) {
TaskType taskType = taskAttempt.getID().getTaskId().getTaskType();
JobCounterUpdateEvent jce = new JobCounterUpdateEvent(taskAttempt.getID().getTaskId().getJobId());
if (taskType == TaskType.MAP) {
jce.addCounterUpdate(JobCounter.NUM_KILLED_MAPS, 1);
} else {
jce.addCounterUpdate(JobCounter.NUM_KILLED_REDUCES, 1);
}
if (!taskAlreadyCompleted) {
updateMillisCounters(jce, taskAttempt);
}
return jce;
}
private static
TaskAttemptUnsuccessfulCompletionEvent
createTaskAttemptUnsuccessfulCompletionEvent(TaskAttemptImpl taskAttempt,
TaskAttemptStateInternal attemptState) {
TaskAttemptUnsuccessfulCompletionEvent tauce =
new TaskAttemptUnsuccessfulCompletionEvent(
TypeConverter.fromYarn(taskAttempt.attemptId),
TypeConverter.fromYarn(taskAttempt.attemptId.getTaskId()
.getTaskType()), attemptState.toString(),
taskAttempt.finishTime,
taskAttempt.container == null ? "UNKNOWN"
: taskAttempt.container.getNodeId().getHost(),
taskAttempt.container == null ? -1
: taskAttempt.container.getNodeId().getPort(),
taskAttempt.nodeRackName == null ? "UNKNOWN"
: taskAttempt.nodeRackName,
StringUtils.join(
LINE_SEPARATOR, taskAttempt.getDiagnostics()),
taskAttempt.getCounters(), taskAttempt
.getProgressSplitBlock().burst());
return tauce;
}
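// Bumps the TOTAL_LAUNCHED_MAPS/TOTAL_LAUNCHED_REDUCES job counter and
// writes a TaskAttemptStartedEvent to the job history.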
@SuppressWarnings("unchecked")
private void sendLaunchedEvents() {
JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptId.getTaskId()
.getJobId());
jce.addCounterUpdate(attemptId.getTaskId().getTaskType() == TaskType.MAP ?
JobCounter.TOTAL_LAUNCHED_MAPS : JobCounter.TOTAL_LAUNCHED_REDUCES, 1);
eventHandler.handle(jce);
LOG.info("TaskAttempt: [" + attemptId
+ "] using containerId: [" + container.getId() + " on NM: ["
+ StringInterner.weakIntern(container.getNodeId().toString()) + "]");
TaskAttemptStartedEvent tase =
new TaskAttemptStartedEvent(TypeConverter.fromYarn(attemptId),
TypeConverter.fromYarn(attemptId.getTaskId().getTaskType()),
launchTime, trackerName, httpPort, shufflePort, container.getId(),
locality.toString(), avataar.toString());
eventHandler.handle(
new JobHistoryEvent(attemptId.getTaskId().getJobId(), tase));
}
private WrappedProgressSplitsBlock getProgressSplitBlock() {
readLock.lock();
try {
if (progressSplitBlock == null) {
progressSplitBlock = new WrappedProgressSplitsBlock(conf.getInt(
MRJobConfig.MR_AM_NUM_PROGRESS_SPLITS,
MRJobConfig.DEFAULT_MR_AM_NUM_PROGRESS_SPLITS));
}
return progressSplitBlock;
} finally {
readLock.unlock();
}
}
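// Folds the latest reported progress into the wall-clock, CPU and memory
// progress splits that are later burst into history events.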
private void updateProgressSplits() {
double newProgress = reportedStatus.progress;
newProgress = Math.max(Math.min(newProgress, 1.0D), 0.0D);
Counters counters = reportedStatus.counters;
if (counters == null)
return;
WrappedProgressSplitsBlock splitsBlock = getProgressSplitBlock();
if (splitsBlock != null) {
long now = clock.getTime();
long start = getLaunchTime(); // TODO Ensure not 0
if (start != 0 && now - start <= Integer.MAX_VALUE) {
splitsBlock.getProgressWallclockTime().extend(newProgress,
(int) (now - start));
}
Counter cpuCounter = counters.findCounter(TaskCounter.CPU_MILLISECONDS);
if (cpuCounter != null && cpuCounter.getValue() <= Integer.MAX_VALUE) {
splitsBlock.getProgressCPUTime().extend(newProgress,
(int) cpuCounter.getValue()); // long to int? TODO: FIX. Same below
}
Counter virtualBytes = counters
.findCounter(TaskCounter.VIRTUAL_MEMORY_BYTES);
if (virtualBytes != null) {
splitsBlock.getProgressVirtualMemoryKbytes().extend(newProgress,
(int) (virtualBytes.getValue() / (MEMORY_SPLITS_RESOLUTION)));
}
Counter physicalBytes = counters
.findCounter(TaskCounter.PHYSICAL_MEMORY_BYTES);
if (physicalBytes != null) {
splitsBlock.getProgressPhysicalMemoryKbytes().extend(newProgress,
(int) (physicalBytes.getValue() / (MEMORY_SPLITS_RESOLUTION)));
}
}
}
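// Marks the attempt's progress as complete and unregisters it from the
// TaskAttemptListener.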
private static void finalizeProgress(TaskAttemptImpl taskAttempt) {
// unregister it from the TaskAttemptListener so that it stops listening
taskAttempt.taskAttemptListener.unregister(
taskAttempt.attemptId, taskAttempt.jvmID);
taskAttempt.reportedStatus.progress = 1.0f;
taskAttempt.updateProgressSplits();
}
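/**
 * Transition out of NEW: notifies the speculator of a pending container
 * need and sends a container request to the ContainerAllocator, omitting
 * locality hints when rescheduling a failed attempt.
 */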
static class RequestContainerTransition implements
SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
private final boolean rescheduled;
public RequestContainerTransition(boolean rescheduled) {
this.rescheduled = rescheduled;
}
@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt,
TaskAttemptEvent event) {
// Tell any speculator that we're requesting a container
taskAttempt.eventHandler.handle
(new SpeculatorEvent(taskAttempt.getID().getTaskId(), +1));
//request a container
if (rescheduled) {
taskAttempt.eventHandler.handle(
ContainerRequestEvent.createContainerRequestEventForFailedContainer(
taskAttempt.attemptId,
taskAttempt.resourceCapability));
} else {
taskAttempt.eventHandler.handle(new ContainerRequestEvent(
taskAttempt.attemptId, taskAttempt.resourceCapability,
taskAttempt.dataLocalHosts.toArray(
new String[taskAttempt.dataLocalHosts.size()]),
taskAttempt.dataLocalRacks.toArray(
new String[taskAttempt.dataLocalRacks.size()])));
}
}
}
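// Normalizes split-location hints: resolves raw IP addresses to hostnames
// and drops null entries.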
protected Set<String> resolveHosts(String[] src) {
Set<String> result = new HashSet<String>();
if (src != null) {
for (int i = 0; i < src.length; i++) {
if (src[i] == null) {
continue;
} else if (isIP(src[i])) {
result.add(resolveHost(src[i]));
} else {
result.add(src[i]);
}
}
}
return result;
}
protected String resolveHost(String src) {
String result = src; // Fallback in case of failure.
try {
InetAddress addr = InetAddress.getByName(src);
result = addr.getHostName();
} catch (UnknownHostException e) {
LOG.warn("Failed to resolve address: " + src
+ ". Continuing to use the same.");
}
return result;
}
private static final Pattern ipPattern = // Pattern for matching an IPv4 address
Pattern.compile("\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}");
protected boolean isIP(String src) {
return ipPattern.matcher(src).matches();
}
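/**
 * Transition UNASSIGNED -> ASSIGNED: records the assigned container,
 * creates and registers the remote task, computes locality, and asks the
 * ContainerLauncher to launch the container.
 */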
private static class ContainerAssignedTransition implements
SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
@SuppressWarnings({ "unchecked" })
@Override
public void transition(final TaskAttemptImpl taskAttempt,
TaskAttemptEvent event) {
final TaskAttemptContainerAssignedEvent cEvent =
(TaskAttemptContainerAssignedEvent) event;
Container container = cEvent.getContainer();
taskAttempt.container = container;
// this is a _real_ Task (classic Hadoop mapred flavor):
taskAttempt.remoteTask = taskAttempt.createRemoteTask();
taskAttempt.jvmID =
new WrappedJvmID(taskAttempt.remoteTask.getTaskID().getJobID(),
taskAttempt.remoteTask.isMapTask(),
taskAttempt.container.getId().getContainerId());
taskAttempt.taskAttemptListener.registerPendingTask(
taskAttempt.remoteTask, taskAttempt.jvmID);
taskAttempt.computeRackAndLocality();
//launch the container
//create the container object to be launched for a given Task attempt
ContainerLaunchContext launchContext = createContainerLaunchContext(
cEvent.getApplicationACLs(), taskAttempt.conf, taskAttempt.jobToken,
taskAttempt.remoteTask, taskAttempt.oldJobId, taskAttempt.jvmID,
taskAttempt.taskAttemptListener, taskAttempt.credentials);
taskAttempt.eventHandler
.handle(new ContainerRemoteLaunchEvent(taskAttempt.attemptId,
launchContext, container, taskAttempt.remoteTask));
// send event to speculator that our container needs are satisfied
taskAttempt.eventHandler.handle
(new SpeculatorEvent(taskAttempt.getID().getTaskId(), -1));
}
}
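/**
 * Handles termination before the container is launched: records the finish
 * time, returns the container to the allocator, notifies the task of the
 * failure or kill, and logs a history event if a start event was emitted.
 */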
private static class DeallocateContainerTransition implements
SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
private final TaskAttemptStateInternal finalState;
private final boolean withdrawsContainerRequest;
DeallocateContainerTransition
(TaskAttemptStateInternal finalState, boolean withdrawsContainerRequest) {
this.finalState = finalState;
this.withdrawsContainerRequest = withdrawsContainerRequest;
}
@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt,
TaskAttemptEvent event) {
//set the finish time
taskAttempt.setFinishTime();
if (event instanceof TaskAttemptKillEvent) {
taskAttempt.addDiagnosticInfo(
((TaskAttemptKillEvent) event).getMessage());
}
//send the deallocate event to ContainerAllocator
taskAttempt.eventHandler.handle(
new ContainerAllocatorEvent(taskAttempt.attemptId,
ContainerAllocator.EventType.CONTAINER_DEALLOCATE));
// send event to speculator that we withdraw our container needs, if
// we're transitioning out of UNASSIGNED
if (withdrawsContainerRequest) {
taskAttempt.eventHandler.handle
(new SpeculatorEvent(taskAttempt.getID().getTaskId(), -1));
}
switch(finalState) {
case FAILED:
taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
taskAttempt.attemptId,
TaskEventType.T_ATTEMPT_FAILED));
break;
case KILLED:
taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
taskAttempt.attemptId,
TaskEventType.T_ATTEMPT_KILLED));
break;
default:
LOG.error("Task final state is not FAILED or KILLED: " + finalState);
}
if (taskAttempt.getLaunchTime() != 0) {
TaskAttemptUnsuccessfulCompletionEvent tauce =
createTaskAttemptUnsuccessfulCompletionEvent(taskAttempt,
finalState);
if(finalState == TaskAttemptStateInternal.FAILED) {
taskAttempt.eventHandler
.handle(createJobCounterUpdateEventTAFailed(taskAttempt, false));
} else if(finalState == TaskAttemptStateInternal.KILLED) {
taskAttempt.eventHandler
.handle(createJobCounterUpdateEventTAKilled(taskAttempt, false));
}
taskAttempt.eventHandler.handle(new JobHistoryEvent(
taskAttempt.attemptId.getTaskId().getJobId(), tauce));
} else {
LOG.debug("Not generating HistoryFinish event since start event not " +
"generated for taskAttempt: " + taskAttempt.getID());
}
}
}
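/**
* Taken once the container has actually launched: records the launch time
* and shuffle port, registers the attempt for liveness monitoring, and
* notifies the task and the speculator that the attempt is running.
*/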
private static class LaunchedContainerTransition implements
SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt,
TaskAttemptEvent rawEvent) {
TaskAttemptContainerLaunchedEvent event =
(TaskAttemptContainerLaunchedEvent) rawEvent;
//set the launch time
taskAttempt.launchTime = taskAttempt.clock.getTime();
taskAttempt.shufflePort = event.getShufflePort();
// register it with the TaskAttemptListener so that it can start monitoring it.
taskAttempt.taskAttemptListener
.registerLaunchedTask(taskAttempt.attemptId, taskAttempt.jvmID);
//TODO Resolve to host / IP in case of a local address.
InetSocketAddress nodeHttpInetAddr = // TODO: Costly to create sock-addr?
NetUtils.createSocketAddr(taskAttempt.container.getNodeHttpAddress());
taskAttempt.trackerName = nodeHttpInetAddr.getHostName();
taskAttempt.httpPort = nodeHttpInetAddr.getPort();
taskAttempt.sendLaunchedEvents();
taskAttempt.eventHandler.handle
(new SpeculatorEvent
(taskAttempt.attemptId, true, taskAttempt.clock.getTime()));
//null out the remoteTask reference as it is no longer needed,
//freeing up its memory
taskAttempt.remoteTask = null;
//tell the Task that attempt has started
taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
taskAttempt.attemptId,
TaskEventType.T_ATTEMPT_LAUNCHED));
}
}
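/**
* Forwards the umbilical's commit-pending notification to the task so that
* permission to commit can be arbitrated above the attempt level.
*/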
private static class CommitPendingTransition implements
SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt,
TaskAttemptEvent event) {
taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
taskAttempt.attemptId,
TaskEventType.T_ATTEMPT_COMMIT_PENDING));
}
}
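/**
* Asks the committer to abort this attempt's output by sending a
* CommitterTaskAbortEvent.
*/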
private static class TaskCleanupTransition implements
SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt,
TaskAttemptEvent event) {
TaskAttemptContext taskContext =
new TaskAttemptContextImpl(taskAttempt.conf,
TypeConverter.fromYarn(taskAttempt.attemptId));
taskAttempt.eventHandler.handle(new CommitterTaskAbortEvent(
taskAttempt.attemptId, taskContext));
}
}
/**
* Transition from SUCCESS_FINISHING_CONTAINER or FAIL_FINISHING_CONTAINER
* state upon receiving TA_CONTAINER_COMPLETED event
*/
private static class ExitFinishingOnContainerCompletedTransition implements
SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt,
TaskAttemptEvent event) {
taskAttempt.appContext.getTaskAttemptFinishingMonitor().unregister(
taskAttempt.attemptId);
sendContainerCompleted(taskAttempt);
}
}
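/**
* Transition out of a FINISHING state when the container has already been
* cleaned up; the attempt only needs to stop being tracked by the
* finishing monitor.
*/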
private static class ExitFinishingOnContainerCleanedupTransition implements
SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt,
TaskAttemptEvent event) {
taskAttempt.appContext.getTaskAttemptFinishingMonitor().unregister(
taskAttempt.attemptId);
}
}
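/**
* Records the finish time and emits the standard failure notifications
* (job counters, job history, T_ATTEMPT_FAILED).
*/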
private static class FailedTransition implements
SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt,
TaskAttemptEvent event) {
// set the finish time
taskAttempt.setFinishTime();
notifyTaskAttemptFailed(taskAttempt);
}
}
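/**
* Like FailedTransition, but also finalizes progress and tells the
* ContainerLauncher that the container has completed.
*/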
private static class FinalizeFailedTransition extends FailedTransition {
@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt,
TaskAttemptEvent event) {
finalizeProgress(taskAttempt);
sendContainerCompleted(taskAttempt);
super.transition(taskAttempt, event);
}
}
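/**
* Notify the ContainerLauncher that this attempt's container has exited.
*/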
@SuppressWarnings("unchecked")
private static void sendContainerCompleted(TaskAttemptImpl taskAttempt) {
taskAttempt.eventHandler.handle(new ContainerLauncherEvent(
taskAttempt.attemptId,
taskAttempt.container.getId(), StringInterner
.weakIntern(taskAttempt.container.getNodeId().toString()),
taskAttempt.container.getContainerToken(),
ContainerLauncher.EventType.CONTAINER_COMPLETED));
}
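/**
* Restores the attempt from a prior application attempt's job history
* record; the recovered info determines the resulting internal state.
*/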
private static class RecoverTransition implements
MultipleArcTransition<TaskAttemptImpl, TaskAttemptEvent, TaskAttemptStateInternal> {
@Override
public TaskAttemptStateInternal transition(TaskAttemptImpl taskAttempt,
TaskAttemptEvent event) {
TaskAttemptRecoverEvent tare = (TaskAttemptRecoverEvent) event;
return taskAttempt.recover(tare.getTaskAttemptInfo(),
tare.getCommitter(), tare.getRecoverOutput());
}
}
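/**
* Write a MapAttemptFinishedEvent or ReduceAttemptFinishedEvent to job
* history; skipped when the attempt never actually launched.
*/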
@SuppressWarnings({ "unchecked" })
private void logAttemptFinishedEvent(TaskAttemptStateInternal state) {
//Log finished events only if an attempt started.
if (getLaunchTime() == 0) return;
String containerHostName = this.container == null ? "UNKNOWN"
: this.container.getNodeId().getHost();
int containerNodePort =
this.container == null ? -1 : this.container.getNodeId().getPort();
if (attemptId.getTaskId().getTaskType() == TaskType.MAP) {
MapAttemptFinishedEvent mfe =
new MapAttemptFinishedEvent(TypeConverter.fromYarn(attemptId),
TypeConverter.fromYarn(attemptId.getTaskId().getTaskType()),
state.toString(),
this.reportedStatus.mapFinishTime,
finishTime,
containerHostName,
containerNodePort,
this.nodeRackName == null ? "UNKNOWN" : this.nodeRackName,
this.reportedStatus.stateString,
getCounters(),
getProgressSplitBlock().burst());
eventHandler.handle(
new JobHistoryEvent(attemptId.getTaskId().getJobId(), mfe));
} else {
ReduceAttemptFinishedEvent rfe =
new ReduceAttemptFinishedEvent(TypeConverter.fromYarn(attemptId),
TypeConverter.fromYarn(attemptId.getTaskId().getTaskType()),
state.toString(),
this.reportedStatus.shuffleFinishTime,
this.reportedStatus.sortFinishTime,
finishTime,
containerHostName,
containerNodePort,
this.nodeRackName == null ? "UNKNOWN" : this.nodeRackName,
this.reportedStatus.stateString,
getCounters(),
getProgressSplitBlock().burst());
eventHandler.handle(
new JobHistoryEvent(attemptId.getTaskId().getJobId(), rfe));
}
}
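/**
* Fails a succeeded map attempt after too many reducers have reported
* failures fetching its output, so that the map can be re-run elsewhere.
*/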
private static class TooManyFetchFailureTransition implements
SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt, TaskAttemptEvent event) {
TaskAttemptTooManyFetchFailureEvent fetchFailureEvent =
(TaskAttemptTooManyFetchFailureEvent) event;
// too many fetch failures can only happen for map tasks
Preconditions
.checkArgument(taskAttempt.getID().getTaskId().getTaskType() == TaskType.MAP);
//add to diagnostic
taskAttempt.addDiagnosticInfo("Too many fetch failures."
+ " Failing the attempt. Last failure reported by " +
fetchFailureEvent.getReduceId() +
" from host " + fetchFailureEvent.getReduceHost());
if (taskAttempt.getLaunchTime() != 0) {
taskAttempt.eventHandler
.handle(createJobCounterUpdateEventTAFailed(taskAttempt, true));
TaskAttemptUnsuccessfulCompletionEvent tauce =
createTaskAttemptUnsuccessfulCompletionEvent(taskAttempt,
TaskAttemptStateInternal.FAILED);
taskAttempt.eventHandler.handle(new JobHistoryEvent(
taskAttempt.attemptId.getTaskId().getJobId(), tauce));
} else {
LOG.debug("Not generating HistoryFinish event since start event not " +
"generated for taskAttempt: " + taskAttempt.getID());
}
taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
taskAttempt.attemptId, TaskEventType.T_ATTEMPT_FAILED));
}
}
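/**
* Handles a kill that races with success. Kills of already-succeeded reduce
* attempts are ignored since their output is already in HDFS; succeeded map
* attempts are moved to KILLED so their output can be regenerated.
*/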
private static class KilledAfterSuccessTransition implements
MultipleArcTransition<TaskAttemptImpl, TaskAttemptEvent, TaskAttemptStateInternal> {
@SuppressWarnings("unchecked")
@Override
public TaskAttemptStateInternal transition(TaskAttemptImpl taskAttempt,
TaskAttemptEvent event) {
if(taskAttempt.getID().getTaskId().getTaskType() == TaskType.REDUCE) {
// After a reduce task has succeeded, its outputs are safe in HDFS, so
// logically such a task should not be killed. We only come here when
// there is a race condition in the event queue, e.g. some logic sends
// a kill request to this attempt when the successful completion event
// for this task is already in the event queue, so the kill event gets
// executed immediately after the attempt is marked successful and
// results in this transition being exercised.
// Ignore the kill for reduce tasks.
LOG.info("Ignoring killed event for successful reduce task attempt " +
taskAttempt.getID().toString());
return TaskAttemptStateInternal.SUCCEEDED;
}
if(event instanceof TaskAttemptKillEvent) {
TaskAttemptKillEvent msgEvent = (TaskAttemptKillEvent) event;
//add to diagnostic
taskAttempt.addDiagnosticInfo(msgEvent.getMessage());
}
// not setting a finish time since it was set on success
assert (taskAttempt.getFinishTime() != 0);
assert (taskAttempt.getLaunchTime() != 0);
taskAttempt.eventHandler
.handle(createJobCounterUpdateEventTAKilled(taskAttempt, true));
TaskAttemptUnsuccessfulCompletionEvent tauce = createTaskAttemptUnsuccessfulCompletionEvent(
taskAttempt, TaskAttemptStateInternal.KILLED);
taskAttempt.eventHandler.handle(new JobHistoryEvent(taskAttempt.attemptId
.getTaskId().getJobId(), tauce));
taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
taskAttempt.attemptId, TaskEventType.T_ATTEMPT_KILLED));
return TaskAttemptStateInternal.KILLED;
}
}
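/**
* Handles a kill arriving while a succeeded attempt's container is still
* finishing: the container is cleaned up either way, and the attempt stays
* on the success path for reduces but moves to the kill path for maps.
*/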
private static class KilledAfterSucceededFinishingTransition
implements MultipleArcTransition<TaskAttemptImpl, TaskAttemptEvent,
TaskAttemptStateInternal> {
@SuppressWarnings("unchecked")
@Override
public TaskAttemptStateInternal transition(TaskAttemptImpl taskAttempt,
TaskAttemptEvent event) {
taskAttempt.appContext.getTaskAttemptFinishingMonitor().unregister(
taskAttempt.attemptId);
sendContainerCleanup(taskAttempt, event);
if(taskAttempt.getID().getTaskId().getTaskType() == TaskType.REDUCE) {
// After a reduce task has succeeded, its outputs are safe in HDFS, so
// logically such a task should not be killed. We only come here when
// there is a race condition in the event queue, e.g. some logic sends
// a kill request to this attempt when the successful completion event
// for this task is already in the event queue, so the kill event gets
// executed immediately after the attempt is marked successful and
// results in this transition being exercised.
// Ignore the kill for reduce tasks.
LOG.info("Ignoring killed event for successful reduce task attempt " +
taskAttempt.getID().toString());
return TaskAttemptStateInternal.SUCCESS_CONTAINER_CLEANUP;
} else {
return TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP;
}
}
}
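/**
* Records the finish time and emits the standard kill notifications
* (job counters, job history, T_ATTEMPT_KILLED).
*/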
private static class KilledTransition implements
SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt,
TaskAttemptEvent event) {
//set the finish time
taskAttempt.setFinishTime();
if (taskAttempt.getLaunchTime() != 0) {
taskAttempt.eventHandler
.handle(createJobCounterUpdateEventTAKilled(taskAttempt, false));
TaskAttemptUnsuccessfulCompletionEvent tauce =
createTaskAttemptUnsuccessfulCompletionEvent(taskAttempt,
TaskAttemptStateInternal.KILLED);
taskAttempt.eventHandler.handle(new JobHistoryEvent(
taskAttempt.attemptId.getTaskId().getJobId(), tauce));
} else {
LOG.debug("Not generating HistoryFinish event since start event not " +
"generated for taskAttempt: " + taskAttempt.getID());
}
if (event instanceof TaskAttemptKillEvent) {
taskAttempt.addDiagnosticInfo(
((TaskAttemptKillEvent) event).getMessage());
}
// taskAttempt.logAttemptFinishedEvent(TaskAttemptStateInternal.KILLED); Not logging Map/Reduce attempts in case of failure.
taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
taskAttempt.attemptId,
TaskEventType.T_ATTEMPT_KILLED));
}
}
/**
* Transition from SUCCESS_FINISHING_CONTAINER or FAIL_FINISHING_CONTAINER
* state upon receiving TA_TIMED_OUT event
*/
private static class ExitFinishingOnTimeoutTransition implements
SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt,
TaskAttemptEvent event) {
taskAttempt.appContext.getTaskAttemptFinishingMonitor().unregister(
taskAttempt.attemptId);
// The attempt stays in finishing state for too long
String msg = "Task attempt " + taskAttempt.getID() + " is done from " +
"TaskUmbilicalProtocol's point of view. However, it stays in " +
"finishing state for too long";
LOG.warn(msg);
taskAttempt.addDiagnosticInfo(msg);
sendContainerCleanup(taskAttempt, event);
}
}
/**
* Finish and clean up the container
*/
private static class CleanupContainerTransition implements
SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt,
TaskAttemptEvent event) {
// unregister from the TaskAttemptListener so that it stops listening
// for this attempt.
finalizeProgress(taskAttempt);
sendContainerCleanup(taskAttempt, event);
}
}
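/**
* Ask the ContainerLauncher to stop and clean up this attempt's container,
* recording any kill message in the diagnostics first.
*/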
@SuppressWarnings("unchecked")
private static void sendContainerCleanup(TaskAttemptImpl taskAttempt,
TaskAttemptEvent event) {
if (event instanceof TaskAttemptKillEvent) {
taskAttempt.addDiagnosticInfo(
((TaskAttemptKillEvent) event).getMessage());
}
//send the cleanup event to containerLauncher
taskAttempt.eventHandler.handle(new ContainerLauncherEvent(
taskAttempt.attemptId,
taskAttempt.container.getId(), StringInterner
.weakIntern(taskAttempt.container.getNodeId().toString()),
taskAttempt.container.getContainerToken(),
ContainerLauncher.EventType.CONTAINER_REMOTE_CLEANUP));
}
/**
* Transition to SUCCESS_FINISHING_CONTAINER upon receiving TA_DONE event
*/
private static class MoveContainerToSucceededFinishingTransition implements
SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt,
TaskAttemptEvent event) {
finalizeProgress(taskAttempt);
// register the attempt with the finishing monitor
taskAttempt.appContext.getTaskAttemptFinishingMonitor().register(
taskAttempt.attemptId);
// set the finish time
taskAttempt.setFinishTime();
// notify job history
taskAttempt.eventHandler.handle(
createJobCounterUpdateEventTASucceeded(taskAttempt));
taskAttempt.logAttemptFinishedEvent(TaskAttemptStateInternal.SUCCEEDED);
//notify the task even though the container might not have exited yet.
taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
taskAttempt.attemptId,
TaskEventType.T_ATTEMPT_SUCCEEDED));
taskAttempt.eventHandler.handle
(new SpeculatorEvent
(taskAttempt.reportedStatus, taskAttempt.clock.getTime()));
}
}
/**
* Transition to FAIL_FINISHING_CONTAINER upon receiving TA_FAILMSG event
*/
private static class MoveContainerToFailedFinishingTransition implements
SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt,
TaskAttemptEvent event) {
finalizeProgress(taskAttempt);
// register the attempt with the finishing monitor
taskAttempt.appContext.getTaskAttemptFinishingMonitor().register(
taskAttempt.attemptId);
notifyTaskAttemptFailed(taskAttempt);
}
}
@SuppressWarnings("unchecked")
private static void notifyTaskAttemptFailed(TaskAttemptImpl taskAttempt) {
// set the finish time
taskAttempt.setFinishTime();
if (taskAttempt.getLaunchTime() != 0) {
taskAttempt.eventHandler
.handle(createJobCounterUpdateEventTAFailed(taskAttempt, false));
TaskAttemptUnsuccessfulCompletionEvent tauce =
createTaskAttemptUnsuccessfulCompletionEvent(taskAttempt,
TaskAttemptStateInternal.FAILED);
taskAttempt.eventHandler.handle(new JobHistoryEvent(
taskAttempt.attemptId.getTaskId().getJobId(), tauce));
// taskAttempt.logAttemptFinishedEvent(TaskAttemptStateInternal.FAILED); Not
// handling failed map/reduce events.
} else {
LOG.debug("Not generating HistoryFinish event since start event not " +
"generated for taskAttempt: " + taskAttempt.getID());
}
taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
taskAttempt.attemptId, TaskEventType.T_ATTEMPT_FAILED));
}
private void addDiagnosticInfo(String diag) {
if (diag != null && !diag.equals("")) {
diagnostics.add(diag);
}
}
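/**
* Applies a progress report from the task: stores the new status, feeds it
* to the speculator, updates the progress splits, and relays any reported
* fetch failures to the job.
*/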
private static class StatusUpdater
implements SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt,
TaskAttemptEvent event) {
// Status update calls don't really change the state of the attempt.
TaskAttemptStatus newReportedStatus =
((TaskAttemptStatusUpdateEvent) event)
.getReportedTaskAttemptStatus();
// Now switch the information in the reportedStatus
taskAttempt.reportedStatus = newReportedStatus;
taskAttempt.reportedStatus.taskState = taskAttempt.getState();
// send event to speculator about the reported status
taskAttempt.eventHandler.handle
(new SpeculatorEvent
(taskAttempt.reportedStatus, taskAttempt.clock.getTime()));
taskAttempt.updateProgressSplits();
//if fetch failures are present, send the fetch failure event to the job;
//this will only happen for reduce attempts
if (taskAttempt.reportedStatus.fetchFailedMaps != null &&
taskAttempt.reportedStatus.fetchFailedMaps.size() > 0) {
String hostname = taskAttempt.container == null ? "UNKNOWN"
: taskAttempt.container.getNodeId().getHost();
taskAttempt.eventHandler.handle(new JobTaskAttemptFetchFailureEvent(
taskAttempt.attemptId, taskAttempt.reportedStatus.fetchFailedMaps,
hostname));
}
}
}
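/**
* Appends a diagnostics report from the task to this attempt's diagnostics.
*/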
private static class DiagnosticInformationUpdater
implements SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
@Override
public void transition(TaskAttemptImpl taskAttempt,
TaskAttemptEvent event) {
TaskAttemptDiagnosticsUpdateEvent diagEvent =
(TaskAttemptDiagnosticsUpdateEvent) event;
LOG.info("Diagnostics report from " + taskAttempt.attemptId + ": "
+ diagEvent.getDiagnosticInfo());
taskAttempt.addDiagnosticInfo(diagEvent.getDiagnosticInfo());
}
}
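/**
* Seed a freshly created status object with NEW-state defaults.
*/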
private void initTaskAttemptStatus(TaskAttemptStatus result) {
result.progress = 0.0f;
result.phase = Phase.STARTING;
result.stateString = "NEW";
result.taskState = TaskAttemptState.NEW;
result.counters = EMPTY_COUNTERS;
}
}
| 93,376 | 40.464032 | 129 | java |
hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.job.impl;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobACLsManager;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TaskCompletionEvent;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.jobhistory.JobFinishedEvent;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo;
import org.apache.hadoop.mapreduce.jobhistory.JobInfoChangeEvent;
import org.apache.hadoop.mapreduce.jobhistory.JobInitedEvent;
import org.apache.hadoop.mapreduce.jobhistory.JobQueueChangeEvent;
import org.apache.hadoop.mapreduce.jobhistory.JobSubmittedEvent;
import org.apache.hadoop.mapreduce.jobhistory.JobUnsuccessfulCompletionEvent;
import org.apache.hadoop.mapreduce.lib.chain.ChainMapper;
import org.apache.hadoop.mapreduce.lib.chain.ChainReducer;
import org.apache.hadoop.mapreduce.security.TokenCache;
import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo;
import org.apache.hadoop.mapreduce.split.SplitMetaInfoReader;
import org.apache.hadoop.mapreduce.task.JobContextImpl;
import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.Phase;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEventStatus;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener;
import org.apache.hadoop.mapreduce.v2.app.commit.CommitterJobAbortEvent;
import org.apache.hadoop.mapreduce.v2.app.commit.CommitterJobCommitEvent;
import org.apache.hadoop.mapreduce.v2.app.commit.CommitterJobSetupEvent;
import org.apache.hadoop.mapreduce.v2.app.job.JobStateInternal;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobAbortCompletedEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobCommitFailedEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobDiagnosticsUpdateEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobFinishEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobSetupFailedEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobStartEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskAttemptCompletedEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskAttemptFetchFailureEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobUpdatedNodesEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptKillEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptTooManyFetchFailureEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskRecoverEvent;
import org.apache.hadoop.mapreduce.v2.app.metrics.MRAppMetrics;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.state.InvalidStateTransitionException;
import org.apache.hadoop.yarn.state.MultipleArcTransition;
import org.apache.hadoop.yarn.state.SingleArcTransition;
import org.apache.hadoop.yarn.state.StateMachine;
import org.apache.hadoop.yarn.state.StateMachineFactory;
import org.apache.hadoop.yarn.util.Clock;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
/** Implementation of the Job interface. Maintains the state machine of a Job.
* Read and write calls use a ReadWriteLock for concurrency.
*/
@SuppressWarnings({ "rawtypes", "unchecked" })
public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
EventHandler<JobEvent> {
private static final TaskAttemptCompletionEvent[]
EMPTY_TASK_ATTEMPT_COMPLETION_EVENTS = new TaskAttemptCompletionEvent[0];
private static final TaskCompletionEvent[]
EMPTY_TASK_COMPLETION_EVENTS = new TaskCompletionEvent[0];
private static final Log LOG = LogFactory.getLog(JobImpl.class);
//The maximum fraction of fetch failures allowed for a map
private float maxAllowedFetchFailuresFraction;
//Maximum no. of fetch-failure notifications after which a map task is failed
private int maxFetchFailuresNotifications;
public static final String JOB_KILLED_DIAG =
"Job received Kill while in RUNNING state.";
//final fields
private final ApplicationAttemptId applicationAttemptId;
private final Clock clock;
private final JobACLsManager aclsManager;
private final String reporterUserName;
private final Map<JobACL, AccessControlList> jobACLs;
private float setupWeight = 0.05f;
private float cleanupWeight = 0.05f;
private float mapWeight = 0.0f;
private float reduceWeight = 0.0f;
private final Map<TaskId, TaskInfo> completedTasksFromPreviousRun;
private final List<AMInfo> amInfos;
private final Lock readLock;
private final Lock writeLock;
private final JobId jobId;
private final String jobName;
private final OutputCommitter committer;
private final boolean newApiCommitter;
private final org.apache.hadoop.mapreduce.JobID oldJobId;
private final TaskAttemptListener taskAttemptListener;
private final Object tasksSyncHandle = new Object();
private final Set<TaskId> mapTasks = new LinkedHashSet<TaskId>();
private final Set<TaskId> reduceTasks = new LinkedHashSet<TaskId>();
/**
* Maps nodes to the task attempts that have succeeded on those nodes.
*/
private final HashMap<NodeId, List<TaskAttemptId>>
nodesToSucceededTaskAttempts = new HashMap<NodeId, List<TaskAttemptId>>();
private final EventHandler eventHandler;
private final MRAppMetrics metrics;
private final String userName;
private String queueName;
private final long appSubmitTime;
private final AppContext appContext;
private boolean lazyTasksCopyNeeded = false;
volatile Map<TaskId, Task> tasks = new LinkedHashMap<TaskId, Task>();
private Counters jobCounters = new Counters();
private Object fullCountersLock = new Object();
private Counters fullCounters = null;
private Counters finalMapCounters = null;
private Counters finalReduceCounters = null;
// FIXME:
//
// Can then replace task-level uber counters (MR-2424) with job-level ones
// sent from LocalContainerLauncher, and eventually including a count of
// uber-AM attempts (probably sent from MRAppMaster).
public JobConf conf;
//fields initialized in init
private FileSystem fs;
private Path remoteJobSubmitDir;
public Path remoteJobConfFile;
private JobContext jobContext;
private int allowedMapFailuresPercent = 0;
private int allowedReduceFailuresPercent = 0;
private List<TaskAttemptCompletionEvent> taskAttemptCompletionEvents;
private List<TaskCompletionEvent> mapAttemptCompletionEvents;
private List<Integer> taskCompletionIdxToMapCompletionIdx;
private final List<String> diagnostics = new ArrayList<String>();
//task/attempt related datastructures
private final Map<TaskId, Integer> successAttemptCompletionEventNoMap =
new HashMap<TaskId, Integer>();
private final Map<TaskAttemptId, Integer> fetchFailuresMapping =
new HashMap<TaskAttemptId, Integer>();
private static final DiagnosticsUpdateTransition
DIAGNOSTIC_UPDATE_TRANSITION = new DiagnosticsUpdateTransition();
private static final InternalErrorTransition
INTERNAL_ERROR_TRANSITION = new InternalErrorTransition();
private static final InternalRebootTransition
INTERNAL_REBOOT_TRANSITION = new InternalRebootTransition();
private static final TaskAttemptCompletedEventTransition
TASK_ATTEMPT_COMPLETED_EVENT_TRANSITION =
new TaskAttemptCompletedEventTransition();
private static final CounterUpdateTransition COUNTER_UPDATE_TRANSITION =
new CounterUpdateTransition();
private static final UpdatedNodesTransition UPDATED_NODES_TRANSITION =
new UpdatedNodesTransition();
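// The job state machine: each JobStateInternal state below is wired to the
// events it handles, with the terminal ERROR and REBOOT states absorbing
// every remaining event.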
protected static final
StateMachineFactory<JobImpl, JobStateInternal, JobEventType, JobEvent>
stateMachineFactory
= new StateMachineFactory<JobImpl, JobStateInternal, JobEventType, JobEvent>
(JobStateInternal.NEW)
// Transitions from NEW state
.addTransition(JobStateInternal.NEW, JobStateInternal.NEW,
JobEventType.JOB_DIAGNOSTIC_UPDATE,
DIAGNOSTIC_UPDATE_TRANSITION)
.addTransition(JobStateInternal.NEW, JobStateInternal.NEW,
JobEventType.JOB_COUNTER_UPDATE, COUNTER_UPDATE_TRANSITION)
.addTransition
(JobStateInternal.NEW,
EnumSet.of(JobStateInternal.INITED, JobStateInternal.NEW),
JobEventType.JOB_INIT,
new InitTransition())
.addTransition(JobStateInternal.NEW, JobStateInternal.FAIL_ABORT,
JobEventType.JOB_INIT_FAILED,
new InitFailedTransition())
.addTransition(JobStateInternal.NEW, JobStateInternal.KILLED,
JobEventType.JOB_KILL,
new KillNewJobTransition())
.addTransition(JobStateInternal.NEW, JobStateInternal.ERROR,
JobEventType.INTERNAL_ERROR,
INTERNAL_ERROR_TRANSITION)
.addTransition(JobStateInternal.NEW, JobStateInternal.REBOOT,
JobEventType.JOB_AM_REBOOT,
INTERNAL_REBOOT_TRANSITION)
// Ignore-able events
.addTransition(JobStateInternal.NEW, JobStateInternal.NEW,
JobEventType.JOB_UPDATED_NODES)
// Transitions from INITED state
.addTransition(JobStateInternal.INITED, JobStateInternal.INITED,
JobEventType.JOB_DIAGNOSTIC_UPDATE,
DIAGNOSTIC_UPDATE_TRANSITION)
.addTransition(JobStateInternal.INITED, JobStateInternal.INITED,
JobEventType.JOB_COUNTER_UPDATE, COUNTER_UPDATE_TRANSITION)
.addTransition(JobStateInternal.INITED, JobStateInternal.SETUP,
JobEventType.JOB_START,
new StartTransition())
.addTransition(JobStateInternal.INITED, JobStateInternal.KILLED,
JobEventType.JOB_KILL,
new KillInitedJobTransition())
.addTransition(JobStateInternal.INITED, JobStateInternal.ERROR,
JobEventType.INTERNAL_ERROR,
INTERNAL_ERROR_TRANSITION)
.addTransition(JobStateInternal.INITED, JobStateInternal.REBOOT,
JobEventType.JOB_AM_REBOOT,
INTERNAL_REBOOT_TRANSITION)
// Ignore-able events
.addTransition(JobStateInternal.INITED, JobStateInternal.INITED,
JobEventType.JOB_UPDATED_NODES)
// Transitions from SETUP state
.addTransition(JobStateInternal.SETUP, JobStateInternal.SETUP,
JobEventType.JOB_DIAGNOSTIC_UPDATE,
DIAGNOSTIC_UPDATE_TRANSITION)
.addTransition(JobStateInternal.SETUP, JobStateInternal.SETUP,
JobEventType.JOB_COUNTER_UPDATE, COUNTER_UPDATE_TRANSITION)
.addTransition(JobStateInternal.SETUP, JobStateInternal.RUNNING,
JobEventType.JOB_SETUP_COMPLETED,
new SetupCompletedTransition())
.addTransition(JobStateInternal.SETUP, JobStateInternal.FAIL_ABORT,
JobEventType.JOB_SETUP_FAILED,
new SetupFailedTransition())
.addTransition(JobStateInternal.SETUP, JobStateInternal.KILL_ABORT,
JobEventType.JOB_KILL,
new KilledDuringSetupTransition())
.addTransition(JobStateInternal.SETUP, JobStateInternal.ERROR,
JobEventType.INTERNAL_ERROR,
INTERNAL_ERROR_TRANSITION)
.addTransition(JobStateInternal.SETUP, JobStateInternal.REBOOT,
JobEventType.JOB_AM_REBOOT,
INTERNAL_REBOOT_TRANSITION)
// Ignore-able events
.addTransition(JobStateInternal.SETUP, JobStateInternal.SETUP,
JobEventType.JOB_UPDATED_NODES)
// Transitions from RUNNING state
.addTransition(JobStateInternal.RUNNING, JobStateInternal.RUNNING,
JobEventType.JOB_TASK_ATTEMPT_COMPLETED,
TASK_ATTEMPT_COMPLETED_EVENT_TRANSITION)
.addTransition
(JobStateInternal.RUNNING,
EnumSet.of(JobStateInternal.RUNNING,
JobStateInternal.COMMITTING, JobStateInternal.FAIL_WAIT,
JobStateInternal.FAIL_ABORT),
JobEventType.JOB_TASK_COMPLETED,
new TaskCompletedTransition())
.addTransition
(JobStateInternal.RUNNING,
EnumSet.of(JobStateInternal.RUNNING,
JobStateInternal.COMMITTING),
JobEventType.JOB_COMPLETED,
new JobNoTasksCompletedTransition())
.addTransition(JobStateInternal.RUNNING, JobStateInternal.KILL_WAIT,
JobEventType.JOB_KILL, new KillTasksTransition())
.addTransition(JobStateInternal.RUNNING, JobStateInternal.RUNNING,
JobEventType.JOB_UPDATED_NODES,
UPDATED_NODES_TRANSITION)
.addTransition(JobStateInternal.RUNNING, JobStateInternal.RUNNING,
JobEventType.JOB_MAP_TASK_RESCHEDULED,
new MapTaskRescheduledTransition())
.addTransition(JobStateInternal.RUNNING, JobStateInternal.RUNNING,
JobEventType.JOB_DIAGNOSTIC_UPDATE,
DIAGNOSTIC_UPDATE_TRANSITION)
.addTransition(JobStateInternal.RUNNING, JobStateInternal.RUNNING,
JobEventType.JOB_COUNTER_UPDATE, COUNTER_UPDATE_TRANSITION)
.addTransition(JobStateInternal.RUNNING, JobStateInternal.RUNNING,
JobEventType.JOB_TASK_ATTEMPT_FETCH_FAILURE,
new TaskAttemptFetchFailureTransition())
.addTransition(
JobStateInternal.RUNNING,
JobStateInternal.ERROR, JobEventType.INTERNAL_ERROR,
INTERNAL_ERROR_TRANSITION)
.addTransition(JobStateInternal.RUNNING, JobStateInternal.REBOOT,
JobEventType.JOB_AM_REBOOT,
INTERNAL_REBOOT_TRANSITION)
// Transitions from KILL_WAIT state.
.addTransition
(JobStateInternal.KILL_WAIT,
EnumSet.of(JobStateInternal.KILL_WAIT,
JobStateInternal.KILL_ABORT),
JobEventType.JOB_TASK_COMPLETED,
new KillWaitTaskCompletedTransition())
.addTransition(JobStateInternal.KILL_WAIT, JobStateInternal.KILL_WAIT,
JobEventType.JOB_TASK_ATTEMPT_COMPLETED,
TASK_ATTEMPT_COMPLETED_EVENT_TRANSITION)
.addTransition(JobStateInternal.KILL_WAIT, JobStateInternal.KILL_WAIT,
JobEventType.JOB_DIAGNOSTIC_UPDATE,
DIAGNOSTIC_UPDATE_TRANSITION)
.addTransition(JobStateInternal.KILL_WAIT, JobStateInternal.KILL_WAIT,
JobEventType.JOB_COUNTER_UPDATE, COUNTER_UPDATE_TRANSITION)
.addTransition(
JobStateInternal.KILL_WAIT,
JobStateInternal.ERROR, JobEventType.INTERNAL_ERROR,
INTERNAL_ERROR_TRANSITION)
// Ignore-able events
.addTransition(JobStateInternal.KILL_WAIT, JobStateInternal.KILL_WAIT,
EnumSet.of(JobEventType.JOB_KILL,
JobEventType.JOB_UPDATED_NODES,
JobEventType.JOB_MAP_TASK_RESCHEDULED,
JobEventType.JOB_TASK_ATTEMPT_FETCH_FAILURE,
JobEventType.JOB_AM_REBOOT))
// Transitions from COMMITTING state
.addTransition(JobStateInternal.COMMITTING,
JobStateInternal.SUCCEEDED,
JobEventType.JOB_COMMIT_COMPLETED,
new CommitSucceededTransition())
.addTransition(JobStateInternal.COMMITTING,
JobStateInternal.FAIL_ABORT,
JobEventType.JOB_COMMIT_FAILED,
new CommitFailedTransition())
.addTransition(JobStateInternal.COMMITTING,
JobStateInternal.KILL_ABORT,
JobEventType.JOB_KILL,
new KilledDuringCommitTransition())
.addTransition(JobStateInternal.COMMITTING,
JobStateInternal.COMMITTING,
JobEventType.JOB_DIAGNOSTIC_UPDATE,
DIAGNOSTIC_UPDATE_TRANSITION)
.addTransition(JobStateInternal.COMMITTING,
JobStateInternal.COMMITTING,
JobEventType.JOB_COUNTER_UPDATE, COUNTER_UPDATE_TRANSITION)
.addTransition(JobStateInternal.COMMITTING,
JobStateInternal.ERROR, JobEventType.INTERNAL_ERROR,
INTERNAL_ERROR_TRANSITION)
.addTransition(JobStateInternal.COMMITTING, JobStateInternal.REBOOT,
JobEventType.JOB_AM_REBOOT,
INTERNAL_REBOOT_TRANSITION)
// Ignore-able events
.addTransition(JobStateInternal.COMMITTING,
JobStateInternal.COMMITTING,
EnumSet.of(JobEventType.JOB_UPDATED_NODES,
JobEventType.JOB_TASK_ATTEMPT_FETCH_FAILURE))
// Transitions from SUCCEEDED state
.addTransition(JobStateInternal.SUCCEEDED, JobStateInternal.SUCCEEDED,
JobEventType.JOB_DIAGNOSTIC_UPDATE,
DIAGNOSTIC_UPDATE_TRANSITION)
.addTransition(JobStateInternal.SUCCEEDED, JobStateInternal.SUCCEEDED,
JobEventType.JOB_COUNTER_UPDATE, COUNTER_UPDATE_TRANSITION)
.addTransition(
JobStateInternal.SUCCEEDED,
JobStateInternal.ERROR, JobEventType.INTERNAL_ERROR,
INTERNAL_ERROR_TRANSITION)
// Ignore-able events
.addTransition(JobStateInternal.SUCCEEDED, JobStateInternal.SUCCEEDED,
EnumSet.of(JobEventType.JOB_KILL,
JobEventType.JOB_UPDATED_NODES,
JobEventType.JOB_TASK_ATTEMPT_FETCH_FAILURE,
JobEventType.JOB_AM_REBOOT,
JobEventType.JOB_TASK_ATTEMPT_COMPLETED,
JobEventType.JOB_MAP_TASK_RESCHEDULED))
// Transitions from FAIL_WAIT state
.addTransition(JobStateInternal.FAIL_WAIT,
JobStateInternal.FAIL_WAIT,
JobEventType.JOB_DIAGNOSTIC_UPDATE,
DIAGNOSTIC_UPDATE_TRANSITION)
.addTransition(JobStateInternal.FAIL_WAIT,
JobStateInternal.FAIL_WAIT,
JobEventType.JOB_COUNTER_UPDATE, COUNTER_UPDATE_TRANSITION)
.addTransition(JobStateInternal.FAIL_WAIT,
EnumSet.of(JobStateInternal.FAIL_WAIT, JobStateInternal.FAIL_ABORT),
JobEventType.JOB_TASK_COMPLETED,
new JobFailWaitTransition())
.addTransition(JobStateInternal.FAIL_WAIT,
JobStateInternal.FAIL_ABORT, JobEventType.JOB_FAIL_WAIT_TIMEDOUT,
new JobFailWaitTimedOutTransition())
.addTransition(JobStateInternal.FAIL_WAIT, JobStateInternal.KILLED,
JobEventType.JOB_KILL,
new KilledDuringAbortTransition())
.addTransition(JobStateInternal.FAIL_WAIT,
JobStateInternal.ERROR, JobEventType.INTERNAL_ERROR,
INTERNAL_ERROR_TRANSITION)
// Ignore-able events
.addTransition(JobStateInternal.FAIL_WAIT,
JobStateInternal.FAIL_WAIT,
EnumSet.of(JobEventType.JOB_UPDATED_NODES,
JobEventType.JOB_TASK_ATTEMPT_COMPLETED,
JobEventType.JOB_MAP_TASK_RESCHEDULED,
JobEventType.JOB_TASK_ATTEMPT_FETCH_FAILURE,
JobEventType.JOB_AM_REBOOT))
//Transitions from FAIL_ABORT state
.addTransition(JobStateInternal.FAIL_ABORT,
JobStateInternal.FAIL_ABORT,
JobEventType.JOB_DIAGNOSTIC_UPDATE,
DIAGNOSTIC_UPDATE_TRANSITION)
.addTransition(JobStateInternal.FAIL_ABORT,
JobStateInternal.FAIL_ABORT,
JobEventType.JOB_COUNTER_UPDATE, COUNTER_UPDATE_TRANSITION)
.addTransition(JobStateInternal.FAIL_ABORT, JobStateInternal.FAILED,
JobEventType.JOB_ABORT_COMPLETED,
new JobAbortCompletedTransition())
.addTransition(JobStateInternal.FAIL_ABORT, JobStateInternal.KILLED,
JobEventType.JOB_KILL,
new KilledDuringAbortTransition())
.addTransition(JobStateInternal.FAIL_ABORT,
JobStateInternal.ERROR, JobEventType.INTERNAL_ERROR,
INTERNAL_ERROR_TRANSITION)
// Ignore-able events
.addTransition(JobStateInternal.FAIL_ABORT,
JobStateInternal.FAIL_ABORT,
EnumSet.of(JobEventType.JOB_UPDATED_NODES,
JobEventType.JOB_TASK_COMPLETED,
JobEventType.JOB_TASK_ATTEMPT_COMPLETED,
JobEventType.JOB_MAP_TASK_RESCHEDULED,
JobEventType.JOB_TASK_ATTEMPT_FETCH_FAILURE,
JobEventType.JOB_COMMIT_COMPLETED,
JobEventType.JOB_COMMIT_FAILED,
JobEventType.JOB_AM_REBOOT,
JobEventType.JOB_FAIL_WAIT_TIMEDOUT))
// Transitions from KILL_ABORT state
.addTransition(JobStateInternal.KILL_ABORT,
JobStateInternal.KILL_ABORT,
JobEventType.JOB_DIAGNOSTIC_UPDATE,
DIAGNOSTIC_UPDATE_TRANSITION)
.addTransition(JobStateInternal.KILL_ABORT,
JobStateInternal.KILL_ABORT,
JobEventType.JOB_COUNTER_UPDATE, COUNTER_UPDATE_TRANSITION)
.addTransition(JobStateInternal.KILL_ABORT, JobStateInternal.KILLED,
JobEventType.JOB_ABORT_COMPLETED,
new JobAbortCompletedTransition())
.addTransition(JobStateInternal.KILL_ABORT, JobStateInternal.KILLED,
JobEventType.JOB_KILL,
new KilledDuringAbortTransition())
.addTransition(JobStateInternal.KILL_ABORT,
JobStateInternal.ERROR, JobEventType.INTERNAL_ERROR,
INTERNAL_ERROR_TRANSITION)
// Ignore-able events
.addTransition(JobStateInternal.KILL_ABORT,
JobStateInternal.KILL_ABORT,
EnumSet.of(JobEventType.JOB_UPDATED_NODES,
JobEventType.JOB_TASK_ATTEMPT_FETCH_FAILURE,
JobEventType.JOB_SETUP_COMPLETED,
JobEventType.JOB_SETUP_FAILED,
JobEventType.JOB_COMMIT_COMPLETED,
JobEventType.JOB_COMMIT_FAILED,
JobEventType.JOB_AM_REBOOT))
// Transitions from FAILED state
.addTransition(JobStateInternal.FAILED, JobStateInternal.FAILED,
JobEventType.JOB_DIAGNOSTIC_UPDATE,
DIAGNOSTIC_UPDATE_TRANSITION)
.addTransition(JobStateInternal.FAILED, JobStateInternal.FAILED,
JobEventType.JOB_COUNTER_UPDATE, COUNTER_UPDATE_TRANSITION)
.addTransition(
JobStateInternal.FAILED,
JobStateInternal.ERROR, JobEventType.INTERNAL_ERROR,
INTERNAL_ERROR_TRANSITION)
// Ignore-able events
.addTransition(JobStateInternal.FAILED, JobStateInternal.FAILED,
EnumSet.of(JobEventType.JOB_KILL,
JobEventType.JOB_UPDATED_NODES,
JobEventType.JOB_TASK_COMPLETED,
JobEventType.JOB_TASK_ATTEMPT_COMPLETED,
JobEventType.JOB_MAP_TASK_RESCHEDULED,
JobEventType.JOB_TASK_ATTEMPT_FETCH_FAILURE,
JobEventType.JOB_SETUP_COMPLETED,
JobEventType.JOB_SETUP_FAILED,
JobEventType.JOB_COMMIT_COMPLETED,
JobEventType.JOB_COMMIT_FAILED,
JobEventType.JOB_ABORT_COMPLETED,
JobEventType.JOB_AM_REBOOT))
// Transitions from KILLED state
.addTransition(JobStateInternal.KILLED, JobStateInternal.KILLED,
JobEventType.JOB_DIAGNOSTIC_UPDATE,
DIAGNOSTIC_UPDATE_TRANSITION)
.addTransition(JobStateInternal.KILLED, JobStateInternal.KILLED,
JobEventType.JOB_COUNTER_UPDATE, COUNTER_UPDATE_TRANSITION)
.addTransition(
JobStateInternal.KILLED,
JobStateInternal.ERROR, JobEventType.INTERNAL_ERROR,
INTERNAL_ERROR_TRANSITION)
// Ignore-able events
.addTransition(JobStateInternal.KILLED, JobStateInternal.KILLED,
EnumSet.of(JobEventType.JOB_KILL,
JobEventType.JOB_START,
JobEventType.JOB_UPDATED_NODES,
JobEventType.JOB_TASK_ATTEMPT_FETCH_FAILURE,
JobEventType.JOB_SETUP_COMPLETED,
JobEventType.JOB_SETUP_FAILED,
JobEventType.JOB_COMMIT_COMPLETED,
JobEventType.JOB_COMMIT_FAILED,
JobEventType.JOB_ABORT_COMPLETED,
JobEventType.JOB_AM_REBOOT))
// No transitions from INTERNAL_ERROR state. Ignore all.
.addTransition(
JobStateInternal.ERROR,
JobStateInternal.ERROR,
EnumSet.of(JobEventType.JOB_INIT,
JobEventType.JOB_KILL,
JobEventType.JOB_TASK_COMPLETED,
JobEventType.JOB_TASK_ATTEMPT_COMPLETED,
JobEventType.JOB_MAP_TASK_RESCHEDULED,
JobEventType.JOB_DIAGNOSTIC_UPDATE,
JobEventType.JOB_UPDATED_NODES,
JobEventType.JOB_TASK_ATTEMPT_FETCH_FAILURE,
JobEventType.JOB_SETUP_COMPLETED,
JobEventType.JOB_SETUP_FAILED,
JobEventType.JOB_COMMIT_COMPLETED,
JobEventType.JOB_COMMIT_FAILED,
JobEventType.JOB_ABORT_COMPLETED,
JobEventType.INTERNAL_ERROR,
JobEventType.JOB_AM_REBOOT))
.addTransition(JobStateInternal.ERROR, JobStateInternal.ERROR,
JobEventType.JOB_COUNTER_UPDATE, COUNTER_UPDATE_TRANSITION)
// No transitions from AM_REBOOT state. Ignore all.
.addTransition(
JobStateInternal.REBOOT,
JobStateInternal.REBOOT,
EnumSet.of(JobEventType.JOB_INIT,
JobEventType.JOB_KILL,
JobEventType.JOB_TASK_COMPLETED,
JobEventType.JOB_TASK_ATTEMPT_COMPLETED,
JobEventType.JOB_MAP_TASK_RESCHEDULED,
JobEventType.JOB_DIAGNOSTIC_UPDATE,
JobEventType.JOB_UPDATED_NODES,
JobEventType.JOB_TASK_ATTEMPT_FETCH_FAILURE,
JobEventType.JOB_SETUP_COMPLETED,
JobEventType.JOB_SETUP_FAILED,
JobEventType.JOB_COMMIT_COMPLETED,
JobEventType.JOB_COMMIT_FAILED,
JobEventType.JOB_ABORT_COMPLETED,
JobEventType.INTERNAL_ERROR,
JobEventType.JOB_AM_REBOOT))
.addTransition(JobStateInternal.REBOOT, JobStateInternal.REBOOT,
JobEventType.JOB_COUNTER_UPDATE, COUNTER_UPDATE_TRANSITION)
// create the topology tables
.installTopology();
private final StateMachine<JobStateInternal, JobEventType, JobEvent> stateMachine;
//changing fields while the job is running
private int numMapTasks;
private int numReduceTasks;
private int completedTaskCount = 0;
private int succeededMapTaskCount = 0;
private int succeededReduceTaskCount = 0;
private int failedMapTaskCount = 0;
private int failedReduceTaskCount = 0;
private int killedMapTaskCount = 0;
private int killedReduceTaskCount = 0;
private long startTime;
private long finishTime;
private float setupProgress;
private float mapProgress;
private float reduceProgress;
private float cleanupProgress;
private boolean isUber = false;
private Credentials jobCredentials;
private Token<JobTokenIdentifier> jobToken;
private JobTokenSecretManager jobTokenSecretManager;
private JobStateInternal forcedState = null;
//Executor used for running future tasks.
private ScheduledThreadPoolExecutor executor;
private ScheduledFuture failWaitTriggerScheduledFuture;
private JobState lastNonFinalState = JobState.NEW;
public JobImpl(JobId jobId, ApplicationAttemptId applicationAttemptId,
Configuration conf, EventHandler eventHandler,
TaskAttemptListener taskAttemptListener,
JobTokenSecretManager jobTokenSecretManager,
Credentials jobCredentials, Clock clock,
Map<TaskId, TaskInfo> completedTasksFromPreviousRun, MRAppMetrics metrics,
OutputCommitter committer, boolean newApiCommitter, String userName,
long appSubmitTime, List<AMInfo> amInfos, AppContext appContext,
JobStateInternal forcedState, String forcedDiagnostic) {
this.applicationAttemptId = applicationAttemptId;
this.jobId = jobId;
this.jobName = conf.get(JobContext.JOB_NAME, "<missing job name>");
this.conf = new JobConf(conf);
this.metrics = metrics;
this.clock = clock;
this.completedTasksFromPreviousRun = completedTasksFromPreviousRun;
this.amInfos = amInfos;
this.appContext = appContext;
this.userName = userName;
this.queueName = conf.get(MRJobConfig.QUEUE_NAME, "default");
this.appSubmitTime = appSubmitTime;
this.oldJobId = TypeConverter.fromYarn(jobId);
this.committer = committer;
this.newApiCommitter = newApiCommitter;
this.taskAttemptListener = taskAttemptListener;
this.eventHandler = eventHandler;
ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
this.readLock = readWriteLock.readLock();
this.writeLock = readWriteLock.writeLock();
this.jobCredentials = jobCredentials;
this.jobTokenSecretManager = jobTokenSecretManager;
this.aclsManager = new JobACLsManager(conf);
this.reporterUserName = System.getProperty("user.name");
this.jobACLs = aclsManager.constructJobACLs(conf);
ThreadFactory threadFactory = new ThreadFactoryBuilder()
.setNameFormat("Job Fail Wait Timeout Monitor #%d")
.setDaemon(true)
.build();
this.executor = new ScheduledThreadPoolExecutor(1, threadFactory);
// This "this leak" is okay because the retained pointer is in an
// instance variable.
stateMachine = stateMachineFactory.make(this);
this.forcedState = forcedState;
if(forcedDiagnostic != null) {
this.diagnostics.add(forcedDiagnostic);
}
this.maxAllowedFetchFailuresFraction = conf.getFloat(
MRJobConfig.MAX_ALLOWED_FETCH_FAILURES_FRACTION,
MRJobConfig.DEFAULT_MAX_ALLOWED_FETCH_FAILURES_FRACTION);
this.maxFetchFailuresNotifications = conf.getInt(
MRJobConfig.MAX_FETCH_FAILURES_NOTIFICATIONS,
MRJobConfig.DEFAULT_MAX_FETCH_FAILURES_NOTIFICATIONS);
}
protected StateMachine<JobStateInternal, JobEventType, JobEvent> getStateMachine() {
return stateMachine;
}
@Override
public JobId getID() {
return jobId;
}
EventHandler getEventHandler() {
return this.eventHandler;
}
JobContext getJobContext() {
return this.jobContext;
}
@Override
public boolean checkAccess(UserGroupInformation callerUGI,
JobACL jobOperation) {
AccessControlList jobACL = jobACLs.get(jobOperation);
if (jobACL == null) {
return true;
}
return aclsManager.checkAccess(callerUGI, jobOperation, userName, jobACL);
}
@Override
public Task getTask(TaskId taskID) {
readLock.lock();
try {
return tasks.get(taskID);
} finally {
readLock.unlock();
}
}
@Override
public int getCompletedMaps() {
readLock.lock();
try {
return succeededMapTaskCount + failedMapTaskCount + killedMapTaskCount;
} finally {
readLock.unlock();
}
}
@Override
public int getCompletedReduces() {
readLock.lock();
try {
return succeededReduceTaskCount + failedReduceTaskCount
+ killedReduceTaskCount;
} finally {
readLock.unlock();
}
}
@Override
public boolean isUber() {
return isUber;
}
@Override
public Counters getAllCounters() {
readLock.lock();
try {
JobStateInternal state = getInternalState();
if (state == JobStateInternal.ERROR || state == JobStateInternal.FAILED
|| state == JobStateInternal.KILLED || state == JobStateInternal.SUCCEEDED) {
this.mayBeConstructFinalFullCounters();
return fullCounters;
}
Counters counters = new Counters();
counters.incrAllCounters(jobCounters);
return incrTaskCounters(counters, tasks.values());
} finally {
readLock.unlock();
}
}
public static Counters incrTaskCounters(
Counters counters, Collection<Task> tasks) {
for (Task task : tasks) {
counters.incrAllCounters(task.getCounters());
}
return counters;
}
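/**
* Returns a window of completion events beginning at fromEventId and at
* most maxEvents long.
*/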
@Override
public TaskAttemptCompletionEvent[] getTaskAttemptCompletionEvents(
int fromEventId, int maxEvents) {
TaskAttemptCompletionEvent[] events = EMPTY_TASK_ATTEMPT_COMPLETION_EVENTS;
readLock.lock();
try {
if (taskAttemptCompletionEvents.size() > fromEventId) {
int actualMax = Math.min(maxEvents,
(taskAttemptCompletionEvents.size() - fromEventId));
events = taskAttemptCompletionEvents.subList(fromEventId,
actualMax + fromEventId).toArray(events);
}
return events;
} finally {
readLock.unlock();
}
}
@Override
public TaskCompletionEvent[] getMapAttemptCompletionEvents(
int startIndex, int maxEvents) {
TaskCompletionEvent[] events = EMPTY_TASK_COMPLETION_EVENTS;
readLock.lock();
try {
if (mapAttemptCompletionEvents.size() > startIndex) {
int actualMax = Math.min(maxEvents,
(mapAttemptCompletionEvents.size() - startIndex));
events = mapAttemptCompletionEvents.subList(startIndex,
actualMax + startIndex).toArray(events);
}
return events;
} finally {
readLock.unlock();
}
}
@Override
public List<String> getDiagnostics() {
readLock.lock();
try {
return diagnostics;
} finally {
readLock.unlock();
}
}
@Override
public JobReport getReport() {
readLock.lock();
try {
JobState state = getState();
// jobFile can be null if the job is not yet inited.
String jobFile =
remoteJobConfFile == null ? "" : remoteJobConfFile.toString();
StringBuilder diagsb = new StringBuilder();
for (String s : getDiagnostics()) {
diagsb.append(s).append("\n");
}
if (getInternalState() == JobStateInternal.NEW) {
return MRBuilderUtils.newJobReport(jobId, jobName, reporterUserName,
state, appSubmitTime, startTime, finishTime, setupProgress, 0.0f,
0.0f, cleanupProgress, jobFile, amInfos, isUber, diagsb.toString());
}
computeProgress();
JobReport report = MRBuilderUtils.newJobReport(jobId, jobName,
reporterUserName,
state, appSubmitTime, startTime, finishTime, setupProgress,
this.mapProgress, this.reduceProgress,
cleanupProgress, jobFile, amInfos, isUber, diagsb.toString());
return report;
} finally {
readLock.unlock();
}
}
@Override
public float getProgress() {
this.readLock.lock();
try {
computeProgress();
return (this.setupProgress * this.setupWeight + this.cleanupProgress
* this.cleanupWeight + this.mapProgress * this.mapWeight + this.reduceProgress
* this.reduceWeight);
} finally {
this.readLock.unlock();
}
}
private void computeProgress() {
this.readLock.lock();
try {
float mapProgress = 0f;
float reduceProgress = 0f;
for (Task task : this.tasks.values()) {
if (task.getType() == TaskType.MAP) {
mapProgress += (task.isFinished() ? 1f : task.getProgress());
} else {
reduceProgress += (task.isFinished() ? 1f : task.getProgress());
}
}
if (this.numMapTasks != 0) {
mapProgress = mapProgress / this.numMapTasks;
}
if (this.numReduceTasks != 0) {
reduceProgress = reduceProgress / this.numReduceTasks;
}
this.mapProgress = mapProgress;
this.reduceProgress = reduceProgress;
} finally {
this.readLock.unlock();
}
}
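// getTasks() hands out an unmodifiable view and flags that the next
// addTask() must copy-on-write, so callers can iterate the returned map
// without holding a lock while tasks are still being added.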
@Override
public Map<TaskId, Task> getTasks() {
synchronized (tasksSyncHandle) {
lazyTasksCopyNeeded = true;
return Collections.unmodifiableMap(tasks);
}
}
@Override
public Map<TaskId,Task> getTasks(TaskType taskType) {
Map<TaskId, Task> localTasksCopy = tasks;
Map<TaskId, Task> result = new HashMap<TaskId, Task>();
Set<TaskId> tasksOfGivenType = null;
readLock.lock();
try {
if (TaskType.MAP == taskType) {
tasksOfGivenType = mapTasks;
} else {
tasksOfGivenType = reduceTasks;
}
for (TaskId taskID : tasksOfGivenType) {
result.put(taskID, localTasksCopy.get(taskID));
}
return result;
} finally {
readLock.unlock();
}
}
@Override
public JobState getState() {
readLock.lock();
try {
JobState state = getExternalState(getInternalState());
if (!appContext.hasSuccessfullyUnregistered()
&& (state == JobState.SUCCEEDED || state == JobState.FAILED
|| state == JobState.KILLED || state == JobState.ERROR)) {
return lastNonFinalState;
} else {
return state;
}
} finally {
readLock.unlock();
}
}
protected void scheduleTasks(Set<TaskId> taskIDs,
boolean recoverTaskOutput) {
for (TaskId taskID : taskIDs) {
TaskInfo taskInfo = completedTasksFromPreviousRun.remove(taskID);
if (taskInfo != null) {
eventHandler.handle(new TaskRecoverEvent(taskID, taskInfo,
committer, recoverTaskOutput));
} else {
eventHandler.handle(new TaskEvent(taskID, TaskEventType.T_SCHEDULE));
}
}
}
/**
* The only entry point to change the Job.
*/
@Override
public void handle(JobEvent event) {
if (LOG.isDebugEnabled()) {
LOG.debug("Processing " + event.getJobId() + " of type "
+ event.getType());
}
try {
writeLock.lock();
JobStateInternal oldState = getInternalState();
try {
getStateMachine().doTransition(event.getType(), event);
} catch (InvalidStateTransitionException e) {
LOG.error("Can't handle this event at current state", e);
addDiagnostic("Invalid event " + event.getType() +
" on Job " + this.jobId);
eventHandler.handle(new JobEvent(this.jobId,
JobEventType.INTERNAL_ERROR));
}
//notify the eventhandler of state change
if (oldState != getInternalState()) {
LOG.info(jobId + "Job Transitioned from " + oldState + " to "
+ getInternalState());
rememberLastNonFinalState(oldState);
}
}
finally {
writeLock.unlock();
}
}
private void rememberLastNonFinalState(JobStateInternal stateInternal) {
JobState state = getExternalState(stateInternal);
// if state is not the final state, set lastNonFinalState
if (state != JobState.SUCCEEDED && state != JobState.FAILED
&& state != JobState.KILLED && state != JobState.ERROR) {
lastNonFinalState = state;
}
}
@Private
public JobStateInternal getInternalState() {
readLock.lock();
try {
if(forcedState != null) {
return forcedState;
}
return getStateMachine().getCurrentState();
} finally {
readLock.unlock();
}
}
private JobState getExternalState(JobStateInternal smState) {
switch (smState) {
case KILL_WAIT:
case KILL_ABORT:
return JobState.KILLED;
case SETUP:
case COMMITTING:
return JobState.RUNNING;
case FAIL_WAIT:
case FAIL_ABORT:
return JobState.FAILED;
case REBOOT:
if (appContext.isLastAMRetry()) {
return JobState.ERROR;
} else {
// If this is not the last retry, report the external state as RUNNING;
// otherwise the JobClient would exit when it polls the AM for job state
return JobState.RUNNING;
}
default:
return JobState.valueOf(smState.name());
}
}
//helpful in testing
protected void addTask(Task task) {
synchronized (tasksSyncHandle) {
if (lazyTasksCopyNeeded) {
Map<TaskId, Task> newTasks = new LinkedHashMap<TaskId, Task>();
newTasks.putAll(tasks);
tasks = newTasks;
lazyTasksCopyNeeded = false;
}
}
tasks.put(task.getID(), task);
if (task.getType() == TaskType.MAP) {
mapTasks.add(task.getID());
} else if (task.getType() == TaskType.REDUCE) {
reduceTasks.add(task.getID());
}
metrics.waitingTask(task);
}
void setFinishTime() {
finishTime = clock.getTime();
}
void logJobHistoryFinishedEvent() {
this.setFinishTime();
JobFinishedEvent jfe = createJobFinishedEvent(this);
LOG.info("Calling handler for JobFinishedEvent ");
this.getEventHandler().handle(new JobHistoryEvent(this.jobId, jfe));
}
/**
* Create the default file System for this job.
* @param conf the conf object
* @return the default filesystem for this job
* @throws IOException
*/
protected FileSystem getFileSystem(Configuration conf) throws IOException {
return FileSystem.get(conf);
}
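// Fires the committer's job-commit only when every task has completed and
// the job is still RUNNING; otherwise the current state is returned
// unchanged.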
protected JobStateInternal checkReadyForCommit() {
JobStateInternal currentState = getInternalState();
if (completedTaskCount == tasks.size()
&& currentState == JobStateInternal.RUNNING) {
eventHandler.handle(new CommitterJobCommitEvent(jobId, getJobContext()));
return JobStateInternal.COMMITTING;
}
// return the current state as job not ready to commit yet
return getInternalState();
}
JobStateInternal finished(JobStateInternal finalState) {
if (getInternalState() == JobStateInternal.RUNNING) {
metrics.endRunningJob(this);
}
if (finishTime == 0) setFinishTime();
eventHandler.handle(new JobFinishEvent(jobId));
switch (finalState) {
case KILLED:
metrics.killedJob(this);
break;
case REBOOT:
case ERROR:
case FAILED:
metrics.failedJob(this);
break;
case SUCCEEDED:
metrics.completedJob(this);
break;
default:
throw new IllegalArgumentException("Illegal job state: " + finalState);
}
return finalState;
}
@Override
public String getUserName() {
return userName;
}
@Override
public String getQueueName() {
return queueName;
}
@Override
public void setQueueName(String queueName) {
this.queueName = queueName;
JobQueueChangeEvent jqce = new JobQueueChangeEvent(oldJobId, queueName);
eventHandler.handle(new JobHistoryEvent(jobId, jqce));
}
/*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.app.job.Job#getConfFile()
*/
@Override
public Path getConfFile() {
return remoteJobConfFile;
}
@Override
public String getName() {
return jobName;
}
@Override
public int getTotalMaps() {
return mapTasks.size(); //FIXME: why indirection? return numMapTasks...
// unless race? how soon can this get called?
}
@Override
public int getTotalReduces() {
return reduceTasks.size(); //FIXME: why indirection? return numReduceTasks
}
/*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.app.job.Job#getJobACLs()
*/
@Override
public Map<JobACL, AccessControlList> getJobACLs() {
return Collections.unmodifiableMap(jobACLs);
}
@Override
public List<AMInfo> getAMInfos() {
return amInfos;
}
/**
* Decide whether job can be run in uber mode based on various criteria.
* @param dataInputLength Total length for all splits
*/
private void makeUberDecision(long dataInputLength) {
//FIXME: need new memory criterion for uber-decision (oops, too late here;
// until AM-resizing supported,
// must depend on job client to pass fat-slot needs)
// these are no longer "system" settings, necessarily; user may override
int sysMaxMaps = conf.getInt(MRJobConfig.JOB_UBERTASK_MAXMAPS, 9);
int sysMaxReduces = conf.getInt(MRJobConfig.JOB_UBERTASK_MAXREDUCES, 1);
long sysMaxBytes = conf.getLong(MRJobConfig.JOB_UBERTASK_MAXBYTES,
fs.getDefaultBlockSize(this.remoteJobSubmitDir)); // FIXME: this is wrong; get FS from
// [File?]InputFormat and default block size
// from that
long sysMemSizeForUberSlot =
conf.getInt(MRJobConfig.MR_AM_VMEM_MB,
MRJobConfig.DEFAULT_MR_AM_VMEM_MB);
long sysCPUSizeForUberSlot =
conf.getInt(MRJobConfig.MR_AM_CPU_VCORES,
MRJobConfig.DEFAULT_MR_AM_CPU_VCORES);
boolean uberEnabled =
conf.getBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
boolean smallNumMapTasks = (numMapTasks <= sysMaxMaps);
boolean smallNumReduceTasks = (numReduceTasks <= sysMaxReduces);
boolean smallInput = (dataInputLength <= sysMaxBytes);
// ignoring overhead due to UberAM and statics as negligible here:
long requiredMapMB = conf.getLong(MRJobConfig.MAP_MEMORY_MB, 0);
long requiredReduceMB = conf.getLong(MRJobConfig.REDUCE_MEMORY_MB, 0);
long requiredMB = Math.max(requiredMapMB, requiredReduceMB);
int requiredMapCores = conf.getInt(
MRJobConfig.MAP_CPU_VCORES,
MRJobConfig.DEFAULT_MAP_CPU_VCORES);
int requiredReduceCores = conf.getInt(
MRJobConfig.REDUCE_CPU_VCORES,
MRJobConfig.DEFAULT_REDUCE_CPU_VCORES);
int requiredCores = Math.max(requiredMapCores, requiredReduceCores);
if (numReduceTasks == 0) {
requiredMB = requiredMapMB;
requiredCores = requiredMapCores;
}
boolean smallMemory =
(requiredMB <= sysMemSizeForUberSlot)
|| (sysMemSizeForUberSlot == JobConf.DISABLED_MEMORY_LIMIT);
boolean smallCpu = requiredCores <= sysCPUSizeForUberSlot;
boolean notChainJob = !isChainJob(conf);
// User has overall veto power over uberization, or user can modify
// limits (overriding system settings and potentially shooting
// themselves in the head). Note that ChainMapper/Reducer are
// fundamentally incompatible with MR-1220; they employ a blocking
// queue between the maps/reduces and thus require parallel execution,
// while "uber-AM" (MR AM + LocalContainerLauncher) loops over tasks
// and thus requires sequential execution.
isUber = uberEnabled && smallNumMapTasks && smallNumReduceTasks
&& smallInput && smallMemory && smallCpu
&& notChainJob;
if (isUber) {
LOG.info("Uberizing job " + jobId + ": " + numMapTasks + "m+"
+ numReduceTasks + "r tasks (" + dataInputLength
+ " input bytes) will run sequentially on single node.");
      // make sure reduces are scheduled only after all maps are completed
conf.setFloat(MRJobConfig.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART,
1.0f);
// uber-subtask attempts all get launched on same node; if one fails,
// probably should retry elsewhere, i.e., move entire uber-AM: ergo,
// limit attempts to 1 (or at most 2? probably not...)
conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, 1);
conf.setInt(MRJobConfig.REDUCE_MAX_ATTEMPTS, 1);
// disable speculation
conf.setBoolean(MRJobConfig.MAP_SPECULATIVE, false);
conf.setBoolean(MRJobConfig.REDUCE_SPECULATIVE, false);
} else {
StringBuilder msg = new StringBuilder();
msg.append("Not uberizing ").append(jobId).append(" because:");
if (!uberEnabled)
msg.append(" not enabled;");
if (!smallNumMapTasks)
msg.append(" too many maps;");
if (!smallNumReduceTasks)
msg.append(" too many reduces;");
if (!smallInput)
msg.append(" too much input;");
if (!smallCpu)
msg.append(" too much CPU;");
if (!smallMemory)
msg.append(" too much RAM;");
if (!notChainJob)
msg.append(" chainjob;");
LOG.info(msg.toString());
}
}
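  // Hedged configuration sketch (the keys are the MRJobConfig constants read
  // in makeUberDecision() above; the values are hypothetical): a job must
  // opt in and fit within every limit to be uberized.
  //
  //   Configuration conf = new Configuration();
  //   conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, true);     // opt in
  //   conf.setInt(MRJobConfig.JOB_UBERTASK_MAXMAPS, 9);           // <= 9 maps
  //   conf.setInt(MRJobConfig.JOB_UBERTASK_MAXREDUCES, 1);        // <= 1 reduce
  //   conf.setLong(MRJobConfig.JOB_UBERTASK_MAXBYTES, 64L << 20); // <= 64MB in
  //
  // In addition, the per-task memory/vcore requests must fit inside the AM's
  // own container (MR_AM_VMEM_MB / MR_AM_CPU_VCORES), and the job must not
  // be a chain job.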
/**
* ChainMapper and ChainReducer must execute in parallel, so they're not
* compatible with uberization/LocalContainerLauncher (100% sequential).
*/
private boolean isChainJob(Configuration conf) {
boolean isChainJob = false;
try {
String mapClassName = conf.get(MRJobConfig.MAP_CLASS_ATTR);
if (mapClassName != null) {
Class<?> mapClass = Class.forName(mapClassName);
if (ChainMapper.class.isAssignableFrom(mapClass))
isChainJob = true;
}
} catch (ClassNotFoundException cnfe) {
// don't care; assume it's not derived from ChainMapper
} catch (NoClassDefFoundError ignored) {
}
try {
String reduceClassName = conf.get(MRJobConfig.REDUCE_CLASS_ATTR);
if (reduceClassName != null) {
Class<?> reduceClass = Class.forName(reduceClassName);
if (ChainReducer.class.isAssignableFrom(reduceClass))
isChainJob = true;
}
} catch (ClassNotFoundException cnfe) {
// don't care; assume it's not derived from ChainReducer
} catch (NoClassDefFoundError ignored) {
}
return isChainJob;
}
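  // Note on the detection above: chain jobs configure ChainMapper /
  // ChainReducer as the job's mapper/reducer class (typically via
  // ChainMapper.addMapper() / ChainReducer.setReducer()), so the
  // isAssignableFrom() checks catch them, including subclasses. Class-loading
  // failures are deliberately treated as "not a chain job".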
private void actOnUnusableNode(NodeId nodeId, NodeState nodeState) {
// rerun previously successful map tasks
List<TaskAttemptId> taskAttemptIdList = nodesToSucceededTaskAttempts.get(nodeId);
if(taskAttemptIdList != null) {
String mesg = "TaskAttempt killed because it ran on unusable node "
+ nodeId;
for(TaskAttemptId id : taskAttemptIdList) {
if(TaskType.MAP == id.getTaskId().getTaskType()) {
          // reschedule only map tasks because their outputs may be unusable
LOG.info(mesg + ". AttemptId:" + id);
eventHandler.handle(new TaskAttemptKillEvent(id, mesg));
}
}
}
// currently running task attempts on unusable nodes are handled in
// RMContainerAllocator
}
  /*
  // Unfinished sketch, intentionally left commented out: derive the uber
  // block-size limit from the job's InputFormat (see the FIXME in
  // makeUberDecision() above) instead of the staging-dir filesystem.
  private long getBlockSize() throws ClassNotFoundException {
    String inputClassName = conf.get(MRJobConfig.INPUT_FORMAT_CLASS_ATTR);
    if (inputClassName != null) {
      Class<?> inputClass = Class.forName(inputClassName);
      if (FileInputFormat.class.isAssignableFrom(inputClass)) {
        // ...look up the input paths' FileSystem and use its block size
      }
    }
    return -1; // unknown
  }
  */
/**
* Get the workflow adjacencies from the job conf
* The string returned is of the form "key"="value" "key"="value" ...
*/
private static String getWorkflowAdjacencies(Configuration conf) {
int prefixLen = MRJobConfig.WORKFLOW_ADJACENCY_PREFIX_STRING.length();
Map<String,String> adjacencies =
conf.getValByRegex(MRJobConfig.WORKFLOW_ADJACENCY_PREFIX_PATTERN);
if (adjacencies.isEmpty()) {
return "";
}
int size = 0;
for (Entry<String,String> entry : adjacencies.entrySet()) {
int keyLen = entry.getKey().length();
size += keyLen - prefixLen;
size += entry.getValue().length() + 6;
}
StringBuilder sb = new StringBuilder(size);
for (Entry<String,String> entry : adjacencies.entrySet()) {
int keyLen = entry.getKey().length();
sb.append("\"");
sb.append(escapeString(entry.getKey().substring(prefixLen, keyLen)));
sb.append("\"=\"");
sb.append(escapeString(entry.getValue()));
sb.append("\" ");
}
return sb.toString();
}
public static String escapeString(String data) {
return StringUtils.escapeString(data, StringUtils.ESCAPE_CHAR,
new char[] {'"', '=', '.'});
}
public static class InitTransition
implements MultipleArcTransition<JobImpl, JobEvent, JobStateInternal> {
/**
* Note that this transition method is called directly (and synchronously)
* by MRAppMaster's init() method (i.e., no RPC, no thread-switching;
* just plain sequential call within AM context), so we can trigger
* modifications in AM state from here (at least, if AM is written that
* way; MR version is).
*/
@Override
public JobStateInternal transition(JobImpl job, JobEvent event) {
job.metrics.submittedJob(job);
job.metrics.preparingJob(job);
if (job.newApiCommitter) {
job.jobContext = new JobContextImpl(job.conf,
job.oldJobId);
} else {
job.jobContext = new org.apache.hadoop.mapred.JobContextImpl(
job.conf, job.oldJobId);
}
try {
setup(job);
job.fs = job.getFileSystem(job.conf);
//log to job history
JobSubmittedEvent jse = new JobSubmittedEvent(job.oldJobId,
job.conf.get(MRJobConfig.JOB_NAME, "test"),
job.conf.get(MRJobConfig.USER_NAME, "mapred"),
job.appSubmitTime,
job.remoteJobConfFile.toString(),
job.jobACLs, job.queueName,
job.conf.get(MRJobConfig.WORKFLOW_ID, ""),
job.conf.get(MRJobConfig.WORKFLOW_NAME, ""),
job.conf.get(MRJobConfig.WORKFLOW_NODE_NAME, ""),
getWorkflowAdjacencies(job.conf),
job.conf.get(MRJobConfig.WORKFLOW_TAGS, ""));
job.eventHandler.handle(new JobHistoryEvent(job.jobId, jse));
//TODO JH Verify jobACLs, UserName via UGI?
TaskSplitMetaInfo[] taskSplitMetaInfo = createSplits(job, job.jobId);
job.numMapTasks = taskSplitMetaInfo.length;
job.numReduceTasks = job.conf.getInt(MRJobConfig.NUM_REDUCES, 0);
if (job.numMapTasks == 0 && job.numReduceTasks == 0) {
job.addDiagnostic("No of maps and reduces are 0 " + job.jobId);
} else if (job.numMapTasks == 0) {
job.reduceWeight = 0.9f;
} else if (job.numReduceTasks == 0) {
job.mapWeight = 0.9f;
} else {
job.mapWeight = job.reduceWeight = 0.45f;
}
checkTaskLimits();
long inputLength = 0;
for (int i = 0; i < job.numMapTasks; ++i) {
inputLength += taskSplitMetaInfo[i].getInputDataLength();
}
job.makeUberDecision(inputLength);
job.taskAttemptCompletionEvents =
new ArrayList<TaskAttemptCompletionEvent>(
job.numMapTasks + job.numReduceTasks + 10);
job.mapAttemptCompletionEvents =
new ArrayList<TaskCompletionEvent>(job.numMapTasks + 10);
job.taskCompletionIdxToMapCompletionIdx = new ArrayList<Integer>(
job.numMapTasks + job.numReduceTasks + 10);
job.allowedMapFailuresPercent =
job.conf.getInt(MRJobConfig.MAP_FAILURES_MAX_PERCENT, 0);
job.allowedReduceFailuresPercent =
job.conf.getInt(MRJobConfig.REDUCE_FAILURES_MAXPERCENT, 0);
// create the Tasks but don't start them yet
createMapTasks(job, inputLength, taskSplitMetaInfo);
createReduceTasks(job);
job.metrics.endPreparingJob(job);
return JobStateInternal.INITED;
} catch (Exception e) {
LOG.warn("Job init failed", e);
job.metrics.endPreparingJob(job);
job.addDiagnostic("Job init failed : "
+ StringUtils.stringifyException(e));
// Leave job in the NEW state. The MR AM will detect that the state is
// not INITED and send a JOB_INIT_FAILED event.
return JobStateInternal.NEW;
}
}
protected void setup(JobImpl job) throws IOException {
String oldJobIDString = job.oldJobId.toString();
String user =
UserGroupInformation.getCurrentUser().getShortUserName();
Path path = MRApps.getStagingAreaDir(job.conf, user);
if(LOG.isDebugEnabled()) {
LOG.debug("startJobs: parent=" + path + " child=" + oldJobIDString);
}
job.remoteJobSubmitDir =
FileSystem.get(job.conf).makeQualified(
new Path(path, oldJobIDString));
job.remoteJobConfFile =
new Path(job.remoteJobSubmitDir, MRJobConfig.JOB_CONF_FILE);
// Prepare the TaskAttemptListener server for authentication of Containers
// TaskAttemptListener gets the information via jobTokenSecretManager.
JobTokenIdentifier identifier =
new JobTokenIdentifier(new Text(oldJobIDString));
job.jobToken =
new Token<JobTokenIdentifier>(identifier, job.jobTokenSecretManager);
job.jobToken.setService(identifier.getJobId());
// Add it to the jobTokenSecretManager so that TaskAttemptListener server
// can authenticate containers(tasks)
job.jobTokenSecretManager.addTokenForJob(oldJobIDString, job.jobToken);
LOG.info("Adding job token for " + oldJobIDString
+ " to jobTokenSecretManager");
// If the job client did not setup the shuffle secret then reuse
// the job token secret for the shuffle.
if (TokenCache.getShuffleSecretKey(job.jobCredentials) == null) {
LOG.warn("Shuffle secret key missing from job credentials."
+ " Using job token secret as shuffle secret.");
TokenCache.setShuffleSecretKey(job.jobToken.getPassword(),
job.jobCredentials);
}
}
private void createMapTasks(JobImpl job, long inputLength,
TaskSplitMetaInfo[] splits) {
for (int i=0; i < job.numMapTasks; ++i) {
TaskImpl task =
new MapTaskImpl(job.jobId, i,
job.eventHandler,
job.remoteJobConfFile,
job.conf, splits[i],
job.taskAttemptListener,
job.jobToken, job.jobCredentials,
job.clock,
job.applicationAttemptId.getAttemptId(),
job.metrics, job.appContext);
job.addTask(task);
}
LOG.info("Input size for job " + job.jobId + " = " + inputLength
+ ". Number of splits = " + splits.length);
}
private void createReduceTasks(JobImpl job) {
for (int i = 0; i < job.numReduceTasks; i++) {
TaskImpl task =
new ReduceTaskImpl(job.jobId, i,
job.eventHandler,
job.remoteJobConfFile,
job.conf, job.numMapTasks,
job.taskAttemptListener, job.jobToken,
job.jobCredentials, job.clock,
job.applicationAttemptId.getAttemptId(),
job.metrics, job.appContext);
job.addTask(task);
}
LOG.info("Number of reduces for job " + job.jobId + " = "
+ job.numReduceTasks);
}
protected TaskSplitMetaInfo[] createSplits(JobImpl job, JobId jobId) {
TaskSplitMetaInfo[] allTaskSplitMetaInfo;
try {
allTaskSplitMetaInfo = SplitMetaInfoReader.readSplitMetaInfo(
job.oldJobId, job.fs,
job.conf,
job.remoteJobSubmitDir);
} catch (IOException e) {
throw new YarnRuntimeException(e);
}
return allTaskSplitMetaInfo;
}
/**
     * If the number of tasks is greater than the configured value,
     * throw an exception that will fail job initialization.
*/
private void checkTaskLimits() {
// no code, for now
}
} // end of InitTransition
private static class InitFailedTransition
implements SingleArcTransition<JobImpl, JobEvent> {
@Override
public void transition(JobImpl job, JobEvent event) {
job.eventHandler.handle(new CommitterJobAbortEvent(job.jobId,
job.jobContext,
org.apache.hadoop.mapreduce.JobStatus.State.FAILED));
}
}
private static class SetupCompletedTransition
implements SingleArcTransition<JobImpl, JobEvent> {
@Override
public void transition(JobImpl job, JobEvent event) {
job.setupProgress = 1.0f;
job.scheduleTasks(job.mapTasks, job.numReduceTasks == 0);
job.scheduleTasks(job.reduceTasks, true);
// If we have no tasks, just transition to job completed
if (job.numReduceTasks == 0 && job.numMapTasks == 0) {
job.eventHandler.handle(new JobEvent(job.jobId,
JobEventType.JOB_COMPLETED));
}
}
}
private static class SetupFailedTransition
implements SingleArcTransition<JobImpl, JobEvent> {
@Override
public void transition(JobImpl job, JobEvent event) {
job.metrics.endRunningJob(job);
job.addDiagnostic("Job setup failed : "
+ ((JobSetupFailedEvent) event).getMessage());
job.eventHandler.handle(new CommitterJobAbortEvent(job.jobId,
job.jobContext,
org.apache.hadoop.mapreduce.JobStatus.State.FAILED));
}
}
public static class StartTransition
implements SingleArcTransition<JobImpl, JobEvent> {
/**
* This transition executes in the event-dispatcher thread, though it's
* triggered in MRAppMaster's startJobs() method.
*/
@Override
public void transition(JobImpl job, JobEvent event) {
JobStartEvent jse = (JobStartEvent) event;
if (jse.getRecoveredJobStartTime() != -1L) {
job.startTime = jse.getRecoveredJobStartTime();
} else {
job.startTime = job.clock.getTime();
}
JobInitedEvent jie =
new JobInitedEvent(job.oldJobId,
job.startTime,
job.numMapTasks, job.numReduceTasks,
job.getState().toString(),
job.isUber());
job.eventHandler.handle(new JobHistoryEvent(job.jobId, jie));
JobInfoChangeEvent jice = new JobInfoChangeEvent(job.oldJobId,
job.appSubmitTime, job.startTime);
job.eventHandler.handle(new JobHistoryEvent(job.jobId, jice));
job.metrics.runningJob(job);
job.eventHandler.handle(new CommitterJobSetupEvent(
job.jobId, job.jobContext));
}
}
private void unsuccessfulFinish(JobStateInternal finalState) {
if (finishTime == 0) setFinishTime();
cleanupProgress = 1.0f;
JobUnsuccessfulCompletionEvent unsuccessfulJobEvent =
new JobUnsuccessfulCompletionEvent(oldJobId,
finishTime,
succeededMapTaskCount,
succeededReduceTaskCount,
finalState.toString(),
diagnostics);
eventHandler.handle(new JobHistoryEvent(jobId,
unsuccessfulJobEvent));
finished(finalState);
}
private static class JobAbortCompletedTransition
implements SingleArcTransition<JobImpl, JobEvent> {
@Override
public void transition(JobImpl job, JobEvent event) {
JobStateInternal finalState = JobStateInternal.valueOf(
((JobAbortCompletedEvent) event).getFinalState().name());
job.unsuccessfulFinish(finalState);
}
}
//This transition happens when a job is to be failed. It waits for all the
//tasks to finish / be killed.
private static class JobFailWaitTransition
implements MultipleArcTransition<JobImpl, JobEvent, JobStateInternal> {
@Override
public JobStateInternal transition(JobImpl job, JobEvent event) {
if(!job.failWaitTriggerScheduledFuture.isCancelled()) {
for(Task task: job.tasks.values()) {
if(!task.isFinished()) {
return JobStateInternal.FAIL_WAIT;
}
}
}
//Finished waiting. All tasks finished / were killed
job.failWaitTriggerScheduledFuture.cancel(false);
job.eventHandler.handle(new CommitterJobAbortEvent(job.jobId,
job.jobContext, org.apache.hadoop.mapreduce.JobStatus.State.FAILED));
return JobStateInternal.FAIL_ABORT;
}
}
//This transition happens when a job to be failed times out while waiting on
//tasks that had been sent the KILL signal. It is triggered by a
//ScheduledFuture task queued in the executor.
private static class JobFailWaitTimedOutTransition
implements SingleArcTransition<JobImpl, JobEvent> {
@Override
public void transition(JobImpl job, JobEvent event) {
LOG.info("Timeout expired in FAIL_WAIT waiting for tasks to get killed."
+ " Going to fail job anyway");
job.failWaitTriggerScheduledFuture.cancel(false);
job.eventHandler.handle(new CommitterJobAbortEvent(job.jobId,
job.jobContext, org.apache.hadoop.mapreduce.JobStatus.State.FAILED));
}
}
// JobFinishedEvent triggers the move of the history file out of the staging
// area. May need to create a new event type for this if JobFinished should
// not be generated for KilledJobs, etc.
private static JobFinishedEvent createJobFinishedEvent(JobImpl job) {
job.mayBeConstructFinalFullCounters();
JobFinishedEvent jfe = new JobFinishedEvent(
job.oldJobId, job.finishTime,
job.succeededMapTaskCount, job.succeededReduceTaskCount,
job.failedMapTaskCount, job.failedReduceTaskCount,
job.finalMapCounters,
job.finalReduceCounters,
job.fullCounters);
return jfe;
}
private void mayBeConstructFinalFullCounters() {
// Calculating full-counters. This should happen only once for the job.
synchronized (this.fullCountersLock) {
if (this.fullCounters != null) {
// Already constructed. Just return.
return;
}
this.constructFinalFullcounters();
}
}
@Private
public void constructFinalFullcounters() {
this.fullCounters = new Counters();
this.finalMapCounters = new Counters();
this.finalReduceCounters = new Counters();
this.fullCounters.incrAllCounters(jobCounters);
for (Task t : this.tasks.values()) {
Counters counters = t.getCounters();
switch (t.getType()) {
case MAP:
this.finalMapCounters.incrAllCounters(counters);
break;
case REDUCE:
this.finalReduceCounters.incrAllCounters(counters);
break;
default:
throw new IllegalStateException("Task type neither map nor reduce: " +
t.getType());
}
this.fullCounters.incrAllCounters(counters);
}
}
// Task-start has been moved out of InitTransition, so this arc simply
// hardcodes 0 for both map and reduce finished tasks.
private static class KillNewJobTransition
implements SingleArcTransition<JobImpl, JobEvent> {
@Override
public void transition(JobImpl job, JobEvent event) {
job.setFinishTime();
JobUnsuccessfulCompletionEvent failedEvent =
new JobUnsuccessfulCompletionEvent(job.oldJobId,
job.finishTime, 0, 0,
JobStateInternal.KILLED.toString(), job.diagnostics);
job.eventHandler.handle(new JobHistoryEvent(job.jobId, failedEvent));
job.finished(JobStateInternal.KILLED);
}
}
private static class KillInitedJobTransition
implements SingleArcTransition<JobImpl, JobEvent> {
@Override
public void transition(JobImpl job, JobEvent event) {
job.addDiagnostic("Job received Kill in INITED state.");
job.eventHandler.handle(new CommitterJobAbortEvent(job.jobId,
job.jobContext,
org.apache.hadoop.mapreduce.JobStatus.State.KILLED));
}
}
private static class KilledDuringSetupTransition
implements SingleArcTransition<JobImpl, JobEvent> {
@Override
public void transition(JobImpl job, JobEvent event) {
job.metrics.endRunningJob(job);
job.addDiagnostic("Job received kill in SETUP state.");
job.eventHandler.handle(new CommitterJobAbortEvent(job.jobId,
job.jobContext,
org.apache.hadoop.mapreduce.JobStatus.State.KILLED));
}
}
private static class KillTasksTransition
implements SingleArcTransition<JobImpl, JobEvent> {
@Override
public void transition(JobImpl job, JobEvent event) {
job.addDiagnostic(JOB_KILLED_DIAG);
for (Task task : job.tasks.values()) {
job.eventHandler.handle(
new TaskEvent(task.getID(), TaskEventType.T_KILL));
}
job.metrics.endRunningJob(job);
}
}
private static class TaskAttemptCompletedEventTransition implements
SingleArcTransition<JobImpl, JobEvent> {
@Override
public void transition(JobImpl job, JobEvent event) {
TaskAttemptCompletionEvent tce =
((JobTaskAttemptCompletedEvent) event).getCompletionEvent();
// Add the TaskAttemptCompletionEvent
//eventId is equal to index in the arraylist
tce.setEventId(job.taskAttemptCompletionEvents.size());
job.taskAttemptCompletionEvents.add(tce);
int mapEventIdx = -1;
if (TaskType.MAP.equals(tce.getAttemptId().getTaskId().getTaskType())) {
// we track map completions separately from task completions because
// - getMapAttemptCompletionEvents uses index ranges specific to maps
// - type converting the same events over and over is expensive
mapEventIdx = job.mapAttemptCompletionEvents.size();
job.mapAttemptCompletionEvents.add(TypeConverter.fromYarn(tce));
}
job.taskCompletionIdxToMapCompletionIdx.add(mapEventIdx);
TaskAttemptId attemptId = tce.getAttemptId();
TaskId taskId = attemptId.getTaskId();
      //mark the previous completion event obsolete if it exists
Integer successEventNo =
job.successAttemptCompletionEventNoMap.remove(taskId);
if (successEventNo != null) {
TaskAttemptCompletionEvent successEvent =
job.taskAttemptCompletionEvents.get(successEventNo);
successEvent.setStatus(TaskAttemptCompletionEventStatus.OBSOLETE);
int mapCompletionIdx =
job.taskCompletionIdxToMapCompletionIdx.get(successEventNo);
if (mapCompletionIdx >= 0) {
// update the corresponding TaskCompletionEvent for the map
TaskCompletionEvent mapEvent =
job.mapAttemptCompletionEvents.get(mapCompletionIdx);
job.mapAttemptCompletionEvents.set(mapCompletionIdx,
new TaskCompletionEvent(mapEvent.getEventId(),
mapEvent.getTaskAttemptId(), mapEvent.idWithinJob(),
mapEvent.isMapTask(), TaskCompletionEvent.Status.OBSOLETE,
mapEvent.getTaskTrackerHttp()));
}
}
// if this attempt is not successful then why is the previous successful
// attempt being removed above - MAPREDUCE-4330
if (TaskAttemptCompletionEventStatus.SUCCEEDED.equals(tce.getStatus())) {
job.successAttemptCompletionEventNoMap.put(taskId, tce.getEventId());
// here we could have simply called Task.getSuccessfulAttempt() but
// the event that triggers this code is sent before
// Task.successfulAttempt is set and so there is no guarantee that it
// will be available now
Task task = job.tasks.get(taskId);
TaskAttempt attempt = task.getAttempt(attemptId);
NodeId nodeId = attempt.getNodeId();
assert (nodeId != null); // node must exist for a successful event
List<TaskAttemptId> taskAttemptIdList = job.nodesToSucceededTaskAttempts
.get(nodeId);
if (taskAttemptIdList == null) {
taskAttemptIdList = new ArrayList<TaskAttemptId>();
job.nodesToSucceededTaskAttempts.put(nodeId, taskAttemptIdList);
}
taskAttemptIdList.add(attempt.getID());
}
}
}
private static class TaskAttemptFetchFailureTransition implements
SingleArcTransition<JobImpl, JobEvent> {
@Override
public void transition(JobImpl job, JobEvent event) {
//get number of shuffling reduces
int shufflingReduceTasks = 0;
for (TaskId taskId : job.reduceTasks) {
Task task = job.tasks.get(taskId);
if (TaskState.RUNNING.equals(task.getState())) {
for(TaskAttempt attempt : task.getAttempts().values()) {
if(attempt.getPhase() == Phase.SHUFFLE) {
shufflingReduceTasks++;
break;
}
}
}
}
JobTaskAttemptFetchFailureEvent fetchfailureEvent =
(JobTaskAttemptFetchFailureEvent) event;
for (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId mapId :
fetchfailureEvent.getMaps()) {
Integer fetchFailures = job.fetchFailuresMapping.get(mapId);
fetchFailures = (fetchFailures == null) ? 1 : (fetchFailures+1);
job.fetchFailuresMapping.put(mapId, fetchFailures);
float failureRate = shufflingReduceTasks == 0 ? 1.0f :
(float) fetchFailures / shufflingReduceTasks;
// declare faulty if fetch-failures >= max-allowed-failures
if (fetchFailures >= job.getMaxFetchFailuresNotifications()
&& failureRate >= job.getMaxAllowedFetchFailuresFraction()) {
LOG.info("Too many fetch-failures for output of task attempt: " +
mapId + " ... raising fetch failure to map");
job.eventHandler.handle(new TaskAttemptTooManyFetchFailureEvent(mapId,
fetchfailureEvent.getReduce(), fetchfailureEvent.getHost()));
job.fetchFailuresMapping.remove(mapId);
}
}
}
}
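  // Worked example of the two thresholds above (numbers hypothetical): with
  // maxFetchFailuresNotifications = 3, maxAllowedFetchFailuresFraction = 0.5
  // and 10 reduces currently in the SHUFFLE phase, a map output is declared
  // faulty only once both fetchFailures >= 3 and fetchFailures / 10 >= 0.5
  // hold, i.e. on the 5th reported failure; with no shuffling reduces the
  // rate defaults to 1.0 and the notification count alone decides.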
private static class TaskCompletedTransition implements
MultipleArcTransition<JobImpl, JobEvent, JobStateInternal> {
@Override
public JobStateInternal transition(JobImpl job, JobEvent event) {
job.completedTaskCount++;
LOG.info("Num completed Tasks: " + job.completedTaskCount);
JobTaskEvent taskEvent = (JobTaskEvent) event;
Task task = job.tasks.get(taskEvent.getTaskID());
if (taskEvent.getState() == TaskState.SUCCEEDED) {
taskSucceeded(job, task);
} else if (taskEvent.getState() == TaskState.FAILED) {
taskFailed(job, task);
} else if (taskEvent.getState() == TaskState.KILLED) {
taskKilled(job, task);
}
return checkJobAfterTaskCompletion(job);
}
    //This class is used to queue a ScheduledFuture to send an event to a job
    //after some delay. It can be used to wait for a maximum amount of time
    //before proceeding anyway, e.g. when a job is waiting in FAIL_WAIT for
    //all tasks to be killed.
static class TriggerScheduledFuture implements Runnable {
JobEvent toSend;
JobImpl job;
TriggerScheduledFuture(JobImpl job, JobEvent toSend) {
this.toSend = toSend;
this.job = job;
}
public void run() {
LOG.info("Sending event " + toSend + " to " + job.getID());
job.getEventHandler().handle(toSend);
}
}
protected JobStateInternal checkJobAfterTaskCompletion(JobImpl job) {
//check for Job failure
if (job.failedMapTaskCount*100 >
job.allowedMapFailuresPercent*job.numMapTasks ||
job.failedReduceTaskCount*100 >
job.allowedReduceFailuresPercent*job.numReduceTasks) {
job.setFinishTime();
String diagnosticMsg = "Job failed as tasks failed. " +
"failedMaps:" + job.failedMapTaskCount +
" failedReduces:" + job.failedReduceTaskCount;
LOG.info(diagnosticMsg);
job.addDiagnostic(diagnosticMsg);
//Send kill signal to all unfinished tasks here.
boolean allDone = true;
for (Task task : job.tasks.values()) {
if(!task.isFinished()) {
allDone = false;
job.eventHandler.handle(
new TaskEvent(task.getID(), TaskEventType.T_KILL));
}
}
//If all tasks are already done, we should go directly to FAIL_ABORT
if(allDone) {
job.eventHandler.handle(new CommitterJobAbortEvent(job.jobId,
job.jobContext, org.apache.hadoop.mapreduce.JobStatus.State.FAILED)
);
return JobStateInternal.FAIL_ABORT;
}
//Set max timeout to wait for the tasks to get killed
job.failWaitTriggerScheduledFuture = job.executor.schedule(
new TriggerScheduledFuture(job, new JobEvent(job.getID(),
JobEventType.JOB_FAIL_WAIT_TIMEDOUT)), job.conf.getInt(
MRJobConfig.MR_AM_COMMITTER_CANCEL_TIMEOUT_MS,
MRJobConfig.DEFAULT_MR_AM_COMMITTER_CANCEL_TIMEOUT_MS),
TimeUnit.MILLISECONDS);
return JobStateInternal.FAIL_WAIT;
}
return job.checkReadyForCommit();
}
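    // Worked example of the percentage check above (numbers hypothetical):
    // with numMapTasks = 100 and allowedMapFailuresPercent = 5, the job fails
    // once failedMapTaskCount * 100 > 5 * 100, i.e. on the 6th failed map;
    // at exactly 5 failed maps (5%) the job is still within the allowance.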
private void taskSucceeded(JobImpl job, Task task) {
if (task.getType() == TaskType.MAP) {
job.succeededMapTaskCount++;
} else {
job.succeededReduceTaskCount++;
}
job.metrics.completedTask(task);
}
private void taskFailed(JobImpl job, Task task) {
if (task.getType() == TaskType.MAP) {
job.failedMapTaskCount++;
} else if (task.getType() == TaskType.REDUCE) {
job.failedReduceTaskCount++;
}
job.addDiagnostic("Task failed " + task.getID());
job.metrics.failedTask(task);
}
private void taskKilled(JobImpl job, Task task) {
if (task.getType() == TaskType.MAP) {
job.killedMapTaskCount++;
} else if (task.getType() == TaskType.REDUCE) {
job.killedReduceTaskCount++;
}
job.metrics.killedTask(task);
}
}
// Transition class for handling jobs with no tasks
private static class JobNoTasksCompletedTransition implements
MultipleArcTransition<JobImpl, JobEvent, JobStateInternal> {
@Override
public JobStateInternal transition(JobImpl job, JobEvent event) {
return job.checkReadyForCommit();
}
}
private static class CommitSucceededTransition implements
SingleArcTransition<JobImpl, JobEvent> {
@Override
public void transition(JobImpl job, JobEvent event) {
job.logJobHistoryFinishedEvent();
job.finished(JobStateInternal.SUCCEEDED);
}
}
private static class CommitFailedTransition implements
SingleArcTransition<JobImpl, JobEvent> {
@Override
public void transition(JobImpl job, JobEvent event) {
JobCommitFailedEvent jcfe = (JobCommitFailedEvent)event;
job.addDiagnostic("Job commit failed: " + jcfe.getMessage());
job.eventHandler.handle(new CommitterJobAbortEvent(job.jobId,
job.jobContext,
org.apache.hadoop.mapreduce.JobStatus.State.FAILED));
}
}
private static class KilledDuringCommitTransition implements
SingleArcTransition<JobImpl, JobEvent> {
@Override
public void transition(JobImpl job, JobEvent event) {
job.setFinishTime();
job.eventHandler.handle(new CommitterJobAbortEvent(job.jobId,
job.jobContext,
org.apache.hadoop.mapreduce.JobStatus.State.KILLED));
}
}
private static class KilledDuringAbortTransition implements
SingleArcTransition<JobImpl, JobEvent> {
@Override
public void transition(JobImpl job, JobEvent event) {
job.unsuccessfulFinish(JobStateInternal.KILLED);
}
}
private static class MapTaskRescheduledTransition implements
SingleArcTransition<JobImpl, JobEvent> {
@Override
public void transition(JobImpl job, JobEvent event) {
      //a previously succeeded map task is being restarted, so roll back counts
job.completedTaskCount--;
job.succeededMapTaskCount--;
}
}
private static class KillWaitTaskCompletedTransition extends
TaskCompletedTransition {
@Override
protected JobStateInternal checkJobAfterTaskCompletion(JobImpl job) {
if (job.completedTaskCount == job.tasks.size()) {
job.setFinishTime();
job.eventHandler.handle(new CommitterJobAbortEvent(job.jobId,
job.jobContext,
org.apache.hadoop.mapreduce.JobStatus.State.KILLED));
return JobStateInternal.KILL_ABORT;
}
//return the current state, Job not finished yet
return job.getInternalState();
}
}
protected void addDiagnostic(String diag) {
diagnostics.add(diag);
}
private static class DiagnosticsUpdateTransition implements
SingleArcTransition<JobImpl, JobEvent> {
@Override
public void transition(JobImpl job, JobEvent event) {
job.addDiagnostic(((JobDiagnosticsUpdateEvent) event)
.getDiagnosticUpdate());
}
}
private static class CounterUpdateTransition implements
SingleArcTransition<JobImpl, JobEvent> {
@Override
public void transition(JobImpl job, JobEvent event) {
JobCounterUpdateEvent jce = (JobCounterUpdateEvent) event;
for (JobCounterUpdateEvent.CounterIncrementalUpdate ci : jce
.getCounterUpdates()) {
job.jobCounters.findCounter(ci.getCounterKey()).increment(
ci.getIncrementValue());
}
}
}
private static class UpdatedNodesTransition implements
SingleArcTransition<JobImpl, JobEvent> {
@Override
public void transition(JobImpl job, JobEvent event) {
JobUpdatedNodesEvent updateEvent = (JobUpdatedNodesEvent) event;
for(NodeReport nr: updateEvent.getUpdatedNodes()) {
NodeState nodeState = nr.getNodeState();
if(nodeState.isUnusable()) {
// act on the updates
job.actOnUnusableNode(nr.getNodeId(), nodeState);
}
}
}
}
private static class InternalTerminationTransition implements
SingleArcTransition<JobImpl, JobEvent> {
JobStateInternal terminationState = null;
String jobHistoryString = null;
public InternalTerminationTransition(JobStateInternal stateInternal,
String jobHistoryString) {
this.terminationState = stateInternal;
      //mostly a hack for the jobhistoryserver
this.jobHistoryString = jobHistoryString;
}
@Override
public void transition(JobImpl job, JobEvent event) {
      //TODO Is this JH event required?
job.setFinishTime();
JobUnsuccessfulCompletionEvent failedEvent =
new JobUnsuccessfulCompletionEvent(job.oldJobId,
job.finishTime, 0, 0,
jobHistoryString, job.diagnostics);
job.eventHandler.handle(new JobHistoryEvent(job.jobId, failedEvent));
job.finished(terminationState);
}
}
private static class InternalErrorTransition extends InternalTerminationTransition {
public InternalErrorTransition(){
super(JobStateInternal.ERROR, JobStateInternal.ERROR.toString());
}
}
private static class InternalRebootTransition extends InternalTerminationTransition {
public InternalRebootTransition(){
super(JobStateInternal.REBOOT, JobStateInternal.ERROR.toString());
}
}
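  // Note: InternalRebootTransition records ERROR (not REBOOT) as the
  // job-history string passed to JobUnsuccessfulCompletionEvent, presumably
  // because the history format has no REBOOT state ("mostly a hack for the
  // jobhistoryserver" above); the state machine itself still moves to REBOOT
  // so getExternalState() can keep reporting RUNNING on a non-final AM retry.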
@Override
public Configuration loadConfFile() throws IOException {
Path confPath = getConfFile();
FileContext fc = FileContext.getFileContext(confPath.toUri(), conf);
Configuration jobConf = new Configuration(false);
jobConf.addResource(fc.open(confPath), confPath.toString());
return jobConf;
}
public float getMaxAllowedFetchFailuresFraction() {
return maxAllowedFetchFailuresFraction;
}
public int getMaxFetchFailuresNotifications() {
return maxFetchFailuresNotifications;
}
}
| 86,563
| 38.25805
| 94
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/commit/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
package org.apache.hadoop.mapreduce.v2.app.commit;
import org.apache.hadoop.classification.InterfaceAudience;
| 943
| 43.952381
| 75
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/commit/CommitterEventType.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.commit;
public enum CommitterEventType {
JOB_SETUP,
JOB_COMMIT,
JOB_ABORT,
TASK_ABORT
}
| 948
| 34.148148
| 75
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/commit/CommitterEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.commit;
import org.apache.hadoop.yarn.event.AbstractEvent;
public class CommitterEvent extends AbstractEvent<CommitterEventType> {
public CommitterEvent(CommitterEventType type) {
super(type);
}
}
| 1,059
| 35.551724
| 75
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/commit/CommitterTaskAbortEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.commit;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
public class CommitterTaskAbortEvent extends CommitterEvent {
private final TaskAttemptId attemptID;
private final TaskAttemptContext attemptContext;
public CommitterTaskAbortEvent(TaskAttemptId attemptID,
TaskAttemptContext attemptContext) {
super(CommitterEventType.TASK_ABORT);
this.attemptID = attemptID;
this.attemptContext = attemptContext;
}
public TaskAttemptId getAttemptID() {
return attemptID;
}
public TaskAttemptContext getAttemptContext() {
return attemptContext;
}
}
| 1,509
| 33.318182
| 75
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/commit/CommitterEventHandler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.commit;
import java.io.IOException;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobAbortCompletedEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobCommitCompletedEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobCommitFailedEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobSetupCompletedEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobSetupFailedEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
import org.apache.hadoop.mapreduce.v2.app.rm.RMHeartbeatHandler;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
public class CommitterEventHandler extends AbstractService
implements EventHandler<CommitterEvent> {
private static final Log LOG =
LogFactory.getLog(CommitterEventHandler.class);
private final AppContext context;
private final OutputCommitter committer;
private final RMHeartbeatHandler rmHeartbeatHandler;
private ThreadPoolExecutor launcherPool;
private Thread eventHandlingThread;
private BlockingQueue<CommitterEvent> eventQueue =
new LinkedBlockingQueue<CommitterEvent>();
private final AtomicBoolean stopped;
private final ClassLoader jobClassLoader;
private Thread jobCommitThread = null;
private int commitThreadCancelTimeoutMs;
private long commitWindowMs;
private FileSystem fs;
private Path startCommitFile;
private Path endCommitSuccessFile;
private Path endCommitFailureFile;
public CommitterEventHandler(AppContext context, OutputCommitter committer,
RMHeartbeatHandler rmHeartbeatHandler) {
this(context, committer, rmHeartbeatHandler, null);
}
public CommitterEventHandler(AppContext context, OutputCommitter committer,
RMHeartbeatHandler rmHeartbeatHandler, ClassLoader jobClassLoader) {
super("CommitterEventHandler");
this.context = context;
this.committer = committer;
this.rmHeartbeatHandler = rmHeartbeatHandler;
this.stopped = new AtomicBoolean(false);
this.jobClassLoader = jobClassLoader;
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
super.serviceInit(conf);
commitThreadCancelTimeoutMs = conf.getInt(
MRJobConfig.MR_AM_COMMITTER_CANCEL_TIMEOUT_MS,
MRJobConfig.DEFAULT_MR_AM_COMMITTER_CANCEL_TIMEOUT_MS);
commitWindowMs = conf.getLong(MRJobConfig.MR_AM_COMMIT_WINDOW_MS,
MRJobConfig.DEFAULT_MR_AM_COMMIT_WINDOW_MS);
try {
fs = FileSystem.get(conf);
JobID id = TypeConverter.fromYarn(context.getApplicationID());
JobId jobId = TypeConverter.toYarn(id);
String user = UserGroupInformation.getCurrentUser().getShortUserName();
startCommitFile = MRApps.getStartJobCommitFile(conf, user, jobId);
endCommitSuccessFile = MRApps.getEndJobCommitSuccessFile(conf, user, jobId);
endCommitFailureFile = MRApps.getEndJobCommitFailureFile(conf, user, jobId);
} catch (IOException e) {
throw new YarnRuntimeException(e);
}
}
@Override
protected void serviceStart() throws Exception {
ThreadFactoryBuilder tfBuilder = new ThreadFactoryBuilder()
.setNameFormat("CommitterEvent Processor #%d");
if (jobClassLoader != null) {
// if the job classloader is enabled, we need to use the job classloader
// as the thread context classloader (TCCL) of these threads in case the
// committer needs to load another class via TCCL
ThreadFactory backingTf = new ThreadFactory() {
@Override
public Thread newThread(Runnable r) {
Thread thread = new Thread(r);
thread.setContextClassLoader(jobClassLoader);
return thread;
}
};
tfBuilder.setThreadFactory(backingTf);
}
ThreadFactory tf = tfBuilder.build();
launcherPool = new ThreadPoolExecutor(5, 5, 1,
TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>(), tf);
eventHandlingThread = new Thread(new Runnable() {
@Override
public void run() {
CommitterEvent event = null;
while (!stopped.get() && !Thread.currentThread().isInterrupted()) {
try {
event = eventQueue.take();
} catch (InterruptedException e) {
if (!stopped.get()) {
LOG.error("Returning, interrupted : " + e);
}
return;
}
// the events from the queue are handled in parallel
// using a thread pool
          launcherPool.execute(new EventProcessor(event));
        }
}
});
eventHandlingThread.setName("CommitterEvent Handler");
eventHandlingThread.start();
super.serviceStart();
}
@Override
public void handle(CommitterEvent event) {
try {
eventQueue.put(event);
} catch (InterruptedException e) {
throw new YarnRuntimeException(e);
}
}
@Override
protected void serviceStop() throws Exception {
if (stopped.getAndSet(true)) {
// return if already stopped
return;
}
if (eventHandlingThread != null) {
eventHandlingThread.interrupt();
}
if (launcherPool != null) {
launcherPool.shutdown();
}
super.serviceStop();
}
private synchronized void jobCommitStarted() throws IOException {
if (jobCommitThread != null) {
throw new IOException("Commit while another commit thread active: "
+ jobCommitThread.toString());
}
jobCommitThread = Thread.currentThread();
}
private synchronized void jobCommitEnded() {
if (jobCommitThread == Thread.currentThread()) {
jobCommitThread = null;
notifyAll();
}
}
private synchronized void cancelJobCommit() {
Thread threadCommitting = jobCommitThread;
if (threadCommitting != null && threadCommitting.isAlive()) {
LOG.info("Cancelling commit");
threadCommitting.interrupt();
// wait up to configured timeout for commit thread to finish
long now = context.getClock().getTime();
long timeoutTimestamp = now + commitThreadCancelTimeoutMs;
try {
        while (jobCommitThread == threadCommitting
            && now < timeoutTimestamp) {
          wait(timeoutTimestamp - now);
now = context.getClock().getTime();
}
      } catch (InterruptedException e) {
        // interrupted while waiting for the commit thread to exit; give up
      }
}
}
private class EventProcessor implements Runnable {
private CommitterEvent event;
EventProcessor(CommitterEvent event) {
this.event = event;
}
@Override
public void run() {
LOG.info("Processing the event " + event.toString());
switch (event.getType()) {
case JOB_SETUP:
handleJobSetup((CommitterJobSetupEvent) event);
break;
case JOB_COMMIT:
handleJobCommit((CommitterJobCommitEvent) event);
break;
case JOB_ABORT:
handleJobAbort((CommitterJobAbortEvent) event);
break;
case TASK_ABORT:
handleTaskAbort((CommitterTaskAbortEvent) event);
break;
default:
throw new YarnRuntimeException("Unexpected committer event "
+ event.toString());
}
}
@SuppressWarnings("unchecked")
protected void handleJobSetup(CommitterJobSetupEvent event) {
try {
committer.setupJob(event.getJobContext());
context.getEventHandler().handle(
new JobSetupCompletedEvent(event.getJobID()));
} catch (Exception e) {
LOG.warn("Job setup failed", e);
context.getEventHandler().handle(new JobSetupFailedEvent(
event.getJobID(), StringUtils.stringifyException(e)));
}
}
private void touchz(Path p) throws IOException {
fs.create(p, false).close();
}
@SuppressWarnings("unchecked")
protected void handleJobCommit(CommitterJobCommitEvent event) {
try {
touchz(startCommitFile);
jobCommitStarted();
waitForValidCommitWindow();
committer.commitJob(event.getJobContext());
touchz(endCommitSuccessFile);
context.getEventHandler().handle(
new JobCommitCompletedEvent(event.getJobID()));
} catch (Exception e) {
try {
touchz(endCommitFailureFile);
} catch (Exception e2) {
LOG.error("could not create failure file.", e2);
}
LOG.error("Could not commit job", e);
context.getEventHandler().handle(
new JobCommitFailedEvent(event.getJobID(),
StringUtils.stringifyException(e)));
} finally {
jobCommitEnded();
}
}
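    // Marker-file protocol used by handleJobCommit() above: an empty
    // startCommitFile is created (touchz) before commitJob(), and
    // endCommitSuccessFile or endCommitFailureFile afterwards, so a restarted
    // AM can distinguish "never started committing", "died mid-commit" and
    // "commit finished" and avoid committing the same output twice.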
@SuppressWarnings("unchecked")
protected void handleJobAbort(CommitterJobAbortEvent event) {
cancelJobCommit();
try {
committer.abortJob(event.getJobContext(), event.getFinalState());
} catch (Exception e) {
LOG.warn("Could not abort job", e);
}
context.getEventHandler().handle(new JobAbortCompletedEvent(
event.getJobID(), event.getFinalState()));
}
@SuppressWarnings("unchecked")
protected void handleTaskAbort(CommitterTaskAbortEvent event) {
try {
committer.abortTask(event.getAttemptContext());
} catch (Exception e) {
LOG.warn("Task cleanup failed for attempt " + event.getAttemptID(), e);
}
context.getEventHandler().handle(
new TaskAttemptEvent(event.getAttemptID(),
TaskAttemptEventType.TA_CLEANUP_DONE));
}
private synchronized void waitForValidCommitWindow()
throws InterruptedException {
long lastHeartbeatTime = rmHeartbeatHandler.getLastHeartbeatTime();
long now = context.getClock().getTime();
while (now - lastHeartbeatTime > commitWindowMs) {
rmHeartbeatHandler.runOnNextHeartbeat(new Runnable() {
@Override
public void run() {
synchronized (EventProcessor.this) {
EventProcessor.this.notify();
}
}
});
wait();
lastHeartbeatTime = rmHeartbeatHandler.getLastHeartbeatTime();
now = context.getClock().getTime();
}
}
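    // Commit-window sketch (value hypothetical): with commitWindowMs = 10000,
    // if the RM was last heard from 12s ago the loop above blocks, registers
    // a callback on the next heartbeat, and re-checks until
    // (now - lastHeartbeatTime) <= commitWindowMs. Committing only shortly
    // after a successful heartbeat guards against an AM the RM has already
    // superseded committing stale output.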
}
}
| 12,064
| 34.381232
| 82
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/commit/CommitterJobSetupEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.commit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
public class CommitterJobSetupEvent extends CommitterEvent {
private JobId jobID;
private JobContext jobContext;
public CommitterJobSetupEvent(JobId jobID, JobContext jobContext) {
super(CommitterEventType.JOB_SETUP);
this.jobID = jobID;
this.jobContext = jobContext;
}
public JobId getJobID() {
return jobID;
}
public JobContext getJobContext() {
return jobContext;
}
}
| 1,376
| 31.023256
| 75
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/commit/CommitterJobAbortEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.commit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.JobStatus;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
public class CommitterJobAbortEvent extends CommitterEvent {
private JobId jobID;
private JobContext jobContext;
private JobStatus.State finalState;
public CommitterJobAbortEvent(JobId jobID, JobContext jobContext,
JobStatus.State finalState) {
super(CommitterEventType.JOB_ABORT);
this.jobID = jobID;
this.jobContext = jobContext;
this.finalState = finalState;
}
public JobId getJobID() {
return jobID;
}
public JobContext getJobContext() {
return jobContext;
}
public JobStatus.State getFinalState() {
return finalState;
}
}
| 1,599
| 30.372549
| 75
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/commit/CommitterJobCommitEvent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.commit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
public class CommitterJobCommitEvent extends CommitterEvent {
private JobId jobID;
private JobContext jobContext;
public CommitterJobCommitEvent(JobId jobID, JobContext jobContext) {
super(CommitterEventType.JOB_COMMIT);
this.jobID = jobID;
this.jobContext = jobContext;
}
public JobId getJobID() {
return jobID;
}
public JobContext getJobContext() {
return jobContext;
}
}
| 1,379
| 31.093023
| 75
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import java.io.IOException;
import java.io.OutputStream;
import java.net.InetSocketAddress;
import java.security.PrivilegedExceptionAction;
import java.util.concurrent.ScheduledExecutorService;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FSError;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.counters.Limits;
import org.apache.hadoop.mapreduce.security.TokenCache;
import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.source.JvmMetrics;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
import org.apache.hadoop.util.ShutdownHookManager;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.util.ConverterUtils;
/**
* The main() for MapReduce task processes.
*/
class YarnChild {
private static final Log LOG = LogFactory.getLog(YarnChild.class);
static volatile TaskAttemptID taskid = null;
public static void main(String[] args) throws Throwable {
Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
LOG.debug("Child starting");
final JobConf job = new JobConf(MRJobConfig.JOB_CONF_FILE);
// Initing with our JobConf allows us to avoid loading confs twice
Limits.init(job);
UserGroupInformation.setConfiguration(job);
String host = args[0];
int port = Integer.parseInt(args[1]);
final InetSocketAddress address =
NetUtils.createSocketAddrForHost(host, port);
final TaskAttemptID firstTaskid = TaskAttemptID.forName(args[2]);
long jvmIdLong = Long.parseLong(args[3]);
JVMId jvmId = new JVMId(firstTaskid.getJobID(),
firstTaskid.getTaskType() == TaskType.MAP, jvmIdLong);
// initialize metrics
DefaultMetricsSystem.initialize(
StringUtils.camelize(firstTaskid.getTaskType().name()) +"Task");
// Security framework already loaded the tokens into current ugi
Credentials credentials =
UserGroupInformation.getCurrentUser().getCredentials();
LOG.info("Executing with tokens:");
for (Token<?> token: credentials.getAllTokens()) {
LOG.info(token);
}
// Create TaskUmbilicalProtocol as actual task owner.
UserGroupInformation taskOwner =
UserGroupInformation.createRemoteUser(firstTaskid.getJobID().toString());
Token<JobTokenIdentifier> jt = TokenCache.getJobToken(credentials);
SecurityUtil.setTokenService(jt, address);
taskOwner.addToken(jt);
final TaskUmbilicalProtocol umbilical =
taskOwner.doAs(new PrivilegedExceptionAction<TaskUmbilicalProtocol>() {
@Override
public TaskUmbilicalProtocol run() throws Exception {
return (TaskUmbilicalProtocol)RPC.getProxy(TaskUmbilicalProtocol.class,
TaskUmbilicalProtocol.versionID, address, job);
}
});
// report non-pid to application master
JvmContext context = new JvmContext(jvmId, "-1000");
LOG.debug("PID: " + System.getenv().get("JVM_PID"));
Task task = null;
UserGroupInformation childUGI = null;
ScheduledExecutorService logSyncer = null;
try {
int idleLoopCount = 0;
      JvmTask myTask = null;
// poll for new task
for (int idle = 0; null == myTask; ++idle) {
long sleepTimeMilliSecs = Math.min(idle * 500, 1500);
LOG.info("Sleeping for " + sleepTimeMilliSecs
+ "ms before retrying again. Got null now.");
MILLISECONDS.sleep(sleepTimeMilliSecs);
myTask = umbilical.getTask(context);
}
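      // Backoff worked example (illustrative, not in the original source):
      // idle=0 -> 0ms, idle=1 -> 500ms, idle=2 -> 1000ms, idle>=3 -> capped
      // at 1500ms between successive getTask() polls.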
if (myTask.shouldDie()) {
return;
}
task = myTask.getTask();
YarnChild.taskid = task.getTaskID();
// Create the job-conf and set credentials
configureTask(job, task, credentials, jt);
// log the system properties
String systemPropsToLog = MRApps.getSystemPropertiesToLog(job);
if (systemPropsToLog != null) {
LOG.info(systemPropsToLog);
}
// Initiate Java VM metrics
JvmMetrics.initSingleton(jvmId.toString(), job.getSessionId());
childUGI = UserGroupInformation.createRemoteUser(System
.getenv(ApplicationConstants.Environment.USER.toString()));
// Add tokens to new user so that it may execute its task correctly.
childUGI.addCredentials(credentials);
// set job classloader if configured before invoking the task
MRApps.setJobClassLoader(job);
logSyncer = TaskLog.createLogSyncer();
// Create a final reference to the task for the doAs block
final Task taskFinal = task;
childUGI.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
// use job-specified working directory
setEncryptedSpillKeyIfRequired(taskFinal);
FileSystem.get(job).setWorkingDirectory(job.getWorkingDirectory());
taskFinal.run(job, umbilical); // run the task
return null;
}
});
} catch (FSError e) {
LOG.fatal("FSError from child", e);
if (!ShutdownHookManager.get().isShutdownInProgress()) {
umbilical.fsError(taskid, e.getMessage());
}
} catch (Exception exception) {
LOG.warn("Exception running child : "
+ StringUtils.stringifyException(exception));
try {
if (task != null) {
// do cleanup for the task
          if (childUGI == null) { // no need to go into a doAs block
task.taskCleanup(umbilical);
} else {
final Task taskFinal = task;
childUGI.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
taskFinal.taskCleanup(umbilical);
return null;
}
});
}
}
} catch (Exception e) {
LOG.info("Exception cleaning up: " + StringUtils.stringifyException(e));
}
// Report back any failures, for diagnostic purposes
if (taskid != null) {
if (!ShutdownHookManager.get().isShutdownInProgress()) {
umbilical.fatalError(taskid,
StringUtils.stringifyException(exception));
}
}
} catch (Throwable throwable) {
LOG.fatal("Error running child : "
+ StringUtils.stringifyException(throwable));
if (taskid != null) {
if (!ShutdownHookManager.get().isShutdownInProgress()) {
Throwable tCause = throwable.getCause();
String cause =
tCause == null ? throwable.getMessage() : StringUtils
.stringifyException(tCause);
umbilical.fatalError(taskid, cause);
}
}
} finally {
RPC.stopProxy(umbilical);
DefaultMetricsSystem.shutdown();
TaskLog.syncLogsShutdown(logSyncer);
}
}
/**
* Utility method to check if the Encrypted Spill Key needs to be set into the
* user credentials of the user running the Map / Reduce Task
* @param task The Map / Reduce task to set the Encrypted Spill information in
* @throws Exception
*/
public static void setEncryptedSpillKeyIfRequired(Task task) throws
Exception {
if ((task != null) && (task.getEncryptedSpillKey() != null) && (task
.getEncryptedSpillKey().length > 1)) {
Credentials creds =
UserGroupInformation.getCurrentUser().getCredentials();
TokenCache.setEncryptedSpillKey(task.getEncryptedSpillKey(), creds);
UserGroupInformation.getCurrentUser().addCredentials(creds);
}
}
/**
* Configure mapred-local dirs. This config is used by the task for finding
* out an output directory.
* @throws IOException
*/
private static void configureLocalDirs(Task task, JobConf job) throws IOException {
String[] localSysDirs = StringUtils.getTrimmedStrings(
System.getenv(Environment.LOCAL_DIRS.name()));
job.setStrings(MRConfig.LOCAL_DIR, localSysDirs);
LOG.info(MRConfig.LOCAL_DIR + " for child: " + job.get(MRConfig.LOCAL_DIR));
LocalDirAllocator lDirAlloc = new LocalDirAllocator(MRConfig.LOCAL_DIR);
Path workDir = null;
// First, try to find the JOB_LOCAL_DIR on this host.
try {
workDir = lDirAlloc.getLocalPathToRead("work", job);
} catch (DiskErrorException e) {
// DiskErrorException means dir not found. If not found, it will
// be created below.
}
if (workDir == null) {
// JOB_LOCAL_DIR doesn't exist on this host -- Create it.
workDir = lDirAlloc.getLocalPathForWrite("work", job);
FileSystem lfs = FileSystem.getLocal(job).getRaw();
boolean madeDir = false;
try {
madeDir = lfs.mkdirs(workDir);
} catch (FileAlreadyExistsException e) {
// Since all tasks will be running in their own JVM, the race condition
// exists where multiple tasks could be trying to create this directory
// at the same time. If this task loses the race, it's okay because
// the directory already exists.
madeDir = true;
workDir = lDirAlloc.getLocalPathToRead("work", job);
}
if (!madeDir) {
throw new IOException("Mkdirs failed to create "
+ workDir.toString());
}
}
job.set(MRJobConfig.JOB_LOCAL_DIR,workDir.toString());
}
private static void configureTask(JobConf job, Task task,
Credentials credentials, Token<JobTokenIdentifier> jt) throws IOException {
job.setCredentials(credentials);
ApplicationAttemptId appAttemptId =
ConverterUtils.toContainerId(
System.getenv(Environment.CONTAINER_ID.name()))
.getApplicationAttemptId();
LOG.debug("APPLICATION_ATTEMPT_ID: " + appAttemptId);
    // Set it in conf so that it can be used by the OutputCommitter.
job.setInt(MRJobConfig.APPLICATION_ATTEMPT_ID,
appAttemptId.getAttemptId());
// set tcp nodelay
job.setBoolean("ipc.client.tcpnodelay", true);
job.setClass(MRConfig.TASK_LOCAL_OUTPUT_CLASS,
YarnOutputFiles.class, MapOutputFile.class);
// set the jobToken and shuffle secrets into task
task.setJobTokenSecret(
JobTokenSecretManager.createSecretKey(jt.getPassword()));
byte[] shuffleSecret = TokenCache.getShuffleSecretKey(credentials);
if (shuffleSecret == null) {
LOG.warn("Shuffle secret missing from task credentials."
+ " Using job token secret as shuffle secret.");
shuffleSecret = jt.getPassword();
}
task.setShuffleSecret(
JobTokenSecretManager.createSecretKey(shuffleSecret));
// setup the child's MRConfig.LOCAL_DIR.
configureLocalDirs(task, job);
// setup the child's attempt directories
// Do the task-type specific localization
task.localizeConfiguration(job);
// Set up the DistributedCache related configs
MRApps.setupDistributedCacheLocal(job);
    // Overwrite the localized task jobconf, which is linked into the current
    // work-dir.
Path localTaskFile = new Path(MRJobConfig.JOB_CONF_FILE);
writeLocalJobFile(localTaskFile, job);
task.setJobFile(localTaskFile.toString());
task.setConf(job);
}
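  // 0640: owner read/write, group read-only, no access for others.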
private static final FsPermission urw_gr =
FsPermission.createImmutable((short) 0640);
/**
* Write the task specific job-configuration file.
* @throws IOException
*/
private static void writeLocalJobFile(Path jobFile, JobConf conf)
throws IOException {
FileSystem localFs = FileSystem.getLocal(conf);
localFs.delete(jobFile);
OutputStream out = null;
try {
out = FileSystem.create(localFs, jobFile, urw_gr);
conf.writeXml(out);
} finally {
IOUtils.cleanup(LOG, out);
}
}
}
| 13,632
| 37.511299
| 85
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapTaskAttemptImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener;
import org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.util.Clock;
@SuppressWarnings("rawtypes")
public class MapTaskAttemptImpl extends TaskAttemptImpl {
private final TaskSplitMetaInfo splitInfo;
public MapTaskAttemptImpl(TaskId taskId, int attempt,
EventHandler eventHandler, Path jobFile,
int partition, TaskSplitMetaInfo splitInfo, JobConf conf,
TaskAttemptListener taskAttemptListener,
Token<JobTokenIdentifier> jobToken,
Credentials credentials, Clock clock,
AppContext appContext) {
super(taskId, attempt, eventHandler,
taskAttemptListener, jobFile, partition, conf, splitInfo.getLocations(),
jobToken, credentials, clock, appContext);
this.splitInfo = splitInfo;
}
@Override
public Task createRemoteTask() {
    // job file name is set in TaskAttempt, so pass an empty string here
MapTask mapTask =
new MapTask("", TypeConverter.fromYarn(getID()), partition,
splitInfo.getSplitIndex(), 1); // YARN doesn't have the concept of slots per task, set it as 1.
mapTask.setUser(conf.get(MRJobConfig.USER_NAME));
mapTask.setConf(conf);
return mapTask;
}
}
| 2,621
| 39.338462
| 105
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.mapred.SortedRanges.Range;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener;
import org.apache.hadoop.mapreduce.v2.app.TaskHeartbeatHandler;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptDiagnosticsUpdateEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus;
import org.apache.hadoop.mapreduce.v2.app.rm.RMHeartbeatHandler;
import org.apache.hadoop.mapreduce.v2.app.security.authorize.MRAMPolicyProvider;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.service.CompositeService;
import org.apache.hadoop.util.StringInterner;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
/**
 * This class is responsible for talking to the task umbilical.
* It also converts all the old data structures
* to yarn data structures.
*
* This class HAS to be in this package to access package private
* methods/classes.
*/
@SuppressWarnings({"unchecked"})
public class TaskAttemptListenerImpl extends CompositeService
implements TaskUmbilicalProtocol, TaskAttemptListener {
private static final JvmTask TASK_FOR_INVALID_JVM = new JvmTask(null, true);
private static final Log LOG = LogFactory.getLog(TaskAttemptListenerImpl.class);
private AppContext context;
private Server server;
protected TaskHeartbeatHandler taskHeartbeatHandler;
private RMHeartbeatHandler rmHeartbeatHandler;
private long commitWindowMs;
private InetSocketAddress address;
private ConcurrentMap<WrappedJvmID, org.apache.hadoop.mapred.Task>
jvmIDToActiveAttemptMap
= new ConcurrentHashMap<WrappedJvmID, org.apache.hadoop.mapred.Task>();
private Set<WrappedJvmID> launchedJVMs = Collections
.newSetFromMap(new ConcurrentHashMap<WrappedJvmID, Boolean>());
private JobTokenSecretManager jobTokenSecretManager = null;
private byte[] encryptedSpillKey;
public TaskAttemptListenerImpl(AppContext context,
JobTokenSecretManager jobTokenSecretManager,
RMHeartbeatHandler rmHeartbeatHandler,
byte[] secretShuffleKey) {
super(TaskAttemptListenerImpl.class.getName());
this.context = context;
this.jobTokenSecretManager = jobTokenSecretManager;
this.rmHeartbeatHandler = rmHeartbeatHandler;
this.encryptedSpillKey = secretShuffleKey;
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
registerHeartbeatHandler(conf);
commitWindowMs = conf.getLong(MRJobConfig.MR_AM_COMMIT_WINDOW_MS,
MRJobConfig.DEFAULT_MR_AM_COMMIT_WINDOW_MS);
super.serviceInit(conf);
}
@Override
protected void serviceStart() throws Exception {
startRpcServer();
super.serviceStart();
}
protected void registerHeartbeatHandler(Configuration conf) {
taskHeartbeatHandler = new TaskHeartbeatHandler(context.getEventHandler(),
context.getClock(), conf.getInt(MRJobConfig.MR_AM_TASK_LISTENER_THREAD_COUNT,
MRJobConfig.DEFAULT_MR_AM_TASK_LISTENER_THREAD_COUNT));
addService(taskHeartbeatHandler);
}
protected void startRpcServer() {
Configuration conf = getConfig();
try {
server =
new RPC.Builder(conf).setProtocol(TaskUmbilicalProtocol.class)
.setInstance(this).setBindAddress("0.0.0.0")
.setPort(0).setNumHandlers(
conf.getInt(MRJobConfig.MR_AM_TASK_LISTENER_THREAD_COUNT,
MRJobConfig.DEFAULT_MR_AM_TASK_LISTENER_THREAD_COUNT))
.setVerbose(false).setSecretManager(jobTokenSecretManager)
.build();
// Enable service authorization?
if (conf.getBoolean(
CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION,
false)) {
refreshServiceAcls(conf, new MRAMPolicyProvider());
}
server.start();
this.address = NetUtils.createSocketAddrForHost(
context.getNMHostname(),
server.getListenerAddress().getPort());
} catch (IOException e) {
throw new YarnRuntimeException(e);
}
}
void refreshServiceAcls(Configuration configuration,
PolicyProvider policyProvider) {
this.server.refreshServiceAcl(configuration, policyProvider);
}
@Override
protected void serviceStop() throws Exception {
stopRpcServer();
super.serviceStop();
}
protected void stopRpcServer() {
if (server != null) {
server.stop();
}
}
@Override
public InetSocketAddress getAddress() {
return address;
}
/**
* Child checking whether it can commit.
*
* <br>
* Commit is a two-phased protocol. First the attempt informs the
* ApplicationMaster that it is
* {@link #commitPending(TaskAttemptID, TaskStatus)}. Then it repeatedly polls
 * the ApplicationMaster whether it {@link #canCommit(TaskAttemptID)}. This is
* a legacy from the centralized commit protocol handling by the JobTracker.
*/
@Override
public boolean canCommit(TaskAttemptID taskAttemptID) throws IOException {
LOG.info("Commit go/no-go request from " + taskAttemptID.toString());
// An attempt is asking if it can commit its output. This can be decided
// only by the task which is managing the multiple attempts. So redirect the
// request there.
org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
TypeConverter.toYarn(taskAttemptID);
taskHeartbeatHandler.progressing(attemptID);
// tell task to retry later if AM has not heard from RM within the commit
// window to help avoid double-committing in a split-brain situation
long now = context.getClock().getTime();
if (now - rmHeartbeatHandler.getLastHeartbeatTime() > commitWindowMs) {
return false;
}
Job job = context.getJob(attemptID.getTaskId().getJobId());
Task task = job.getTask(attemptID.getTaskId());
return task.canCommit(attemptID);
}
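  // Illustrative sketch (not part of the original source) of the task-side
  // half of this two-phase protocol; pollIntervalMs, committer and
  // taskContext are hypothetical names:
  //
  //   umbilical.commitPending(attemptID, taskStatus);   // phase 1: announce
  //   while (!umbilical.canCommit(attemptID)) {         // phase 2: poll AM
  //     Thread.sleep(pollIntervalMs);
  //   }
  //   committer.commitTask(taskContext);                // AM granted commit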
/**
   * TaskAttempt is reporting that it is in commit_pending state and is
   * waiting for the commit response.
   *
   * <br>
   * Commit is a two-phased protocol. First the attempt informs the
   * ApplicationMaster that it is
   * {@link #commitPending(TaskAttemptID, TaskStatus)}. Then it repeatedly polls
   * the ApplicationMaster whether it {@link #canCommit(TaskAttemptID)}. This is
* a legacy from the centralized commit protocol handling by the JobTracker.
*/
@Override
  public void commitPending(TaskAttemptID taskAttemptID, TaskStatus taskStatus)
throws IOException, InterruptedException {
LOG.info("Commit-pending state update from " + taskAttemptID.toString());
    // An attempt is reporting that it is in commit-pending state. The actual
    // commit decision is made by the task which is managing the multiple
    // attempts, so record the state transition for it.
org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
TypeConverter.toYarn(taskAttemptID);
taskHeartbeatHandler.progressing(attemptID);
//Ignorable TaskStatus? - since a task will send a LastStatusUpdate
context.getEventHandler().handle(
new TaskAttemptEvent(attemptID,
TaskAttemptEventType.TA_COMMIT_PENDING));
}
@Override
public void done(TaskAttemptID taskAttemptID) throws IOException {
LOG.info("Done acknowledgement from " + taskAttemptID.toString());
org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
TypeConverter.toYarn(taskAttemptID);
taskHeartbeatHandler.progressing(attemptID);
context.getEventHandler().handle(
new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_DONE));
}
@Override
public void fatalError(TaskAttemptID taskAttemptID, String msg)
throws IOException {
// This happens only in Child and in the Task.
LOG.fatal("Task: " + taskAttemptID + " - exited : " + msg);
reportDiagnosticInfo(taskAttemptID, "Error: " + msg);
org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
TypeConverter.toYarn(taskAttemptID);
context.getEventHandler().handle(
new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_FAILMSG));
}
@Override
public void fsError(TaskAttemptID taskAttemptID, String message)
throws IOException {
// This happens only in Child.
LOG.fatal("Task: " + taskAttemptID + " - failed due to FSError: "
+ message);
reportDiagnosticInfo(taskAttemptID, "FSError: " + message);
org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
TypeConverter.toYarn(taskAttemptID);
context.getEventHandler().handle(
new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_FAILMSG));
}
@Override
public void shuffleError(TaskAttemptID taskAttemptID, String message) throws IOException {
// TODO: This isn't really used in any MR code. Ask for removal.
}
@Override
public MapTaskCompletionEventsUpdate getMapCompletionEvents(
JobID jobIdentifier, int startIndex, int maxEvents,
TaskAttemptID taskAttemptID) throws IOException {
LOG.info("MapCompletionEvents request from " + taskAttemptID.toString()
+ ". startIndex " + startIndex + " maxEvents " + maxEvents);
// TODO: shouldReset is never used. See TT. Ask for Removal.
boolean shouldReset = false;
org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
TypeConverter.toYarn(taskAttemptID);
TaskCompletionEvent[] events =
context.getJob(attemptID.getTaskId().getJobId()).getMapAttemptCompletionEvents(
startIndex, maxEvents);
taskHeartbeatHandler.progressing(attemptID);
return new MapTaskCompletionEventsUpdate(events, shouldReset);
}
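  // Illustrative sketch (not part of the original source): a reduce-side
  // fetcher pages through completion events roughly like this
  // (MAX_EVENTS_PER_RPC and the loop are hypothetical):
  //
  //   int fromIndex = 0;
  //   TaskCompletionEvent[] events;
  //   do {
  //     MapTaskCompletionEventsUpdate update =
  //         umbilical.getMapCompletionEvents(jobId, fromIndex,
  //             MAX_EVENTS_PER_RPC, reduceAttemptId);
  //     events = update.getMapTaskCompletionEvents();
  //     fromIndex += events.length;
  //     // hand the events to the shuffle scheduler ...
  //   } while (events.length == MAX_EVENTS_PER_RPC);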
@Override
public boolean ping(TaskAttemptID taskAttemptID) throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Ping from " + taskAttemptID.toString());
}
return true;
}
@Override
public void reportDiagnosticInfo(TaskAttemptID taskAttemptID, String diagnosticInfo)
throws IOException {
diagnosticInfo = StringInterner.weakIntern(diagnosticInfo);
LOG.info("Diagnostics report from " + taskAttemptID.toString() + ": "
+ diagnosticInfo);
org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
TypeConverter.toYarn(taskAttemptID);
taskHeartbeatHandler.progressing(attemptID);
// This is mainly used for cases where we want to propagate exception traces
// of tasks that fail.
// This call exists as a hadoop mapreduce legacy wherein all changes in
// counters/progress/phase/output-size are reported through statusUpdate()
// call but not diagnosticInformation.
context.getEventHandler().handle(
new TaskAttemptDiagnosticsUpdateEvent(attemptID, diagnosticInfo));
}
@Override
public boolean statusUpdate(TaskAttemptID taskAttemptID,
TaskStatus taskStatus) throws IOException, InterruptedException {
org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId yarnAttemptID =
TypeConverter.toYarn(taskAttemptID);
taskHeartbeatHandler.progressing(yarnAttemptID);
TaskAttemptStatus taskAttemptStatus =
new TaskAttemptStatus();
taskAttemptStatus.id = yarnAttemptID;
// Task sends the updated progress to the TT.
taskAttemptStatus.progress = taskStatus.getProgress();
LOG.info("Progress of TaskAttempt " + taskAttemptID + " is : "
+ taskStatus.getProgress());
// Task sends the updated state-string to the TT.
taskAttemptStatus.stateString = taskStatus.getStateString();
// Task sends the updated phase to the TT.
taskAttemptStatus.phase = TypeConverter.toYarn(taskStatus.getPhase());
// Counters are updated by the task. Convert counters into new format as
// that is the primary storage format inside the AM to avoid multiple
// conversions and unnecessary heap usage.
taskAttemptStatus.counters = new org.apache.hadoop.mapreduce.Counters(
taskStatus.getCounters());
// Map Finish time set by the task (map only)
if (taskStatus.getIsMap() && taskStatus.getMapFinishTime() != 0) {
taskAttemptStatus.mapFinishTime = taskStatus.getMapFinishTime();
}
// Shuffle Finish time set by the task (reduce only).
if (!taskStatus.getIsMap() && taskStatus.getShuffleFinishTime() != 0) {
taskAttemptStatus.shuffleFinishTime = taskStatus.getShuffleFinishTime();
}
// Sort finish time set by the task (reduce only).
if (!taskStatus.getIsMap() && taskStatus.getSortFinishTime() != 0) {
taskAttemptStatus.sortFinishTime = taskStatus.getSortFinishTime();
}
// Not Setting the task state. Used by speculation - will be set in TaskAttemptImpl
//taskAttemptStatus.taskState = TypeConverter.toYarn(taskStatus.getRunState());
//set the fetch failures
if (taskStatus.getFetchFailedMaps() != null
&& taskStatus.getFetchFailedMaps().size() > 0) {
taskAttemptStatus.fetchFailedMaps =
new ArrayList<org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId>();
for (TaskAttemptID failedMapId : taskStatus.getFetchFailedMaps()) {
taskAttemptStatus.fetchFailedMaps.add(
TypeConverter.toYarn(failedMapId));
}
}
// Task sends the information about the nextRecordRange to the TT
    // TODO: The following are not needed here, but need to be set somewhere inside the AppMaster.
// taskStatus.getRunState(); // Set by the TT/JT. Transform into a state TODO
// taskStatus.getStartTime(); // Used to be set by the TaskTracker. This should be set by getTask().
// taskStatus.getFinishTime(); // Used to be set by TT/JT. Should be set when task finishes
// // This was used by TT to do counter updates only once every minute. So this
// // isn't ever changed by the Task itself.
// taskStatus.getIncludeCounters();
context.getEventHandler().handle(
new TaskAttemptStatusUpdateEvent(taskAttemptStatus.id,
taskAttemptStatus));
return true;
}
@Override
public long getProtocolVersion(String arg0, long arg1) throws IOException {
return TaskUmbilicalProtocol.versionID;
}
@Override
public void reportNextRecordRange(TaskAttemptID taskAttemptID, Range range)
throws IOException {
// This is used when the feature of skipping records is enabled.
// This call exists as a hadoop mapreduce legacy wherein all changes in
// counters/progress/phase/output-size are reported through statusUpdate()
// call but not the next record range information.
throw new IOException("Not yet implemented.");
}
@Override
public JvmTask getTask(JvmContext context) throws IOException {
// A rough imitation of code from TaskTracker.
JVMId jvmId = context.jvmId;
LOG.info("JVM with ID : " + jvmId + " asked for a task");
JvmTask jvmTask = null;
// TODO: Is it an authorized container to get a task? Otherwise return null.
// TODO: Child.java's firstTaskID isn't really firstTaskID. Ask for update
// to jobId and task-type.
WrappedJvmID wJvmID = new WrappedJvmID(jvmId.getJobId(), jvmId.isMap,
jvmId.getId());
// Try to look up the task. We remove it directly as we don't give
// multiple tasks to a JVM
if (!jvmIDToActiveAttemptMap.containsKey(wJvmID)) {
LOG.info("JVM with ID: " + jvmId + " is invalid and will be killed.");
jvmTask = TASK_FOR_INVALID_JVM;
} else {
if (!launchedJVMs.contains(wJvmID)) {
jvmTask = null;
LOG.info("JVM with ID: " + jvmId
+ " asking for task before AM launch registered. Given null task");
} else {
        // remove the task as it is no longer needed and free up the memory.
// Also we have already told the JVM to process a task, so it is no
// longer pending, and further request should ask it to exit.
org.apache.hadoop.mapred.Task task =
jvmIDToActiveAttemptMap.remove(wJvmID);
launchedJVMs.remove(wJvmID);
LOG.info("JVM with ID: " + jvmId + " given task: " + task.getTaskID());
task.setEncryptedSpillKey(encryptedSpillKey);
jvmTask = new JvmTask(task, false);
}
}
return jvmTask;
}
@Override
public void registerPendingTask(
org.apache.hadoop.mapred.Task task, WrappedJvmID jvmID) {
// Create the mapping so that it is easy to look up
// when the jvm comes back to ask for Task.
// A JVM not present in this map is an illegal task/JVM.
jvmIDToActiveAttemptMap.put(jvmID, task);
}
@Override
public void registerLaunchedTask(
org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID,
WrappedJvmID jvmId) {
    // The AM considers the task to be launched (it has asked the NM to launch
    // it). The JVM will only be given a task after this registration.
launchedJVMs.add(jvmId);
taskHeartbeatHandler.register(attemptID);
}
@Override
public void unregister(
org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID,
WrappedJvmID jvmID) {
// Unregistration also comes from the same TaskAttempt which does the
// registration. Events are ordered at TaskAttempt, so unregistration will
// always come after registration.
// Remove from launchedJVMs before jvmIDToActiveAttemptMap to avoid
// synchronization issue with getTask(). getTask should be checking
// jvmIDToActiveAttemptMap before it checks launchedJVMs.
// remove the mappings if not already removed
launchedJVMs.remove(jvmID);
jvmIDToActiveAttemptMap.remove(jvmID);
//unregister this attempt
taskHeartbeatHandler.unregister(attemptID);
}
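  // Lifecycle sketch (not part of the original source) for the registry
  // maintained above, in the order events normally arrive:
  //
  //   registerPendingTask(task, jvmID);         // attempt assigned a container
  //   registerLaunchedTask(attemptID, jvmID);   // NM asked to launch the JVM
  //   getTask(jvmContext);                      // child polls; the task is
  //                                             // handed out and removed
  //   unregister(attemptID, jvmID);             // attempt finished or killed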
@Override
public ProtocolSignature getProtocolSignature(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
return ProtocolSignature.getProtocolSignature(this,
protocol, clientVersion, clientMethodsHash);
}
}
| 19,854
| 38.551793
| 103
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.File;
import java.io.IOException;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FSError;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.JobCounter;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerLaunchedEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncher;
import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherEvent;
import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerRemoteLaunchEvent;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.ShutdownHookManager;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
/**
* Runs the container task locally in a thread.
* Since all (sub)tasks share the same local directory, they must be executed
* sequentially in order to avoid creating/deleting the same files/dirs.
*/
public class LocalContainerLauncher extends AbstractService implements
ContainerLauncher {
private static final File curDir = new File(".");
private static final Log LOG = LogFactory.getLog(LocalContainerLauncher.class);
private FileContext curFC = null;
private final HashSet<File> localizedFiles;
private final AppContext context;
private final TaskUmbilicalProtocol umbilical;
private final ClassLoader jobClassLoader;
private ExecutorService taskRunner;
private Thread eventHandler;
private byte[] encryptedSpillKey = new byte[] {0};
private BlockingQueue<ContainerLauncherEvent> eventQueue =
new LinkedBlockingQueue<ContainerLauncherEvent>();
public LocalContainerLauncher(AppContext context,
TaskUmbilicalProtocol umbilical) {
this(context, umbilical, null);
}
public LocalContainerLauncher(AppContext context,
TaskUmbilicalProtocol umbilical,
ClassLoader jobClassLoader) {
super(LocalContainerLauncher.class.getName());
this.context = context;
this.umbilical = umbilical;
// umbilical: MRAppMaster creates (taskAttemptListener), passes to us
// (TODO/FIXME: pointless to use RPC to talk to self; should create
// LocalTaskAttemptListener or similar: implement umbilical protocol
// but skip RPC stuff)
this.jobClassLoader = jobClassLoader;
try {
curFC = FileContext.getFileContext(curDir.toURI());
} catch (UnsupportedFileSystemException ufse) {
LOG.error("Local filesystem " + curDir.toURI().toString()
+ " is unsupported?? (should never happen)");
}
// Save list of files/dirs that are supposed to be present so can delete
// any extras created by one task before starting subsequent task. Note
// that there's no protection against deleted or renamed localization;
// users who do that get what they deserve (and will have to disable
// uberization in order to run correctly).
File[] curLocalFiles = curDir.listFiles();
localizedFiles = new HashSet<File>(curLocalFiles.length);
for (int j = 0; j < curLocalFiles.length; ++j) {
localizedFiles.add(curLocalFiles[j]);
}
// Relocalization note/future FIXME (per chrisdo, 20110315): At moment,
// full localization info is in AppSubmissionContext passed from client to
// RM and then to NM for AM-container launch: no difference between AM-
// localization and MapTask- or ReduceTask-localization, so can assume all
// OK. Longer-term, will need to override uber-AM container-localization
// request ("needed resources") with union of regular-AM-resources + task-
// resources (and, if maps and reduces ever differ, then union of all three
// types), OR will need localizer service/API that uber-AM can request
// after running (e.g., "localizeForTask()" or "localizeForMapTask()").
}
public void serviceStart() throws Exception {
// create a single thread for serial execution of tasks
// make it a daemon thread so that the process can exit even if the task is
// not interruptible
taskRunner =
Executors.newSingleThreadExecutor(new ThreadFactoryBuilder().
setDaemon(true).setNameFormat("uber-SubtaskRunner").build());
// create and start an event handling thread
eventHandler = new Thread(new EventHandler(), "uber-EventHandler");
// if the job classloader is specified, set it onto the event handler as the
// thread context classloader so that it can be used by the event handler
// as well as the subtask runner threads
if (jobClassLoader != null) {
LOG.info("Setting " + jobClassLoader +
" as the context classloader of thread " + eventHandler.getName());
eventHandler.setContextClassLoader(jobClassLoader);
} else {
// note the current TCCL
LOG.info("Context classloader of thread " + eventHandler.getName() +
": " + eventHandler.getContextClassLoader());
}
eventHandler.start();
super.serviceStart();
}
public void serviceStop() throws Exception {
if (eventHandler != null) {
eventHandler.interrupt();
}
if (taskRunner != null) {
taskRunner.shutdownNow();
}
super.serviceStop();
}
@Override
public void handle(ContainerLauncherEvent event) {
try {
eventQueue.put(event);
} catch (InterruptedException e) {
throw new YarnRuntimeException(e); // FIXME? YarnRuntimeException is "for runtime exceptions only"
}
}
public void setEncryptedSpillKey(byte[] encryptedSpillKey) {
if (encryptedSpillKey != null) {
this.encryptedSpillKey = encryptedSpillKey;
}
}
/*
* Uber-AM lifecycle/ordering ("normal" case):
*
* - [somebody] sends TA_ASSIGNED
* - handled by ContainerAssignedTransition (TaskAttemptImpl.java)
* - creates "remoteTask" for us == real Task
* - sends CONTAINER_REMOTE_LAUNCH
* - TA: UNASSIGNED -> ASSIGNED
* - CONTAINER_REMOTE_LAUNCH handled by LocalContainerLauncher (us)
* - sucks "remoteTask" out of TaskAttemptImpl via getRemoteTask()
* - sends TA_CONTAINER_LAUNCHED
* [[ elsewhere...
* - TA_CONTAINER_LAUNCHED handled by LaunchedContainerTransition
* - registers "remoteTask" with TaskAttemptListener (== umbilical)
* - NUKES "remoteTask"
* - sends T_ATTEMPT_LAUNCHED (Task: SCHEDULED -> RUNNING)
* - TA: ASSIGNED -> RUNNING
* ]]
* - runs Task (runSubMap() or runSubReduce())
* - TA can safely send TA_UPDATE since in RUNNING state
*/
private class EventHandler implements Runnable {
// doneWithMaps and finishedSubMaps are accessed from only
// one thread. Therefore, no need to make them volatile.
private boolean doneWithMaps = false;
private int finishedSubMaps = 0;
private final Map<TaskAttemptId,Future<?>> futures =
new ConcurrentHashMap<TaskAttemptId,Future<?>>();
EventHandler() {
}
@SuppressWarnings("unchecked")
@Override
public void run() {
ContainerLauncherEvent event = null;
// Collect locations of map outputs to give to reduces
final Map<TaskAttemptID, MapOutputFile> localMapFiles =
new HashMap<TaskAttemptID, MapOutputFile>();
// _must_ either run subtasks sequentially or accept expense of new JVMs
// (i.e., fork()), else will get weird failures when maps try to create/
// write same dirname or filename: no chdir() in Java
while (!Thread.currentThread().isInterrupted()) {
try {
event = eventQueue.take();
} catch (InterruptedException e) { // mostly via T_KILL? JOB_KILL?
LOG.warn("Returning, interrupted : " + e);
break;
}
LOG.info("Processing the event " + event.toString());
if (event.getType() == EventType.CONTAINER_REMOTE_LAUNCH) {
final ContainerRemoteLaunchEvent launchEv =
(ContainerRemoteLaunchEvent)event;
// execute the task on a separate thread
Future<?> future = taskRunner.submit(new Runnable() {
public void run() {
runTask(launchEv, localMapFiles);
}
});
// remember the current attempt
futures.put(event.getTaskAttemptID(), future);
} else if (event.getType() == EventType.CONTAINER_REMOTE_CLEANUP) {
// cancel (and interrupt) the current running task associated with the
// event
TaskAttemptId taId = event.getTaskAttemptID();
Future<?> future = futures.remove(taId);
if (future != null) {
LOG.info("canceling the task attempt " + taId);
future.cancel(true);
}
// send "cleaned" event to task attempt to move us from
// SUCCESS_CONTAINER_CLEANUP to SUCCEEDED state (or
// {FAIL|KILL}_CONTAINER_CLEANUP to {FAIL|KILL}_TASK_CLEANUP)
context.getEventHandler().handle(
new TaskAttemptEvent(taId,
TaskAttemptEventType.TA_CONTAINER_CLEANED));
} else if (event.getType() == EventType.CONTAINER_COMPLETED) {
LOG.debug("Container completed " + event.toString());
} else {
LOG.warn("Ignoring unexpected event " + event.toString());
}
}
}
@SuppressWarnings("unchecked")
private void runTask(ContainerRemoteLaunchEvent launchEv,
Map<TaskAttemptID, MapOutputFile> localMapFiles) {
TaskAttemptId attemptID = launchEv.getTaskAttemptID();
Job job = context.getAllJobs().get(attemptID.getTaskId().getJobId());
int numMapTasks = job.getTotalMaps();
int numReduceTasks = job.getTotalReduces();
// YARN (tracking) Task:
org.apache.hadoop.mapreduce.v2.app.job.Task ytask =
job.getTask(attemptID.getTaskId());
// classic mapred Task:
org.apache.hadoop.mapred.Task remoteTask = launchEv.getRemoteTask();
// after "launching," send launched event to task attempt to move
// state from ASSIGNED to RUNNING (also nukes "remoteTask", so must
// do getRemoteTask() call first)
//There is no port number because we are not really talking to a task
// tracker. The shuffle is just done through local files. So the
// port number is set to -1 in this case.
context.getEventHandler().handle(
new TaskAttemptContainerLaunchedEvent(attemptID, -1));
if (numMapTasks == 0) {
doneWithMaps = true;
}
try {
if (remoteTask.isMapOrReduce()) {
JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptID.getTaskId().getJobId());
jce.addCounterUpdate(JobCounter.TOTAL_LAUNCHED_UBERTASKS, 1);
if (remoteTask.isMapTask()) {
jce.addCounterUpdate(JobCounter.NUM_UBER_SUBMAPS, 1);
} else {
jce.addCounterUpdate(JobCounter.NUM_UBER_SUBREDUCES, 1);
}
context.getEventHandler().handle(jce);
}
runSubtask(remoteTask, ytask.getType(), attemptID, numMapTasks,
(numReduceTasks > 0), localMapFiles);
// In non-uber mode, TA gets TA_CONTAINER_COMPLETED from MRAppMaster
// as part of NM -> RM -> AM notification route.
        // In uber mode, since the task runs inside the MRAppMaster container,
        // we have to simulate the notification.
context.getEventHandler().handle(new TaskAttemptEvent(attemptID,
TaskAttemptEventType.TA_CONTAINER_COMPLETED));
} catch (RuntimeException re) {
JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptID.getTaskId().getJobId());
jce.addCounterUpdate(JobCounter.NUM_FAILED_UBERTASKS, 1);
context.getEventHandler().handle(jce);
// this is our signal that the subtask failed in some way, so
// simulate a failed JVM/container and send a container-completed
// event to task attempt (i.e., move state machine from RUNNING
// to FAIL_CONTAINER_CLEANUP [and ultimately to FAILED])
context.getEventHandler().handle(new TaskAttemptEvent(attemptID,
TaskAttemptEventType.TA_CONTAINER_COMPLETED));
} catch (IOException ioe) {
// if umbilical itself barfs (in error-handler of runSubMap()),
// we're pretty much hosed, so do what YarnChild main() does
// (i.e., exit clumsily--but can never happen, so no worries!)
LOG.fatal("oopsie... this can never happen: "
+ StringUtils.stringifyException(ioe));
ExitUtil.terminate(-1);
} finally {
// remove my future
if (futures.remove(attemptID) != null) {
LOG.info("removed attempt " + attemptID +
" from the futures to keep track of");
}
}
}
private void runSubtask(org.apache.hadoop.mapred.Task task,
final TaskType taskType,
TaskAttemptId attemptID,
final int numMapTasks,
boolean renameOutputs,
Map<TaskAttemptID, MapOutputFile> localMapFiles)
throws RuntimeException, IOException {
org.apache.hadoop.mapred.TaskAttemptID classicAttemptID =
TypeConverter.fromYarn(attemptID);
try {
JobConf conf = new JobConf(getConfig());
conf.set(JobContext.TASK_ID, task.getTaskID().toString());
conf.set(JobContext.TASK_ATTEMPT_ID, classicAttemptID.toString());
conf.setBoolean(JobContext.TASK_ISMAP, (taskType == TaskType.MAP));
conf.setInt(JobContext.TASK_PARTITION, task.getPartition());
conf.set(JobContext.ID, task.getJobID().toString());
// Use the AM's local dir env to generate the intermediate step
// output files
String[] localSysDirs = StringUtils.getTrimmedStrings(
System.getenv(Environment.LOCAL_DIRS.name()));
conf.setStrings(MRConfig.LOCAL_DIR, localSysDirs);
LOG.info(MRConfig.LOCAL_DIR + " for uber task: "
+ conf.get(MRConfig.LOCAL_DIR));
// mark this as an uberized subtask so it can set task counter
// (longer-term/FIXME: could redefine as job counter and send
// "JobCounterEvent" to JobImpl on [successful] completion of subtask;
// will need new Job state-machine transition and JobImpl jobCounters
// map to handle)
conf.setBoolean("mapreduce.task.uberized", true);
// Check and handle Encrypted spill key
task.setEncryptedSpillKey(encryptedSpillKey);
YarnChild.setEncryptedSpillKeyIfRequired(task);
// META-FIXME: do we want the extra sanity-checking (doneWithMaps,
// etc.), or just assume/hope the state machine(s) and uber-AM work
// as expected?
if (taskType == TaskType.MAP) {
if (doneWithMaps) {
LOG.error("CONTAINER_REMOTE_LAUNCH contains a map task ("
+ attemptID + "), but should be finished with maps");
throw new RuntimeException();
}
MapTask map = (MapTask)task;
map.setConf(conf);
map.run(conf, umbilical);
if (renameOutputs) {
MapOutputFile renamed = renameMapOutputForReduce(conf, attemptID,
map.getMapOutputFile());
localMapFiles.put(classicAttemptID, renamed);
}
relocalize();
if (++finishedSubMaps == numMapTasks) {
doneWithMaps = true;
}
} else /* TaskType.REDUCE */ {
if (!doneWithMaps) {
// check if event-queue empty? whole idea of counting maps vs.
// checking event queue is a tad wacky...but could enforce ordering
// (assuming no "lost events") at LocalMRAppMaster [CURRENT BUG(?):
// doesn't send reduce event until maps all done]
LOG.error("CONTAINER_REMOTE_LAUNCH contains a reduce task ("
+ attemptID + "), but not yet finished with maps");
throw new RuntimeException();
}
// a.k.a. "mapreduce.jobtracker.address" in LocalJobRunner:
// set framework name to local to make task local
conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
conf.set(MRConfig.MASTER_ADDRESS, "local"); // bypass shuffle
ReduceTask reduce = (ReduceTask)task;
reduce.setLocalMapFiles(localMapFiles);
reduce.setConf(conf);
reduce.run(conf, umbilical);
relocalize();
}
} catch (FSError e) {
LOG.fatal("FSError from child", e);
// umbilical: MRAppMaster creates (taskAttemptListener), passes to us
if (!ShutdownHookManager.get().isShutdownInProgress()) {
umbilical.fsError(classicAttemptID, e.getMessage());
}
throw new RuntimeException();
} catch (Exception exception) {
LOG.warn("Exception running local (uberized) 'child' : "
+ StringUtils.stringifyException(exception));
try {
if (task != null) {
// do cleanup for the task
task.taskCleanup(umbilical);
}
} catch (Exception e) {
LOG.info("Exception cleaning up: "
+ StringUtils.stringifyException(e));
}
// Report back any failures, for diagnostic purposes
umbilical.reportDiagnosticInfo(classicAttemptID,
StringUtils.stringifyException(exception));
throw new RuntimeException();
} catch (Throwable throwable) {
LOG.fatal("Error running local (uberized) 'child' : "
+ StringUtils.stringifyException(throwable));
if (!ShutdownHookManager.get().isShutdownInProgress()) {
Throwable tCause = throwable.getCause();
String cause =
(tCause == null) ? throwable.getMessage() : StringUtils
.stringifyException(tCause);
umbilical.fatalError(classicAttemptID, cause);
}
throw new RuntimeException();
}
}
/**
* Also within the local filesystem, we need to restore the initial state
* of the directory as much as possible. Compare current contents against
* the saved original state and nuke everything that doesn't belong, with
* the exception of the renamed map outputs.
*
* Any jobs that go out of their way to rename or delete things from the
* local directory are considered broken and deserve what they get...
*/
private void relocalize() {
File[] curLocalFiles = curDir.listFiles();
for (int j = 0; j < curLocalFiles.length; ++j) {
if (!localizedFiles.contains(curLocalFiles[j])) {
// found one that wasn't there before: delete it
boolean deleted = false;
try {
if (curFC != null) {
// this is recursive, unlike File delete():
deleted = curFC.delete(new Path(curLocalFiles[j].getName()),true);
}
} catch (IOException e) {
deleted = false;
}
if (!deleted) {
LOG.warn("Unable to delete unexpected local file/dir "
+ curLocalFiles[j].getName() + ": insufficient permissions?");
}
}
}
}
} // end EventHandler
/**
* Within the _local_ filesystem (not HDFS), all activity takes place within
* a subdir inside one of the LOCAL_DIRS
* (${local.dir}/usercache/$user/appcache/$appId/$contId/),
* and all sub-MapTasks create the same filename ("file.out"). Rename that
* to something unique (e.g., "map_0.out") to avoid possible collisions.
*
* Longer-term, we'll modify [something] to use TaskAttemptID-based
* filenames instead of "file.out". (All of this is entirely internal,
* so there are no particular compatibility issues.)
*/
@VisibleForTesting
protected static MapOutputFile renameMapOutputForReduce(JobConf conf,
TaskAttemptId mapId, MapOutputFile subMapOutputFile) throws IOException {
FileSystem localFs = FileSystem.getLocal(conf);
// move map output to reduce input
Path mapOut = subMapOutputFile.getOutputFile();
FileStatus mStatus = localFs.getFileStatus(mapOut);
Path reduceIn = subMapOutputFile.getInputFileForWrite(
TypeConverter.fromYarn(mapId).getTaskID(), mStatus.getLen());
Path mapOutIndex = subMapOutputFile.getOutputIndexFile();
Path reduceInIndex = new Path(reduceIn.toString() + ".index");
if (LOG.isDebugEnabled()) {
LOG.debug("Renaming map output file for task attempt "
+ mapId.toString() + " from original location " + mapOut.toString()
+ " to destination " + reduceIn.toString());
}
if (!localFs.mkdirs(reduceIn.getParent())) {
throw new IOException("Mkdirs failed to create "
+ reduceIn.getParent().toString());
}
    if (!localFs.rename(mapOut, reduceIn)) {
      throw new IOException("Couldn't rename " + mapOut);
    }
    if (!localFs.rename(mapOutIndex, reduceInIndex)) {
      throw new IOException("Couldn't rename " + mapOutIndex);
    }
return new RenamedMapOutputFile(reduceIn);
}
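  // Example (hypothetical paths, not from the original source): for the first
  // sub-map of an uber job, the rename above moves roughly
  //   .../appcache/$appId/output/attempt_..._m_000000_0/file.out
  //     -> .../appcache/$appId/output/attempt_..._m_000000_0/map_0.out
  // together with the matching ".index" file, so sub-reduces can locate it.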
private static class RenamedMapOutputFile extends MapOutputFile {
private Path path;
public RenamedMapOutputFile(Path path) {
this.path = path;
}
@Override
public Path getOutputFile() throws IOException {
return path;
}
@Override
public Path getOutputFileForWrite(long size) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public Path getOutputFileForWriteInVolume(Path existing) {
throw new UnsupportedOperationException();
}
@Override
public Path getOutputIndexFile() throws IOException {
throw new UnsupportedOperationException();
}
@Override
public Path getOutputIndexFileForWrite(long size) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public Path getOutputIndexFileForWriteInVolume(Path existing) {
throw new UnsupportedOperationException();
}
@Override
public Path getSpillFile(int spillNumber) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public Path getSpillFileForWrite(int spillNumber, long size)
throws IOException {
throw new UnsupportedOperationException();
}
@Override
public Path getSpillIndexFile(int spillNumber) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public Path getSpillIndexFileForWrite(int spillNumber, long size)
throws IOException {
throw new UnsupportedOperationException();
}
@Override
public Path getInputFile(int mapId) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public Path getInputFileForWrite(TaskID mapId, long size)
throws IOException {
throw new UnsupportedOperationException();
}
@Override
public void removeAll() throws IOException {
throw new UnsupportedOperationException();
}
}
}
| 25,493
| 39.921348
| 105
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnOutputFiles.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.MRConfig;
/**
* Manipulate the working area for the transient store for maps and reduces.
*
* This class is used by map and reduce tasks to identify the directories that
* they need to write to/read from for intermediate files. The callers of
* these methods are from child space.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class YarnOutputFiles extends MapOutputFile {
private JobConf conf;
private static final String JOB_OUTPUT_DIR = "output";
private static final String SPILL_FILE_PATTERN = "%s_spill_%d.out";
private static final String SPILL_INDEX_FILE_PATTERN = SPILL_FILE_PATTERN
+ ".index";
public YarnOutputFiles() {
}
// assume configured to $localdir/usercache/$user/appcache/$appId
private LocalDirAllocator lDirAlloc =
new LocalDirAllocator(MRConfig.LOCAL_DIR);
private Path getAttemptOutputDir() {
return new Path(JOB_OUTPUT_DIR, conf.get(JobContext.TASK_ATTEMPT_ID));
}
/**
   * Return the path to the local map output file created earlier.
*
* @return path
* @throws IOException
*/
public Path getOutputFile() throws IOException {
Path attemptOutput =
new Path(getAttemptOutputDir(), MAP_OUTPUT_FILENAME_STRING);
return lDirAlloc.getLocalPathToRead(attemptOutput.toString(), conf);
}
/**
* Create a local map output file name.
*
* @param size the size of the file
* @return path
* @throws IOException
*/
public Path getOutputFileForWrite(long size) throws IOException {
Path attemptOutput =
new Path(getAttemptOutputDir(), MAP_OUTPUT_FILENAME_STRING);
return lDirAlloc.getLocalPathForWrite(attemptOutput.toString(), size, conf);
}
/**
* Create a local map output file name on the same volume.
*/
public Path getOutputFileForWriteInVolume(Path existing) {
Path outputDir = new Path(existing.getParent(), JOB_OUTPUT_DIR);
Path attemptOutputDir = new Path(outputDir,
conf.get(JobContext.TASK_ATTEMPT_ID));
return new Path(attemptOutputDir, MAP_OUTPUT_FILENAME_STRING);
}
/**
* Return the path to a local map output index file created earlier
*
* @return path
* @throws IOException
*/
public Path getOutputIndexFile() throws IOException {
Path attemptIndexOutput =
new Path(getAttemptOutputDir(), MAP_OUTPUT_FILENAME_STRING +
MAP_OUTPUT_INDEX_SUFFIX_STRING);
return lDirAlloc.getLocalPathToRead(attemptIndexOutput.toString(), conf);
}
/**
* Create a local map output index file name.
*
* @param size the size of the file
* @return path
* @throws IOException
*/
public Path getOutputIndexFileForWrite(long size) throws IOException {
Path attemptIndexOutput =
new Path(getAttemptOutputDir(), MAP_OUTPUT_FILENAME_STRING +
MAP_OUTPUT_INDEX_SUFFIX_STRING);
return lDirAlloc.getLocalPathForWrite(attemptIndexOutput.toString(),
size, conf);
}
/**
* Create a local map output index file name on the same volume.
*/
public Path getOutputIndexFileForWriteInVolume(Path existing) {
Path outputDir = new Path(existing.getParent(), JOB_OUTPUT_DIR);
Path attemptOutputDir = new Path(outputDir,
conf.get(JobContext.TASK_ATTEMPT_ID));
return new Path(attemptOutputDir, MAP_OUTPUT_FILENAME_STRING +
MAP_OUTPUT_INDEX_SUFFIX_STRING);
}
/**
* Return a local map spill file created earlier.
*
* @param spillNumber the number
* @return path
* @throws IOException
*/
public Path getSpillFile(int spillNumber) throws IOException {
return lDirAlloc.getLocalPathToRead(
String.format(SPILL_FILE_PATTERN,
conf.get(JobContext.TASK_ATTEMPT_ID), spillNumber), conf);
}
/**
* Create a local map spill file name.
*
* @param spillNumber the number
* @param size the size of the file
* @return path
* @throws IOException
*/
public Path getSpillFileForWrite(int spillNumber, long size)
throws IOException {
return lDirAlloc.getLocalPathForWrite(
String.format(SPILL_FILE_PATTERN,
conf.get(JobContext.TASK_ATTEMPT_ID), spillNumber), size, conf);
}
/**
* Return a local map spill index file created earlier
*
* @param spillNumber the number
* @return path
* @throws IOException
*/
public Path getSpillIndexFile(int spillNumber) throws IOException {
return lDirAlloc.getLocalPathToRead(
String.format(SPILL_INDEX_FILE_PATTERN,
conf.get(JobContext.TASK_ATTEMPT_ID), spillNumber), conf);
}
/**
* Create a local map spill index file name.
*
* @param spillNumber the number
* @param size the size of the file
* @return path
* @throws IOException
*/
public Path getSpillIndexFileForWrite(int spillNumber, long size)
throws IOException {
return lDirAlloc.getLocalPathForWrite(
String.format(SPILL_INDEX_FILE_PATTERN,
conf.get(JobContext.TASK_ATTEMPT_ID), spillNumber), size, conf);
}
/**
* Return a local reduce input file created earlier
*
* @param mapId a map task id
* @return path
* @throws IOException
*/
public Path getInputFile(int mapId) throws IOException {
throw new UnsupportedOperationException("Incompatible with LocalRunner");
}
/**
* Create a local reduce input file name.
*
* @param mapId a map task id
* @param size the size of the file
* @return path
* @throws IOException
*/
public Path getInputFileForWrite(org.apache.hadoop.mapreduce.TaskID mapId,
long size) throws IOException {
return lDirAlloc.getLocalPathForWrite(String.format(
REDUCE_INPUT_FILE_FORMAT_STRING,
getAttemptOutputDir().toString(), mapId.getId()),
size, conf);
}
/** Removes all of the files related to a task. */
public void removeAll() throws IOException {
throw new UnsupportedOperationException("Incompatible with LocalRunner");
}
@Override
public void setConf(Configuration conf) {
if (conf instanceof JobConf) {
this.conf = (JobConf) conf;
} else {
this.conf = new JobConf(conf);
}
}
@Override
public Configuration getConf() {
return conf;
}
}
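// ---------------------------------------------------------------------------
// Hedged usage sketch (editor's addition, not part of the original file):
// how a map task could resolve its spill paths through the API above. The
// local-dir and attempt-id literals below are illustrative assumptions only.
// ---------------------------------------------------------------------------
class YarnOutputFilesSketch {
  public static void main(String[] args) throws Exception {
    JobConf conf = new JobConf();
    // assumed NodeManager local dir; LocalDirAllocator reads MRConfig.LOCAL_DIR
    conf.set(MRConfig.LOCAL_DIR, "/tmp/nm-local-dir");
    conf.set(JobContext.TASK_ATTEMPT_ID,
        "attempt_1393307629410_0001_m_000000_0");
    YarnOutputFiles files = new YarnOutputFiles();
    files.setConf(conf);
    // Reserve room for a 1 MB spill; the allocator picks a volume with space.
    Path spill = files.getSpillFileForWrite(0, 1024 * 1024);
    Path index = files.getSpillIndexFileForWrite(0, 4096);
    System.out.println("spill 0 -> " + spill + ", index -> " + index);
  }
}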
| 7,552 | 30.60251 | 80 | java
hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/ReduceTaskAttemptImpl.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener;
import org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.util.Clock;
@SuppressWarnings("rawtypes")
public class ReduceTaskAttemptImpl extends TaskAttemptImpl {
private final int numMapTasks;
public ReduceTaskAttemptImpl(TaskId id, int attempt,
EventHandler eventHandler, Path jobFile, int partition,
int numMapTasks, JobConf conf,
TaskAttemptListener taskAttemptListener,
Token<JobTokenIdentifier> jobToken,
Credentials credentials, Clock clock,
AppContext appContext) {
super(id, attempt, eventHandler, taskAttemptListener, jobFile, partition,
conf, new String[] {}, jobToken, credentials, clock,
appContext);
this.numMapTasks = numMapTasks;
}
@Override
public Task createRemoteTask() {
    //the job file name is set in TaskAttempt, so pass an empty string here
    ReduceTask reduceTask =
      new ReduceTask("", TypeConverter.fromYarn(getID()), partition,
          numMapTasks, 1); // YARN has no concept of slots per task, so set it to 1.
reduceTask.setUser(conf.get(MRJobConfig.USER_NAME));
reduceTask.setConf(conf);
return reduceTask;
}
}
| 2,515 | 38.3125 | 91 | java
hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/WrappedProgressSplitsBlock.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
// Workaround for ProgressSplitsBlock being package-private
public class WrappedProgressSplitsBlock extends ProgressSplitsBlock {
private WrappedPeriodicStatsAccumulator wrappedProgressWallclockTime;
private WrappedPeriodicStatsAccumulator wrappedProgressCPUTime;
private WrappedPeriodicStatsAccumulator wrappedProgressVirtualMemoryKbytes;
private WrappedPeriodicStatsAccumulator wrappedProgressPhysicalMemoryKbytes;
public WrappedProgressSplitsBlock(int numberSplits) {
super(numberSplits);
}
public int[][] burst() {
return super.burst();
}
public WrappedPeriodicStatsAccumulator getProgressWallclockTime() {
if (wrappedProgressWallclockTime == null) {
wrappedProgressWallclockTime = new WrappedPeriodicStatsAccumulator(
progressWallclockTime);
}
return wrappedProgressWallclockTime;
}
public WrappedPeriodicStatsAccumulator getProgressCPUTime() {
if (wrappedProgressCPUTime == null) {
wrappedProgressCPUTime = new WrappedPeriodicStatsAccumulator(
progressCPUTime);
}
return wrappedProgressCPUTime;
}
public WrappedPeriodicStatsAccumulator getProgressVirtualMemoryKbytes() {
if (wrappedProgressVirtualMemoryKbytes == null) {
wrappedProgressVirtualMemoryKbytes = new WrappedPeriodicStatsAccumulator(
progressVirtualMemoryKbytes);
}
return wrappedProgressVirtualMemoryKbytes;
}
public WrappedPeriodicStatsAccumulator getProgressPhysicalMemoryKbytes() {
if (wrappedProgressPhysicalMemoryKbytes == null) {
wrappedProgressPhysicalMemoryKbytes = new WrappedPeriodicStatsAccumulator(
progressPhysicalMemoryKbytes);
}
return wrappedProgressPhysicalMemoryKbytes;
}
}
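// ---------------------------------------------------------------------------
// Hedged usage sketch (editor's addition): recording progress-indexed
// statistics through the wrappers above. The split count and the sampled
// values are arbitrary illustration data.
// ---------------------------------------------------------------------------
class ProgressSplitsSketch {
  public static void main(String[] args) {
    WrappedProgressSplitsBlock splits = new WrappedProgressSplitsBlock(12);
    // extend(progress, value) folds a new observation in at a progress point
    splits.getProgressWallclockTime().extend(0.25, 1500);
    splits.getProgressWallclockTime().extend(0.75, 4200);
    int[][] series = splits.burst(); // snapshot of all accumulated series
    System.out.println("series recorded: " + series.length);
  }
}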
| 2,558 | 37.19403 | 80 | java
hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/WrappedJvmID.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
/**
 * A simple wrapper that widens the visibility of JVMId.
*/
public class WrappedJvmID extends JVMId {
public WrappedJvmID(JobID jobID, boolean mapTask, long nextLong) {
super(jobID, mapTask, nextLong);
}
}
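// ---------------------------------------------------------------------------
// Hedged usage sketch (editor's addition): constructing a JVM id for a map
// task. The job id string and the sequence number 1L are made-up values.
// ---------------------------------------------------------------------------
class WrappedJvmIDSketch {
  public static void main(String[] args) {
    JobID jobId = JobID.forName("job_1393307629410_0001");
    WrappedJvmID jvmId = new WrappedJvmID(jobId, true /* map task */, 1L);
    System.out.println("jvm id: " + jvmId);
  }
}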
| 1,042 | 32.645161 | 74 | java
hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.net.InetSocketAddress;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Vector;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.TaskLog.LogName;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@SuppressWarnings("deprecation")
public class MapReduceChildJVM {
private static String getTaskLogFile(LogName filter) {
return ApplicationConstants.LOG_DIR_EXPANSION_VAR + Path.SEPARATOR +
filter.toString();
}
private static String getChildEnv(JobConf jobConf, boolean isMap) {
if (isMap) {
return jobConf.get(JobConf.MAPRED_MAP_TASK_ENV,
jobConf.get(JobConf.MAPRED_TASK_ENV));
}
return jobConf.get(JobConf.MAPRED_REDUCE_TASK_ENV,
jobConf.get(JobConf.MAPRED_TASK_ENV));
}
public static void setVMEnv(Map<String, String> environment,
Task task) {
JobConf conf = task.conf;
// Add the env variables passed by the user
String mapredChildEnv = getChildEnv(conf, task.isMapTask());
MRApps.setEnvFromInputString(environment, mapredChildEnv, conf);
// Set logging level in the environment.
// This is so that, if the child forks another "bin/hadoop" (common in
// streaming) it will have the correct loglevel.
environment.put(
"HADOOP_ROOT_LOGGER",
MRApps.getChildLogLevel(conf, task.isMapTask()) + ",console");
// TODO: The following is useful for instance in streaming tasks. Should be
// set in ApplicationMaster's env by the RM.
String hadoopClientOpts = System.getenv("HADOOP_CLIENT_OPTS");
if (hadoopClientOpts == null) {
hadoopClientOpts = "";
} else {
hadoopClientOpts = hadoopClientOpts + " ";
}
environment.put("HADOOP_CLIENT_OPTS", hadoopClientOpts);
// setEnvFromInputString above will add env variable values from
// mapredChildEnv to existing variables. We want to overwrite
// HADOOP_ROOT_LOGGER and HADOOP_CLIENT_OPTS if the user set it explicitly.
Map<String, String> tmpEnv = new HashMap<String, String>();
MRApps.setEnvFromInputString(tmpEnv, mapredChildEnv, conf);
String[] keys = { "HADOOP_ROOT_LOGGER", "HADOOP_CLIENT_OPTS" };
for (String key : keys) {
if (tmpEnv.containsKey(key)) {
environment.put(key, tmpEnv.get(key));
}
}
// Add stdout/stderr env
environment.put(
MRJobConfig.STDOUT_LOGFILE_ENV,
getTaskLogFile(TaskLog.LogName.STDOUT)
);
environment.put(
MRJobConfig.STDERR_LOGFILE_ENV,
getTaskLogFile(TaskLog.LogName.STDERR)
);
}
  private static String getChildJavaOpts(JobConf jobConf, boolean isMapTask) {
    // Note: despite the historical naming of the config keys, these are JVM
    // options, not classpath entries.
    String userOpts;
    String adminOpts;
    if (isMapTask) {
      userOpts =
          jobConf.get(
              JobConf.MAPRED_MAP_TASK_JAVA_OPTS,
              jobConf.get(
                  JobConf.MAPRED_TASK_JAVA_OPTS,
                  JobConf.DEFAULT_MAPRED_TASK_JAVA_OPTS));
      adminOpts =
          jobConf.get(
              MRJobConfig.MAPRED_MAP_ADMIN_JAVA_OPTS,
              MRJobConfig.DEFAULT_MAPRED_ADMIN_JAVA_OPTS);
    } else {
      userOpts =
          jobConf.get(
              JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS,
              jobConf.get(
                  JobConf.MAPRED_TASK_JAVA_OPTS,
                  JobConf.DEFAULT_MAPRED_TASK_JAVA_OPTS));
      adminOpts =
          jobConf.get(
              MRJobConfig.MAPRED_REDUCE_ADMIN_JAVA_OPTS,
              MRJobConfig.DEFAULT_MAPRED_ADMIN_JAVA_OPTS);
    }
    // Add admin opts first so they can be overridden by user opts.
    return adminOpts + " " + userOpts;
  }
public static List<String> getVMCommand(
InetSocketAddress taskAttemptListenerAddr, Task task,
JVMId jvmID) {
TaskAttemptID attemptID = task.getTaskID();
JobConf conf = task.conf;
Vector<String> vargs = new Vector<String>(8);
vargs.add(MRApps.crossPlatformifyMREnv(task.conf, Environment.JAVA_HOME)
+ "/bin/java");
// Add child (task) java-vm options.
//
// The following symbols if present in mapred.{map|reduce}.child.java.opts
// value are replaced:
// + @taskid@ is interpolated with value of TaskID.
// Other occurrences of @ will not be altered.
//
// Example with multiple arguments and substitutions, showing
// jvm GC logging, and start of a passwordless JVM JMX agent so can
// connect with jconsole and the likes to watch child memory, threads
// and get thread dumps.
//
// <property>
// <name>mapred.map.child.java.opts</name>
  //  <value>-Xmx512M -verbose:gc -Xloggc:/tmp/@taskid@.gc \
// -Dcom.sun.management.jmxremote.authenticate=false \
// -Dcom.sun.management.jmxremote.ssl=false \
// </value>
// </property>
//
// <property>
// <name>mapred.reduce.child.java.opts</name>
  //  <value>-Xmx1024M -verbose:gc -Xloggc:/tmp/@taskid@.gc \
// -Dcom.sun.management.jmxremote.authenticate=false \
// -Dcom.sun.management.jmxremote.ssl=false \
// </value>
// </property>
//
String javaOpts = getChildJavaOpts(conf, task.isMapTask());
javaOpts = javaOpts.replace("@taskid@", attemptID.toString());
String [] javaOptsSplit = javaOpts.split(" ");
for (int i = 0; i < javaOptsSplit.length; i++) {
vargs.add(javaOptsSplit[i]);
}
Path childTmpDir = new Path(MRApps.crossPlatformifyMREnv(conf, Environment.PWD),
YarnConfiguration.DEFAULT_CONTAINER_TEMP_DIR);
vargs.add("-Djava.io.tmpdir=" + childTmpDir);
MRApps.addLog4jSystemProperties(task, vargs, conf);
if (conf.getProfileEnabled()) {
if (conf.getProfileTaskRange(task.isMapTask()
).isIncluded(task.getPartition())) {
final String profileParams = conf.get(task.isMapTask()
? MRJobConfig.TASK_MAP_PROFILE_PARAMS
: MRJobConfig.TASK_REDUCE_PROFILE_PARAMS, conf.getProfileParams());
vargs.add(String.format(profileParams,
getTaskLogFile(TaskLog.LogName.PROFILE)));
}
}
// Add main class and its arguments
vargs.add(YarnChild.class.getName()); // main of Child
// pass TaskAttemptListener's address
vargs.add(taskAttemptListenerAddr.getAddress().getHostAddress());
vargs.add(Integer.toString(taskAttemptListenerAddr.getPort()));
vargs.add(attemptID.toString()); // pass task identifier
// Finally add the jvmID
vargs.add(String.valueOf(jvmID.getId()));
vargs.add("1>" + getTaskLogFile(TaskLog.LogName.STDOUT));
vargs.add("2>" + getTaskLogFile(TaskLog.LogName.STDERR));
  // Final command
StringBuilder mergedCommand = new StringBuilder();
for (CharSequence str : vargs) {
mergedCommand.append(str).append(" ");
}
Vector<String> vargsFinal = new Vector<String>(1);
vargsFinal.add(mergedCommand.toString());
return vargsFinal;
}
}
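// ---------------------------------------------------------------------------
// Hedged sketch (editor's addition) of the @taskid@ interpolation described
// in getVMCommand above; the opts string and attempt id are invented values.
// ---------------------------------------------------------------------------
class TaskIdInterpolationSketch {
  public static void main(String[] args) {
    String javaOpts = "-Xmx512M -verbose:gc -Xloggc:/tmp/@taskid@.gc";
    String attemptId = "attempt_1393307629410_0001_m_000000_0";
    // Only @taskid@ is replaced; other '@' characters are left untouched.
    System.out.println(javaOpts.replace("@taskid@", attemptId));
  }
}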
| 8,177 | 36.342466 | 84 | java
hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/WrappedPeriodicStatsAccumulator.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
// Workaround for PeriodicStatsAccumulator being package-private
public class WrappedPeriodicStatsAccumulator {
private PeriodicStatsAccumulator real;
public WrappedPeriodicStatsAccumulator(PeriodicStatsAccumulator real) {
this.real = real;
}
public void extend(double newProgress, int newValue) {
real.extend(newProgress, newValue);
}
}
| 1,202 | 34.382353 | 75 | java
hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryParsing.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic
.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.StringTokenizer;
import java.util.concurrent.atomic.AtomicInteger;
import org.junit.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.jobhistory.EventReader;
import org.apache.hadoop.mapreduce.jobhistory.HistoryEvent;
import org.apache.hadoop.mapreduce.jobhistory.HistoryViewer;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.AMInfo;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo;
import org.apache.hadoop.mapreduce.jobhistory.JobUnsuccessfulCompletionEvent;
import org.apache.hadoop.mapreduce.jobhistory.TaskFailedEvent;
import org.apache.hadoop.mapreduce.jobhistory.TaskFinishedEvent;
import org.apache.hadoop.mapreduce.jobhistory.TaskStartedEvent;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.JobIdPBImpl;
import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.TaskIdPBImpl;
import org.apache.hadoop.mapreduce.v2.app.MRApp;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
import org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo;
import org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents.MRAppWithHistory;
import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobsInfo;
import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo;
import org.apache.hadoop.net.DNSToSwitchMapping;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.service.Service;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.util.RackResolver;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
public class TestJobHistoryParsing {
private static final Log LOG = LogFactory.getLog(TestJobHistoryParsing.class);
private static final String RACK_NAME = "/MyRackName";
private ByteArrayOutputStream outContent = new ByteArrayOutputStream();
public static class MyResolver implements DNSToSwitchMapping {
@Override
public List<String> resolve(List<String> names) {
return Arrays.asList(new String[] { RACK_NAME });
}
@Override
public void reloadCachedMappings() {
}
@Override
public void reloadCachedMappings(List<String> names) {
}
}
@Test(timeout = 50000)
public void testJobInfo() throws Exception {
JobInfo info = new JobInfo();
Assert.assertEquals("NORMAL", info.getPriority());
info.printAll();
}
@Test(timeout = 300000)
public void testHistoryParsing() throws Exception {
LOG.info("STARTING testHistoryParsing()");
try {
checkHistoryParsing(2, 1, 2);
} finally {
LOG.info("FINISHED testHistoryParsing()");
}
}
@Test(timeout = 50000)
public void testHistoryParsingWithParseErrors() throws Exception {
LOG.info("STARTING testHistoryParsingWithParseErrors()");
try {
checkHistoryParsing(3, 0, 2);
} finally {
LOG.info("FINISHED testHistoryParsingWithParseErrors()");
}
}
private static String getJobSummary(FileContext fc, Path path)
throws IOException {
Path qPath = fc.makeQualified(path);
FSDataInputStream in = fc.open(qPath);
String jobSummaryString = in.readUTF();
in.close();
return jobSummaryString;
}
private void checkHistoryParsing(final int numMaps, final int numReduces,
final int numSuccessfulMaps) throws Exception {
Configuration conf = new Configuration();
conf.set(MRJobConfig.USER_NAME, System.getProperty("user.name"));
long amStartTimeEst = System.currentTimeMillis();
conf.setClass(
NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
MyResolver.class, DNSToSwitchMapping.class);
RackResolver.init(conf);
MRApp app = new MRAppWithHistory(numMaps, numReduces, true, this.getClass()
.getName(), true);
app.submit(conf);
Job job = app.getContext().getAllJobs().values().iterator().next();
JobId jobId = job.getID();
LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString());
app.waitForState(job, JobState.SUCCEEDED);
// make sure all events are flushed
app.waitForState(Service.STATE.STOPPED);
String jobhistoryDir = JobHistoryUtils
.getHistoryIntermediateDoneDirForUser(conf);
FileContext fc = null;
try {
fc = FileContext.getFileContext(conf);
} catch (IOException ioe) {
LOG.info("Can not get FileContext", ioe);
throw (new Exception("Can not get File Context"));
}
if (numMaps == numSuccessfulMaps) {
String summaryFileName = JobHistoryUtils
.getIntermediateSummaryFileName(jobId);
Path summaryFile = new Path(jobhistoryDir, summaryFileName);
String jobSummaryString = getJobSummary(fc, summaryFile);
Assert.assertNotNull(jobSummaryString);
Assert.assertTrue(jobSummaryString.contains("resourcesPerMap=100"));
Assert.assertTrue(jobSummaryString.contains("resourcesPerReduce=100"));
Map<String, String> jobSummaryElements = new HashMap<String, String>();
StringTokenizer strToken = new StringTokenizer(jobSummaryString, ",");
while (strToken.hasMoreTokens()) {
String keypair = strToken.nextToken();
jobSummaryElements.put(keypair.split("=")[0], keypair.split("=")[1]);
}
Assert.assertEquals("JobId does not match", jobId.toString(),
jobSummaryElements.get("jobId"));
Assert.assertEquals("JobName does not match", "test",
jobSummaryElements.get("jobName"));
Assert.assertTrue("submitTime should not be 0",
Long.parseLong(jobSummaryElements.get("submitTime")) != 0);
Assert.assertTrue("launchTime should not be 0",
Long.parseLong(jobSummaryElements.get("launchTime")) != 0);
Assert
.assertTrue(
"firstMapTaskLaunchTime should not be 0",
Long.parseLong(jobSummaryElements.get("firstMapTaskLaunchTime")) != 0);
Assert
.assertTrue("firstReduceTaskLaunchTime should not be 0",
Long.parseLong(jobSummaryElements
.get("firstReduceTaskLaunchTime")) != 0);
Assert.assertTrue("finishTime should not be 0",
Long.parseLong(jobSummaryElements.get("finishTime")) != 0);
Assert.assertEquals("Mismatch in num map slots", numSuccessfulMaps,
Integer.parseInt(jobSummaryElements.get("numMaps")));
Assert.assertEquals("Mismatch in num reduce slots", numReduces,
Integer.parseInt(jobSummaryElements.get("numReduces")));
Assert.assertEquals("User does not match",
System.getProperty("user.name"), jobSummaryElements.get("user"));
Assert.assertEquals("Queue does not match", "default",
jobSummaryElements.get("queue"));
Assert.assertEquals("Status does not match", "SUCCEEDED",
jobSummaryElements.get("status"));
}
JobHistory jobHistory = new JobHistory();
jobHistory.init(conf);
HistoryFileInfo fileInfo = jobHistory.getJobFileInfo(jobId);
JobInfo jobInfo;
long numFinishedMaps;
synchronized (fileInfo) {
Path historyFilePath = fileInfo.getHistoryFile();
FSDataInputStream in = null;
LOG.info("JobHistoryFile is: " + historyFilePath);
try {
in = fc.open(fc.makeQualified(historyFilePath));
} catch (IOException ioe) {
LOG.info("Can not open history file: " + historyFilePath, ioe);
throw (new Exception("Can not open History File"));
}
JobHistoryParser parser = new JobHistoryParser(in);
final EventReader realReader = new EventReader(in);
EventReader reader = Mockito.mock(EventReader.class);
if (numMaps == numSuccessfulMaps) {
reader = realReader;
} else {
final AtomicInteger numFinishedEvents = new AtomicInteger(0); // Hack!
Mockito.when(reader.getNextEvent()).thenAnswer(
new Answer<HistoryEvent>() {
public HistoryEvent answer(InvocationOnMock invocation)
throws IOException {
HistoryEvent event = realReader.getNextEvent();
if (event instanceof TaskFinishedEvent) {
numFinishedEvents.incrementAndGet();
}
if (numFinishedEvents.get() <= numSuccessfulMaps) {
return event;
} else {
throw new IOException("test");
}
}
});
}
jobInfo = parser.parse(reader);
numFinishedMaps = computeFinishedMaps(jobInfo, numMaps, numSuccessfulMaps);
if (numFinishedMaps != numMaps) {
Exception parseException = parser.getParseException();
Assert.assertNotNull("Didn't get expected parse exception",
parseException);
}
}
Assert.assertEquals("Incorrect username ", System.getProperty("user.name"),
jobInfo.getUsername());
Assert.assertEquals("Incorrect jobName ", "test", jobInfo.getJobname());
Assert.assertEquals("Incorrect queuename ", "default",
jobInfo.getJobQueueName());
Assert
.assertEquals("incorrect conf path", "test", jobInfo.getJobConfPath());
Assert.assertEquals("incorrect finishedMap ", numSuccessfulMaps,
numFinishedMaps);
Assert.assertEquals("incorrect finishedReduces ", numReduces,
jobInfo.getFinishedReduces());
Assert.assertEquals("incorrect uberized ", job.isUber(),
jobInfo.getUberized());
Map<TaskID, TaskInfo> allTasks = jobInfo.getAllTasks();
int totalTasks = allTasks.size();
Assert.assertEquals("total number of tasks is incorrect ",
(numMaps + numReduces), totalTasks);
// Verify aminfo
Assert.assertEquals(1, jobInfo.getAMInfos().size());
Assert.assertEquals(MRApp.NM_HOST, jobInfo.getAMInfos().get(0)
.getNodeManagerHost());
AMInfo amInfo = jobInfo.getAMInfos().get(0);
Assert.assertEquals(MRApp.NM_PORT, amInfo.getNodeManagerPort());
Assert.assertEquals(MRApp.NM_HTTP_PORT, amInfo.getNodeManagerHttpPort());
Assert.assertEquals(1, amInfo.getAppAttemptId().getAttemptId());
Assert.assertEquals(amInfo.getAppAttemptId(), amInfo.getContainerId()
.getApplicationAttemptId());
Assert.assertTrue(amInfo.getStartTime() <= System.currentTimeMillis()
&& amInfo.getStartTime() >= amStartTimeEst);
ContainerId fakeCid = MRApp.newContainerId(-1, -1, -1, -1);
// Assert at taskAttempt level
for (TaskInfo taskInfo : allTasks.values()) {
int taskAttemptCount = taskInfo.getAllTaskAttempts().size();
Assert
.assertEquals("total number of task attempts ", 1, taskAttemptCount);
TaskAttemptInfo taInfo = taskInfo.getAllTaskAttempts().values()
.iterator().next();
Assert.assertNotNull(taInfo.getContainerId());
// Verify the wrong ctor is not being used. Remove after mrv1 is removed.
Assert.assertFalse(taInfo.getContainerId().equals(fakeCid));
}
// Deep compare Job and JobInfo
for (Task task : job.getTasks().values()) {
TaskInfo taskInfo = allTasks.get(TypeConverter.fromYarn(task.getID()));
Assert.assertNotNull("TaskInfo not found", taskInfo);
for (TaskAttempt taskAttempt : task.getAttempts().values()) {
TaskAttemptInfo taskAttemptInfo = taskInfo.getAllTaskAttempts().get(
TypeConverter.fromYarn((taskAttempt.getID())));
Assert.assertNotNull("TaskAttemptInfo not found", taskAttemptInfo);
Assert.assertEquals("Incorrect shuffle port for task attempt",
taskAttempt.getShufflePort(), taskAttemptInfo.getShufflePort());
if (numMaps == numSuccessfulMaps) {
Assert.assertEquals(MRApp.NM_HOST, taskAttemptInfo.getHostname());
Assert.assertEquals(MRApp.NM_PORT, taskAttemptInfo.getPort());
// Verify rack-name
Assert.assertEquals("rack-name is incorrect",
taskAttemptInfo.getRackname(), RACK_NAME);
}
}
}
// test output for HistoryViewer
PrintStream stdps = System.out;
try {
System.setOut(new PrintStream(outContent));
HistoryViewer viewer;
synchronized (fileInfo) {
viewer = new HistoryViewer(fc.makeQualified(
fileInfo.getHistoryFile()).toString(), conf, true);
}
viewer.print();
for (TaskInfo taskInfo : allTasks.values()) {
String test = (taskInfo.getTaskStatus() == null ? "" : taskInfo
.getTaskStatus())
+ " "
+ taskInfo.getTaskType()
+ " task list for " + taskInfo.getTaskId().getJobID();
Assert.assertTrue(outContent.toString().indexOf(test) > 0);
Assert.assertTrue(outContent.toString().indexOf(
taskInfo.getTaskId().toString()) > 0);
}
} finally {
System.setOut(stdps);
}
}
// Computes finished maps similar to RecoveryService...
private long computeFinishedMaps(JobInfo jobInfo, int numMaps,
int numSuccessfulMaps) {
if (numMaps == numSuccessfulMaps) {
return jobInfo.getFinishedMaps();
}
long numFinishedMaps = 0;
Map<org.apache.hadoop.mapreduce.TaskID, TaskInfo> taskInfos = jobInfo
.getAllTasks();
for (TaskInfo taskInfo : taskInfos.values()) {
if (TaskState.SUCCEEDED.toString().equals(taskInfo.getTaskStatus())) {
++numFinishedMaps;
}
}
return numFinishedMaps;
}
@Test(timeout = 30000)
public void testHistoryParsingForFailedAttempts() throws Exception {
LOG.info("STARTING testHistoryParsingForFailedAttempts");
try {
Configuration conf = new Configuration();
conf.setClass(
NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
MyResolver.class, DNSToSwitchMapping.class);
RackResolver.init(conf);
MRApp app = new MRAppWithHistoryWithFailedAttempt(2, 1, true, this
.getClass().getName(), true);
app.submit(conf);
Job job = app.getContext().getAllJobs().values().iterator().next();
JobId jobId = job.getID();
app.waitForState(job, JobState.SUCCEEDED);
// make sure all events are flushed
app.waitForState(Service.STATE.STOPPED);
JobHistory jobHistory = new JobHistory();
jobHistory.init(conf);
HistoryFileInfo fileInfo = jobHistory.getJobFileInfo(jobId);
JobHistoryParser parser;
JobInfo jobInfo;
synchronized (fileInfo) {
Path historyFilePath = fileInfo.getHistoryFile();
FSDataInputStream in = null;
FileContext fc = null;
try {
fc = FileContext.getFileContext(conf);
in = fc.open(fc.makeQualified(historyFilePath));
} catch (IOException ioe) {
LOG.info("Can not open history file: " + historyFilePath, ioe);
throw (new Exception("Can not open History File"));
}
parser = new JobHistoryParser(in);
jobInfo = parser.parse();
}
Exception parseException = parser.getParseException();
Assert.assertNull("Caught an expected exception " + parseException,
parseException);
      int numFailedAttempts = 0;
Map<TaskID, TaskInfo> allTasks = jobInfo.getAllTasks();
for (Task task : job.getTasks().values()) {
TaskInfo taskInfo = allTasks.get(TypeConverter.fromYarn(task.getID()));
for (TaskAttempt taskAttempt : task.getAttempts().values()) {
TaskAttemptInfo taskAttemptInfo = taskInfo.getAllTaskAttempts().get(
TypeConverter.fromYarn((taskAttempt.getID())));
// Verify rack-name for all task attempts
Assert.assertEquals("rack-name is incorrect",
taskAttemptInfo.getRackname(), RACK_NAME);
          if (taskAttemptInfo.getTaskStatus().equals("FAILED")) {
            numFailedAttempts++;
          }
}
}
Assert.assertEquals("No of Failed tasks doesn't match.", 2,
noOffailedAttempts);
} finally {
LOG.info("FINISHED testHistoryParsingForFailedAttempts");
}
}
@Test(timeout = 60000)
public void testCountersForFailedTask() throws Exception {
LOG.info("STARTING testCountersForFailedTask");
try {
Configuration conf = new Configuration();
conf.setClass(
NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
MyResolver.class, DNSToSwitchMapping.class);
RackResolver.init(conf);
MRApp app = new MRAppWithHistoryWithFailedTask(2, 1, true, this
.getClass().getName(), true);
app.submit(conf);
Job job = app.getContext().getAllJobs().values().iterator().next();
JobId jobId = job.getID();
app.waitForState(job, JobState.FAILED);
// make sure all events are flushed
app.waitForState(Service.STATE.STOPPED);
JobHistory jobHistory = new JobHistory();
jobHistory.init(conf);
HistoryFileInfo fileInfo = jobHistory.getJobFileInfo(jobId);
JobHistoryParser parser;
JobInfo jobInfo;
synchronized (fileInfo) {
Path historyFilePath = fileInfo.getHistoryFile();
FSDataInputStream in = null;
FileContext fc = null;
try {
fc = FileContext.getFileContext(conf);
in = fc.open(fc.makeQualified(historyFilePath));
} catch (IOException ioe) {
LOG.info("Can not open history file: " + historyFilePath, ioe);
throw (new Exception("Can not open History File"));
}
parser = new JobHistoryParser(in);
jobInfo = parser.parse();
}
Exception parseException = parser.getParseException();
Assert.assertNull("Caught an expected exception " + parseException,
parseException);
for (Map.Entry<TaskID, TaskInfo> entry : jobInfo.getAllTasks().entrySet()) {
TaskId yarnTaskID = TypeConverter.toYarn(entry.getKey());
CompletedTask ct = new CompletedTask(yarnTaskID, entry.getValue());
Assert.assertNotNull("completed task report has null counters", ct
.getReport().getCounters());
}
final List<String> originalDiagnostics = job.getDiagnostics();
final String historyError = jobInfo.getErrorInfo();
assertTrue("No original diagnostics for a failed job",
originalDiagnostics != null && !originalDiagnostics.isEmpty());
assertNotNull("No history error info for a failed job ", historyError);
for (String diagString : originalDiagnostics) {
assertTrue(historyError.contains(diagString));
}
} finally {
LOG.info("FINISHED testCountersForFailedTask");
}
}
@Test(timeout = 60000)
public void testDiagnosticsForKilledJob() throws Exception {
LOG.info("STARTING testDiagnosticsForKilledJob");
try {
final Configuration conf = new Configuration();
conf.setClass(
NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
MyResolver.class, DNSToSwitchMapping.class);
RackResolver.init(conf);
MRApp app = new MRAppWithHistoryWithJobKilled(2, 1, true, this
.getClass().getName(), true);
app.submit(conf);
Job job = app.getContext().getAllJobs().values().iterator().next();
JobId jobId = job.getID();
app.waitForState(job, JobState.KILLED);
// make sure all events are flushed
app.waitForState(Service.STATE.STOPPED);
JobHistory jobHistory = new JobHistory();
jobHistory.init(conf);
HistoryFileInfo fileInfo = jobHistory.getJobFileInfo(jobId);
JobHistoryParser parser;
JobInfo jobInfo;
synchronized (fileInfo) {
Path historyFilePath = fileInfo.getHistoryFile();
FSDataInputStream in = null;
FileContext fc = null;
try {
fc = FileContext.getFileContext(conf);
in = fc.open(fc.makeQualified(historyFilePath));
} catch (IOException ioe) {
LOG.info("Can not open history file: " + historyFilePath, ioe);
throw (new Exception("Can not open History File"));
}
parser = new JobHistoryParser(in);
jobInfo = parser.parse();
}
Exception parseException = parser.getParseException();
assertNull("Caught an expected exception " + parseException,
parseException);
final List<String> originalDiagnostics = job.getDiagnostics();
final String historyError = jobInfo.getErrorInfo();
assertTrue("No original diagnostics for a failed job",
originalDiagnostics != null && !originalDiagnostics.isEmpty());
assertNotNull("No history error info for a failed job ", historyError);
for (String diagString : originalDiagnostics) {
assertTrue(historyError.contains(diagString));
}
assertTrue("No killed message in diagnostics",
historyError.contains(JobImpl.JOB_KILLED_DIAG));
} finally {
LOG.info("FINISHED testDiagnosticsForKilledJob");
}
}
@Test(timeout = 50000)
public void testScanningOldDirs() throws Exception {
LOG.info("STARTING testScanningOldDirs");
try {
Configuration conf = new Configuration();
conf.setClass(
NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
MyResolver.class, DNSToSwitchMapping.class);
RackResolver.init(conf);
MRApp app = new MRAppWithHistory(1, 1, true, this.getClass().getName(),
true);
app.submit(conf);
Job job = app.getContext().getAllJobs().values().iterator().next();
JobId jobId = job.getID();
LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString());
app.waitForState(job, JobState.SUCCEEDED);
// make sure all events are flushed
app.waitForState(Service.STATE.STOPPED);
HistoryFileManagerForTest hfm = new HistoryFileManagerForTest();
hfm.init(conf);
HistoryFileInfo fileInfo = hfm.getFileInfo(jobId);
Assert.assertNotNull("Unable to locate job history", fileInfo);
// force the manager to "forget" the job
hfm.deleteJobFromJobListCache(fileInfo);
final int msecPerSleep = 10;
int msecToSleep = 10 * 1000;
while (fileInfo.isMovePending() && msecToSleep > 0) {
Assert.assertTrue(!fileInfo.didMoveFail());
msecToSleep -= msecPerSleep;
Thread.sleep(msecPerSleep);
}
Assert.assertTrue("Timeout waiting for history move", msecToSleep > 0);
fileInfo = hfm.getFileInfo(jobId);
hfm.stop();
Assert.assertNotNull("Unable to locate old job history", fileInfo);
Assert.assertTrue("HistoryFileManager not shutdown properly",
hfm.moveToDoneExecutor.isTerminated());
} finally {
LOG.info("FINISHED testScanningOldDirs");
}
}
static class MRAppWithHistoryWithFailedAttempt extends MRAppWithHistory {
public MRAppWithHistoryWithFailedAttempt(int maps, int reduces,
boolean autoComplete, String testName, boolean cleanOnStart) {
super(maps, reduces, autoComplete, testName, cleanOnStart);
}
@SuppressWarnings("unchecked")
@Override
protected void attemptLaunched(TaskAttemptId attemptID) {
if (attemptID.getTaskId().getId() == 0 && attemptID.getId() == 0) {
getContext().getEventHandler().handle(
new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_FAILMSG));
} else {
getContext().getEventHandler().handle(
new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_DONE));
}
}
}
static class MRAppWithHistoryWithFailedTask extends MRAppWithHistory {
public MRAppWithHistoryWithFailedTask(int maps, int reduces,
boolean autoComplete, String testName, boolean cleanOnStart) {
super(maps, reduces, autoComplete, testName, cleanOnStart);
}
@SuppressWarnings("unchecked")
@Override
protected void attemptLaunched(TaskAttemptId attemptID) {
if (attemptID.getTaskId().getId() == 0) {
getContext().getEventHandler().handle(
new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_FAILMSG));
} else {
getContext().getEventHandler().handle(
new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_DONE));
}
}
}
static class MRAppWithHistoryWithJobKilled extends MRAppWithHistory {
public MRAppWithHistoryWithJobKilled(int maps, int reduces,
boolean autoComplete, String testName, boolean cleanOnStart) {
super(maps, reduces, autoComplete, testName, cleanOnStart);
}
@SuppressWarnings("unchecked")
@Override
protected void attemptLaunched(TaskAttemptId attemptID) {
if (attemptID.getTaskId().getId() == 0) {
getContext().getEventHandler().handle(
new JobEvent(attemptID.getTaskId().getJobId(),
JobEventType.JOB_KILL));
} else {
getContext().getEventHandler().handle(
new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_DONE));
}
}
}
static class HistoryFileManagerForTest extends HistoryFileManager {
void deleteJobFromJobListCache(HistoryFileInfo fileInfo) {
jobListCache.delete(fileInfo);
}
}
public static void main(String[] args) throws Exception {
TestJobHistoryParsing t = new TestJobHistoryParsing();
t.testHistoryParsing();
t.testHistoryParsingForFailedAttempts();
}
/**
* test clean old history files. Files should be deleted after 1 week by
* default.
*/
@Test(timeout = 15000)
public void testDeleteFileInfo() throws Exception {
LOG.info("STARTING testDeleteFileInfo");
try {
Configuration conf = new Configuration();
conf.setClass(
NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
MyResolver.class, DNSToSwitchMapping.class);
RackResolver.init(conf);
MRApp app = new MRAppWithHistory(1, 1, true, this.getClass().getName(),
true);
app.submit(conf);
Job job = app.getContext().getAllJobs().values().iterator().next();
JobId jobId = job.getID();
app.waitForState(job, JobState.SUCCEEDED);
// make sure all events are flushed
app.waitForState(Service.STATE.STOPPED);
HistoryFileManager hfm = new HistoryFileManager();
hfm.init(conf);
HistoryFileInfo fileInfo = hfm.getFileInfo(jobId);
hfm.initExisting();
      // wait for the history files to move from the done_intermediate
      // directory to the done directory
while (fileInfo.isMovePending()) {
Thread.sleep(300);
}
Assert.assertNotNull(hfm.jobListCache.values());
      // try to remove fileInfo
      hfm.clean();
      // check that fileInfo has not been deleted
      Assert.assertFalse(fileInfo.isDeleted());
      // lower the maximum history age so the file is immediately stale
      hfm.setMaxHistoryAge(-1);
      hfm.clean();
      hfm.stop();
      Assert.assertTrue("Thread pool not shutdown",
          hfm.moveToDoneExecutor.isTerminated());
      // now the file should be deleted
      Assert.assertTrue("file should be deleted", fileInfo.isDeleted());
} finally {
LOG.info("FINISHED testDeleteFileInfo");
}
}
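  // Hedged sketch (editor's addition, unused by the tests): how a deployment
  // would set the retention window that testDeleteFileInfo shortens via
  // setMaxHistoryAge(-1). The key JHAdminConfig.MR_HISTORY_MAX_AGE_MS is
  // assumed here to back the one-week default mentioned above.
  @SuppressWarnings("unused")
  private static void configureOneWeekRetention(Configuration conf) {
    conf.setLong(org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig
        .MR_HISTORY_MAX_AGE_MS, 7L * 24 * 60 * 60 * 1000);
  }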
/**
* Simple test some methods of JobHistory
*/
@Test(timeout = 20000)
public void testJobHistoryMethods() throws Exception {
LOG.info("STARTING testJobHistoryMethods");
try {
Configuration configuration = new Configuration();
configuration
.setClass(
NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
MyResolver.class, DNSToSwitchMapping.class);
RackResolver.init(configuration);
MRApp app = new MRAppWithHistory(1, 1, true, this.getClass().getName(),
true);
app.submit(configuration);
Job job = app.getContext().getAllJobs().values().iterator().next();
app.waitForState(job, JobState.SUCCEEDED);
JobHistory jobHistory = new JobHistory();
jobHistory.init(configuration);
// Method getAllJobs
Assert.assertEquals(1, jobHistory.getAllJobs().size());
// and with ApplicationId
Assert.assertEquals(1, jobHistory.getAllJobs(app.getAppID()).size());
JobsInfo jobsinfo = jobHistory.getPartialJobs(0L, 10L, null, "default",
0L, System.currentTimeMillis() + 1, 0L,
System.currentTimeMillis() + 1, JobState.SUCCEEDED);
Assert.assertEquals(1, jobsinfo.getJobs().size());
Assert.assertNotNull(jobHistory.getApplicationAttemptId());
// test Application Id
Assert.assertEquals("application_0_0000", jobHistory.getApplicationID()
.toString());
Assert
.assertEquals("Job History Server", jobHistory.getApplicationName());
      // these lookups are not supported by JobHistory and return null
      Assert.assertNull(jobHistory.getEventHandler());
      Assert.assertNull(jobHistory.getClock());
      Assert.assertNull(jobHistory.getClusterInfo());
} finally {
LOG.info("FINISHED testJobHistoryMethods");
}
}
/**
* Simple test PartialJob
*/
@Test(timeout = 3000)
public void testPartialJob() throws Exception {
JobId jobId = new JobIdPBImpl();
jobId.setId(0);
JobIndexInfo jii = new JobIndexInfo(0L, System.currentTimeMillis(), "user",
"jobName", jobId, 3, 2, "JobStatus");
PartialJob test = new PartialJob(jii, jobId);
assertEquals(1.0f, test.getProgress(), 0.001);
assertNull(test.getAllCounters());
assertNull(test.getTasks());
assertNull(test.getTasks(TaskType.MAP));
assertNull(test.getTask(new TaskIdPBImpl()));
assertNull(test.getTaskAttemptCompletionEvents(0, 100));
assertNull(test.getMapAttemptCompletionEvents(0, 100));
assertTrue(test.checkAccess(UserGroupInformation.getCurrentUser(), null));
assertNull(test.getAMInfos());
}
@Test
public void testMultipleFailedTasks() throws Exception {
JobHistoryParser parser =
new JobHistoryParser(Mockito.mock(FSDataInputStream.class));
EventReader reader = Mockito.mock(EventReader.class);
final AtomicInteger numEventsRead = new AtomicInteger(0); // Hack!
final org.apache.hadoop.mapreduce.TaskType taskType =
org.apache.hadoop.mapreduce.TaskType.MAP;
final TaskID[] tids = new TaskID[2];
final JobID jid = new JobID("1", 1);
tids[0] = new TaskID(jid, taskType, 0);
tids[1] = new TaskID(jid, taskType, 1);
Mockito.when(reader.getNextEvent()).thenAnswer(
new Answer<HistoryEvent>() {
public HistoryEvent answer(InvocationOnMock invocation)
throws IOException {
// send two task start and two task fail events for tasks 0 and 1
int eventId = numEventsRead.getAndIncrement();
TaskID tid = tids[eventId & 0x1];
if (eventId < 2) {
return new TaskStartedEvent(tid, 0, taskType, "");
}
if (eventId < 4) {
TaskFailedEvent tfe = new TaskFailedEvent(tid, 0, taskType,
"failed", "FAILED", null, new Counters());
tfe.setDatum(tfe.getDatum());
return tfe;
}
if (eventId < 5) {
JobUnsuccessfulCompletionEvent juce =
new JobUnsuccessfulCompletionEvent(jid, 100L, 2, 0,
"JOB_FAILED", Collections.singletonList(
"Task failed: " + tids[0].toString()));
return juce;
}
return null;
}
});
JobInfo info = parser.parse(reader);
assertTrue("Task 0 not implicated",
info.getErrorInfo().contains(tids[0].toString()));
}
@Test
public void testFailedJobHistoryWithoutDiagnostics() throws Exception {
final Path histPath = new Path(getClass().getClassLoader().getResource(
"job_1393307629410_0001-1393307687476-user-Sleep+job-1393307723835-0-0-FAILED-default-1393307693920.jhist")
.getFile());
final FileSystem lfs = FileSystem.getLocal(new Configuration());
final FSDataInputStream fsdis = lfs.open(histPath);
try {
JobHistoryParser parser = new JobHistoryParser(fsdis);
JobInfo info = parser.parse();
assertEquals("History parsed jobId incorrectly",
info.getJobId(), JobID.forName("job_1393307629410_0001") );
assertEquals("Default diagnostics incorrect ", "", info.getErrorInfo());
} finally {
fsdis.close();
}
}
/**
* Test compatibility of JobHistoryParser with 2.0.3-alpha history files
* @throws IOException
*/
@Test
public void testTaskAttemptUnsuccessfulCompletionWithoutCounters203() throws IOException
{
Path histPath = new Path(getClass().getClassLoader().getResource(
"job_2.0.3-alpha-FAILED.jhist").getFile());
JobHistoryParser parser = new JobHistoryParser(FileSystem.getLocal
(new Configuration()), histPath);
JobInfo jobInfo = parser.parse();
LOG.info(" job info: " + jobInfo.getJobname() + " "
+ jobInfo.getFinishedMaps() + " "
+ jobInfo.getTotalMaps() + " "
+ jobInfo.getJobId() ) ;
}
/**
* Test compatibility of JobHistoryParser with 2.4.0 history files
* @throws IOException
*/
@Test
public void testTaskAttemptUnsuccessfulCompletionWithoutCounters240() throws IOException
{
Path histPath = new Path(getClass().getClassLoader().getResource(
"job_2.4.0-FAILED.jhist").getFile());
JobHistoryParser parser = new JobHistoryParser(FileSystem.getLocal
(new Configuration()), histPath);
JobInfo jobInfo = parser.parse();
LOG.info(" job info: " + jobInfo.getJobname() + " "
+ jobInfo.getFinishedMaps() + " "
+ jobInfo.getTotalMaps() + " "
+ jobInfo.getJobId() );
}
/**
* Test compatibility of JobHistoryParser with 0.23.9 history files
* @throws IOException
*/
@Test
public void testTaskAttemptUnsuccessfulCompletionWithoutCounters0239() throws IOException
{
Path histPath = new Path(getClass().getClassLoader().getResource(
"job_0.23.9-FAILED.jhist").getFile());
JobHistoryParser parser = new JobHistoryParser(FileSystem.getLocal
(new Configuration()), histPath);
JobInfo jobInfo = parser.parse();
LOG.info(" job info: " + jobInfo.getJobname() + " "
+ jobInfo.getFinishedMaps() + " "
+ jobInfo.getTotalMaps() + " "
+ jobInfo.getJobId() ) ;
}
}
| 37,108 | 38.352068 | 115 | java
hadoop | hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryServer.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs;
import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TaskCounter;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsRequest;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.MRApp;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents.MRAppWithHistory;
import org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryParsing.MyResolver;
import org.apache.hadoop.net.DNSToSwitchMapping;
import org.apache.hadoop.service.Service;
import org.apache.hadoop.service.Service.STATE;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.util.RackResolver;
import org.junit.After;
import org.junit.Test;
import static org.junit.Assert.*;
/*
 * Tests for the JobHistoryServer protocols.
 */
public class TestJobHistoryServer {
private static RecordFactory recordFactory = RecordFactoryProvider
.getRecordFactory(null);
JobHistoryServer historyServer=null;
  // Simple init/start/stop test for the JobHistoryServer; the service state
  // should change at each step.
@Test (timeout= 50000 )
public void testStartStopServer() throws Exception {
historyServer = new JobHistoryServer();
Configuration config = new Configuration();
historyServer.init(config);
assertEquals(STATE.INITED, historyServer.getServiceState());
assertEquals(6, historyServer.getServices().size());
HistoryClientService historyService = historyServer.getClientService();
assertNotNull(historyServer.getClientService());
assertEquals(STATE.INITED, historyService.getServiceState());
historyServer.start();
assertEquals(STATE.STARTED, historyServer.getServiceState());
assertEquals(STATE.STARTED, historyService.getServiceState());
historyServer.stop();
assertEquals(STATE.STOPPED, historyServer.getServiceState());
assertNotNull(historyService.getClientHandler().getConnectAddress());
}
  // Test the JobHistoryServer reports: the history server should pick up the
  // history files written by MRApp and serve their contents.
@Test (timeout= 50000 )
public void testReports() throws Exception {
Configuration config = new Configuration();
config
.setClass(
CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
MyResolver.class, DNSToSwitchMapping.class);
RackResolver.init(config);
MRApp app = new MRAppWithHistory(1, 1, true, this.getClass().getName(),
true);
app.submit(config);
Job job = app.getContext().getAllJobs().values().iterator().next();
app.waitForState(job, JobState.SUCCEEDED);
historyServer = new JobHistoryServer();
historyServer.init(config);
historyServer.start();
// search JobHistory service
JobHistory jobHistory= null;
    for (Service service : historyServer.getServices()) {
      if (service instanceof JobHistory) {
        jobHistory = (JobHistory) service;
      }
    }
Map<JobId, Job> jobs= jobHistory.getAllJobs();
assertEquals(1, jobs.size());
assertEquals("job_0_0000",jobs.keySet().iterator().next().toString());
Task task = job.getTasks().values().iterator().next();
TaskAttempt attempt = task.getAttempts().values().iterator().next();
HistoryClientService historyService = historyServer.getClientService();
MRClientProtocol protocol = historyService.getClientHandler();
GetTaskAttemptReportRequest gtarRequest = recordFactory
.newRecordInstance(GetTaskAttemptReportRequest.class);
// test getTaskAttemptReport
TaskAttemptId taId = attempt.getID();
taId.setTaskId(task.getID());
taId.getTaskId().setJobId(job.getID());
gtarRequest.setTaskAttemptId(taId);
GetTaskAttemptReportResponse response = protocol
.getTaskAttemptReport(gtarRequest);
assertEquals("container_0_0000_01_000000", response.getTaskAttemptReport()
.getContainerId().toString());
assertTrue(response.getTaskAttemptReport().getDiagnosticInfo().isEmpty());
// counters
assertNotNull(response.getTaskAttemptReport().getCounters()
.getCounter(TaskCounter.PHYSICAL_MEMORY_BYTES));
assertEquals(taId.toString(), response.getTaskAttemptReport()
.getTaskAttemptId().toString());
// test getTaskReport
GetTaskReportRequest request = recordFactory
.newRecordInstance(GetTaskReportRequest.class);
TaskId taskId = task.getID();
taskId.setJobId(job.getID());
request.setTaskId(taskId);
GetTaskReportResponse reportResponse = protocol.getTaskReport(request);
assertEquals("", reportResponse.getTaskReport().getDiagnosticsList()
.iterator().next());
// progress
assertEquals(1.0f, reportResponse.getTaskReport().getProgress(), 0.01);
// report has corrected taskId
assertEquals(taskId.toString(), reportResponse.getTaskReport().getTaskId()
.toString());
// Task state should be SUCCEEDED
assertEquals(TaskState.SUCCEEDED, reportResponse.getTaskReport()
.getTaskState());
// For invalid jobid, throw IOException
GetTaskReportsRequest gtreportsRequest =
recordFactory.newRecordInstance(GetTaskReportsRequest.class);
gtreportsRequest.setJobId(TypeConverter.toYarn(JobID
.forName("job_1415730144495_0001")));
gtreportsRequest.setTaskType(TaskType.REDUCE);
try {
protocol.getTaskReports(gtreportsRequest);
fail("IOException not thrown for invalid job id");
} catch (IOException e) {
// Expected
}
// test getTaskAttemptCompletionEvents
GetTaskAttemptCompletionEventsRequest taskAttemptRequest = recordFactory
.newRecordInstance(GetTaskAttemptCompletionEventsRequest.class);
taskAttemptRequest.setJobId(job.getID());
GetTaskAttemptCompletionEventsResponse taskAttemptCompletionEventsResponse = protocol
.getTaskAttemptCompletionEvents(taskAttemptRequest);
assertEquals(0, taskAttemptCompletionEventsResponse.getCompletionEventCount());
// test getDiagnostics
GetDiagnosticsRequest diagnosticRequest = recordFactory
.newRecordInstance(GetDiagnosticsRequest.class);
diagnosticRequest.setTaskAttemptId(taId);
GetDiagnosticsResponse diagnosticResponse = protocol
.getDiagnostics(diagnosticRequest);
// the history returns exactly one diagnostic entry, and it is an empty string
assertEquals(1, diagnosticResponse.getDiagnosticsCount());
assertEquals("", diagnosticResponse.getDiagnostics(0));
}
// Test the launch method: launching the server must not call System.exit().
@Test(timeout = 60000)
public void testLaunch() throws Exception {
ExitUtil.disableSystemExit();
try {
historyServer = JobHistoryServer.launchJobHistoryServer(new String[0]);
} catch (ExitUtil.ExitException e) {
// Any exit is a failure; assert on the status first so a non-zero
// exit produces a more informative message, then fail outright.
assertEquals(0, e.status);
ExitUtil.resetFirstExitException();
fail();
}
}
@After
public void stop() {
if (historyServer != null) {
historyServer.stop();
}
}
}
| 9,341
| 41.081081
| 100
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJHSDelegationTokenSecretManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.v2.api.MRDelegationTokenIdentifier;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.authentication.util.KerberosName;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.DelegationKey;
import org.junit.Test;
public class TestJHSDelegationTokenSecretManager {
@Test
public void testRecovery() throws IOException {
Configuration conf = new Configuration();
HistoryServerStateStoreService store =
new HistoryServerMemStateStoreService();
store.init(conf);
store.start();
JHSDelegationTokenSecretManagerForTest mgr =
new JHSDelegationTokenSecretManagerForTest(store);
mgr.startThreads();
MRDelegationTokenIdentifier tokenId1 = new MRDelegationTokenIdentifier(
new Text("tokenOwner"), new Text("tokenRenewer"),
new Text("tokenUser"));
Token<MRDelegationTokenIdentifier> token1 =
new Token<MRDelegationTokenIdentifier>(tokenId1, mgr);
MRDelegationTokenIdentifier tokenId2 = new MRDelegationTokenIdentifier(
new Text("tokenOwner"), new Text("tokenRenewer"),
new Text("tokenUser"));
Token<MRDelegationTokenIdentifier> token2 =
new Token<MRDelegationTokenIdentifier>(tokenId2, mgr);
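// Constructing each Token above with the manager as its SecretManager
// drives createPassword(), which assigns the sequence number and persists
// the identifier through the backing state store.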
DelegationKey[] keys = mgr.getAllKeys();
long tokenRenewDate1 = mgr.getAllTokens().get(tokenId1).getRenewDate();
long tokenRenewDate2 = mgr.getAllTokens().get(tokenId2).getRenewDate();
mgr.stopThreads();
mgr = new JHSDelegationTokenSecretManagerForTest(store);
mgr.recover(store.loadState());
List<DelegationKey> recoveredKeys = Arrays.asList(mgr.getAllKeys());
for (DelegationKey key : keys) {
assertTrue("key missing after recovery", recoveredKeys.contains(key));
}
assertTrue("token1 missing", mgr.getAllTokens().containsKey(tokenId1));
assertEquals("token1 renew date", tokenRenewDate1,
mgr.getAllTokens().get(tokenId1).getRenewDate());
assertTrue("token2 missing", mgr.getAllTokens().containsKey(tokenId2));
assertEquals("token2 renew date", tokenRenewDate2,
mgr.getAllTokens().get(tokenId2).getRenewDate());
mgr.startThreads();
mgr.verifyToken(tokenId1, token1.getPassword());
mgr.verifyToken(tokenId2, token2.getPassword());
MRDelegationTokenIdentifier tokenId3 = new MRDelegationTokenIdentifier(
new Text("tokenOwner"), new Text("tokenRenewer"),
new Text("tokenUser"));
Token<MRDelegationTokenIdentifier> token3 =
new Token<MRDelegationTokenIdentifier>(tokenId3, mgr);
assertEquals("sequence number restore", tokenId2.getSequenceNumber() + 1,
tokenId3.getSequenceNumber());
mgr.cancelToken(token1, "tokenOwner");
// Testing with full principal name
MRDelegationTokenIdentifier tokenIdFull = new MRDelegationTokenIdentifier(
new Text("tokenOwner/localhost@LOCALHOST"), new Text("tokenRenewer"),
new Text("tokenUser"));
KerberosName.setRules("RULE:[1:$1]\nRULE:[2:$1]");
Token<MRDelegationTokenIdentifier> tokenFull = new Token<MRDelegationTokenIdentifier>(
tokenIdFull, mgr);
// Negative test
try {
mgr.cancelToken(tokenFull, "tokenOwner");
} catch (AccessControlException ace) {
assertTrue(ace.getMessage().contains(
"is not authorized to cancel the token"));
}
// Succeed to cancel with full principal
mgr.cancelToken(tokenFull, tokenIdFull.getOwner().toString());
long tokenRenewDate3 = mgr.getAllTokens().get(tokenId3).getRenewDate();
mgr.stopThreads();
mgr = new JHSDelegationTokenSecretManagerForTest(store);
mgr.recover(store.loadState());
assertFalse("token1 should be missing",
mgr.getAllTokens().containsKey(tokenId1));
assertTrue("token2 missing", mgr.getAllTokens().containsKey(tokenId2));
assertEquals("token2 renew date", tokenRenewDate2,
mgr.getAllTokens().get(tokenId2).getRenewDate());
assertTrue("token3 missing", mgr.getAllTokens().containsKey(tokenId3));
assertEquals("token3 renew date", tokenRenewDate3,
mgr.getAllTokens().get(tokenId3).getRenewDate());
mgr.startThreads();
mgr.verifyToken(tokenId2, token2.getPassword());
mgr.verifyToken(tokenId3, token3.getPassword());
mgr.stopThreads();
}
private static class JHSDelegationTokenSecretManagerForTest
extends JHSDelegationTokenSecretManager {
public JHSDelegationTokenSecretManagerForTest(
HistoryServerStateStoreService store) {
super(10000, 10000, 10000, 10000, store);
}
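// Expose a copy of the protected currentTokens map so the test can
// assert on renew dates without mutating the manager's live state.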
public Map<MRDelegationTokenIdentifier, DelegationTokenInformation> getAllTokens() {
return new HashMap<MRDelegationTokenIdentifier, DelegationTokenInformation>(currentTokens);
}
}
}
| 5,989
| 40.888112
| 97
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/MockHistoryContext.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs;
import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.app.MockAppContext;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.hs.MockHistoryJobs.JobsPair;
import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobsInfo;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
public class MockHistoryContext extends MockAppContext implements HistoryContext {
private final Map<JobId, Job> partialJobs;
private final Map<JobId, Job> fullJobs;
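// The two maps above model the history server's two views of a job:
// "partial" jobs as served from the .jhist index before a full parse,
// and "full" jobs with completely loaded history data.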
public MockHistoryContext(int numJobs, int numTasks, int numAttempts) {
super(0);
JobsPair jobs;
try {
jobs = MockHistoryJobs.newHistoryJobs(numJobs, numTasks, numAttempts);
} catch (IOException e) {
throw new YarnRuntimeException(e);
}
partialJobs = jobs.partial;
fullJobs = jobs.full;
}
public MockHistoryContext(int appid, int numJobs, int numTasks,
int numAttempts) {
super(appid);
JobsPair jobs;
try {
jobs = MockHistoryJobs.newHistoryJobs(getApplicationID(), numJobs, numTasks,
numAttempts);
} catch (IOException e) {
throw new YarnRuntimeException(e);
}
partialJobs = jobs.partial;
fullJobs = jobs.full;
}
public MockHistoryContext(int appid, int numTasks, int numAttempts, Path confPath) {
super(appid, numTasks, numAttempts, confPath);
fullJobs = super.getAllJobs();
partialJobs = null;
}
public MockHistoryContext(int appid, int numJobs, int numTasks, int numAttempts,
boolean hasFailedTasks) {
super(appid);
JobsPair jobs;
try {
jobs = MockHistoryJobs.newHistoryJobs(getApplicationID(), numJobs, numTasks,
numAttempts, hasFailedTasks);
} catch (IOException e) {
throw new YarnRuntimeException(e);
}
partialJobs = jobs.partial;
fullJobs = jobs.full;
}
@Override
public Job getJob(JobId jobID) {
return fullJobs.get(jobID);
}
public Job getPartialJob(JobId jobID) {
return partialJobs.get(jobID);
}
@Override
public Map<JobId, Job> getAllJobs() {
return fullJobs;
}
@Override
public Map<JobId, Job> getAllJobs(ApplicationId appID) {
return null;
}
@Override
public JobsInfo getPartialJobs(Long offset, Long count, String user,
String queue, Long sBegin, Long sEnd, Long fBegin, Long fEnd,
JobState jobState) {
return CachedHistoryStorage.getPartialJobs(this.partialJobs.values(),
offset, count, user, queue, sBegin, sEnd, fBegin, fEnd, jobState);
}
}
| 3,597
| 30.840708
| 86
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobIdHistoryFileInfoMap.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs;
import java.util.Collection;
import java.util.NavigableSet;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo;
import org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.JobIdHistoryFileInfoMap;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.junit.Test;
import org.mockito.Mockito;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
public class TestJobIdHistoryFileInfoMap {
private boolean checkSize(JobIdHistoryFileInfoMap map, int size)
throws InterruptedException {
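// The map is updated from another thread, so poll (up to ~2 seconds)
// rather than asserting on the size immediately.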
for (int i = 0; i < 100; i++) {
if (map.size() != size)
Thread.sleep(20);
else
return true;
}
return false;
}
/**
* Trivial test case that verifies basic functionality of {@link
* JobIdHistoryFileInfoMap}
*/
@Test(timeout = 2000)
public void testWithSingleElement() throws InterruptedException {
JobIdHistoryFileInfoMap mapWithSize = new JobIdHistoryFileInfoMap();
JobId jobId = MRBuilderUtils.newJobId(1, 1, 1);
HistoryFileInfo fileInfo1 = Mockito.mock(HistoryFileInfo.class);
Mockito.when(fileInfo1.getJobId()).thenReturn(jobId);
// add it twice
assertEquals("Incorrect return on putIfAbsent()",
null, mapWithSize.putIfAbsent(jobId, fileInfo1));
assertEquals("Incorrect return on putIfAbsent()",
fileInfo1, mapWithSize.putIfAbsent(jobId, fileInfo1));
// check get()
assertEquals("Incorrect get()", fileInfo1, mapWithSize.get(jobId));
assertTrue("Incorrect size()", checkSize(mapWithSize, 1));
// check navigableKeySet()
NavigableSet<JobId> set = mapWithSize.navigableKeySet();
assertEquals("Incorrect navigableKeySet()", 1, set.size());
assertTrue("Incorrect navigableKeySet()", set.contains(jobId));
// check values()
Collection<HistoryFileInfo> values = mapWithSize.values();
assertEquals("Incorrect values()", 1, values.size());
assertTrue("Incorrect values()", values.contains(fileInfo1));
}
}
| 2,931
| 36.113924
| 84
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobListCache.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs;
import java.lang.InterruptedException;
import java.util.Collection;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo;
import org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.JobListCache;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.junit.Test;
import org.mockito.Mockito;
import static org.junit.Assert.*;
public class TestJobListCache {
@Test(timeout = 1000)
public void testAddExisting() {
JobListCache cache = new JobListCache(2, 1000);
JobId jobId = MRBuilderUtils.newJobId(1, 1, 1);
HistoryFileInfo fileInfo = Mockito.mock(HistoryFileInfo.class);
Mockito.when(fileInfo.getJobId()).thenReturn(jobId);
cache.addIfAbsent(fileInfo);
cache.addIfAbsent(fileInfo);
assertEquals("Incorrect number of cache entries", 1,
cache.values().size());
}
@Test(timeout = 1000)
public void testEviction() throws InterruptedException {
int maxSize = 2;
JobListCache cache = new JobListCache(maxSize, 1000);
JobId jobId1 = MRBuilderUtils.newJobId(1, 1, 1);
HistoryFileInfo fileInfo1 = Mockito.mock(HistoryFileInfo.class);
Mockito.when(fileInfo1.getJobId()).thenReturn(jobId1);
JobId jobId2 = MRBuilderUtils.newJobId(2, 2, 2);
HistoryFileInfo fileInfo2 = Mockito.mock(HistoryFileInfo.class);
Mockito.when(fileInfo2.getJobId()).thenReturn(jobId2);
JobId jobId3 = MRBuilderUtils.newJobId(3, 3, 3);
HistoryFileInfo fileInfo3 = Mockito.mock(HistoryFileInfo.class);
Mockito.when(fileInfo3.getJobId()).thenReturn(jobId3);
cache.addIfAbsent(fileInfo1);
cache.addIfAbsent(fileInfo2);
cache.addIfAbsent(fileInfo3);
Collection<HistoryFileInfo> values;
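// Eviction may happen on a background thread, so poll briefly for the
// cache to shrink back to maxSize before asserting what was evicted.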
for (int i = 0; i < 9; i++) {
values = cache.values();
if (values.size() > maxSize) {
Thread.sleep(100);
} else {
assertFalse("fileInfo1 should have been evicted",
values.contains(fileInfo1));
return;
}
}
fail("JobListCache didn't delete the extra entry");
}
}
| 2,944
| 34.481928
| 76
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEvents.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs;
import java.util.Map;
import org.junit.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.MRApp;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.service.Service;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.event.EventHandler;
import org.junit.Test;
public class TestJobHistoryEvents {
private static final Log LOG = LogFactory.getLog(TestJobHistoryEvents.class);
@Test
public void testHistoryEvents() throws Exception {
Configuration conf = new Configuration();
MRApp app = new MRAppWithHistory(2, 1, true, this.getClass().getName(), true);
app.submit(conf);
Job job = app.getContext().getAllJobs().values().iterator().next();
JobId jobId = job.getID();
LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString());
app.waitForState(job, JobState.SUCCEEDED);
//make sure all events are flushed
app.waitForState(Service.STATE.STOPPED);
/*
* Use HistoryContext to read logged events and verify the number of
* completed maps
*/
HistoryContext context = new JobHistory();
// test start and stop states
((JobHistory)context).init(conf);
((JobHistory)context).start();
Assert.assertTrue(context.getStartTime() > 0);
Assert.assertEquals(Service.STATE.STARTED,
((JobHistory) context).getServiceState());
// get job before stopping JobHistory
Job parsedJob = context.getJob(jobId);
// stop JobHistory
((JobHistory)context).stop();
Assert.assertEquals(Service.STATE.STOPPED,
((JobHistory) context).getServiceState());
Assert.assertEquals("CompletedMaps not correct", 2,
parsedJob.getCompletedMaps());
Assert.assertEquals(System.getProperty("user.name"), parsedJob.getUserName());
Map<TaskId, Task> tasks = parsedJob.getTasks();
Assert.assertEquals("No of tasks not correct", 3, tasks.size());
for (Task task : tasks.values()) {
verifyTask(task);
}
Map<TaskId, Task> maps = parsedJob.getTasks(TaskType.MAP);
Assert.assertEquals("No of maps not correct", 2, maps.size());
Map<TaskId, Task> reduces = parsedJob.getTasks(TaskType.REDUCE);
Assert.assertEquals("No of reduces not correct", 1, reduces.size());
Assert.assertEquals("CompletedReduce not correct", 1,
parsedJob.getCompletedReduces());
Assert.assertEquals("Job state not currect", JobState.SUCCEEDED,
parsedJob.getState());
}
/**
* Verify that all the events are flushed on stopping the HistoryHandler
* @throws Exception
*/
@Test
public void testEventsFlushOnStop() throws Exception {
Configuration conf = new Configuration();
MRApp app = new MRAppWithSpecialHistoryHandler(1, 0, true, this
.getClass().getName(), true);
app.submit(conf);
Job job = app.getContext().getAllJobs().values().iterator().next();
JobId jobId = job.getID();
LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString());
app.waitForState(job, JobState.SUCCEEDED);
// make sure all events are flushed
app.waitForState(Service.STATE.STOPPED);
/*
* Use HistoryContext to read logged events and verify the number of
* completed maps
*/
HistoryContext context = new JobHistory();
((JobHistory) context).init(conf);
Job parsedJob = context.getJob(jobId);
Assert.assertEquals("CompletedMaps not correct", 1, parsedJob
.getCompletedMaps());
Map<TaskId, Task> tasks = parsedJob.getTasks();
Assert.assertEquals("No of tasks not correct", 1, tasks.size());
verifyTask(tasks.values().iterator().next());
Map<TaskId, Task> maps = parsedJob.getTasks(TaskType.MAP);
Assert.assertEquals("No of maps not correct", 1, maps.size());
Assert.assertEquals("Job state not currect", JobState.SUCCEEDED,
parsedJob.getState());
}
@Test
public void testJobHistoryEventHandlerIsFirstServiceToStop() {
MRApp app = new MRAppWithSpecialHistoryHandler(1, 0, true, this
.getClass().getName(), true);
Configuration conf = new Configuration();
app.init(conf);
Service[] services = app.getServices().toArray(new Service[0]);
// Verifying that it is the last to be added is same as verifying that it is
// the first to be stopped. CompositeService related tests already validate
// this.
Assert.assertEquals("JobHistoryEventHandler",
services[services.length - 1].getName());
}
@Test
public void testAssignedQueue() throws Exception {
Configuration conf = new Configuration();
MRApp app = new MRAppWithHistory(2, 1, true, this.getClass().getName(),
true, "assignedQueue");
app.submit(conf);
Job job = app.getContext().getAllJobs().values().iterator().next();
JobId jobId = job.getID();
LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString());
app.waitForState(job, JobState.SUCCEEDED);
//make sure all events are flushed
app.waitForState(Service.STATE.STOPPED);
/*
* Use HistoryContext to read logged events and verify the number of
* completed maps
*/
HistoryContext context = new JobHistory();
// test start and stop states
((JobHistory)context).init(conf);
((JobHistory)context).start();
Assert.assertTrue(context.getStartTime() > 0);
Assert.assertEquals(Service.STATE.STARTED,
((JobHistory) context).getServiceState());
// get job before stopping JobHistory
Job parsedJob = context.getJob(jobId);
// stop JobHistory
((JobHistory)context).stop();
Assert.assertEquals(Service.STATE.STOPPED,
((JobHistory) context).getServiceState());
Assert.assertEquals("QueueName not correct", "assignedQueue",
parsedJob.getQueueName());
}
private void verifyTask(Task task) {
Assert.assertEquals("Task state not currect", TaskState.SUCCEEDED,
task.getState());
Map<TaskAttemptId, TaskAttempt> attempts = task.getAttempts();
Assert.assertEquals("No of attempts not correct", 1, attempts.size());
for (TaskAttempt attempt : attempts.values()) {
verifyAttempt(attempt);
}
}
private void verifyAttempt(TaskAttempt attempt) {
Assert.assertEquals("TaskAttempt state not currect",
TaskAttemptState.SUCCEEDED, attempt.getState());
Assert.assertNotNull(attempt.getAssignedContainerID());
//Verify the wrong ctor is not being used. Remove after mrv1 is removed.
ContainerId fakeCid = MRApp.newContainerId(-1, -1, -1, -1);
Assert.assertFalse(attempt.getAssignedContainerID().equals(fakeCid));
// Verify the complete containerManagerAddress
Assert.assertEquals(MRApp.NM_HOST + ":" + MRApp.NM_PORT,
attempt.getAssignedContainerMgrAddress());
}
static class MRAppWithHistory extends MRApp {
public MRAppWithHistory(int maps, int reduces, boolean autoComplete,
String testName, boolean cleanOnStart) {
super(maps, reduces, autoComplete, testName, cleanOnStart);
}
public MRAppWithHistory(int maps, int reduces, boolean autoComplete,
String testName, boolean cleanOnStart, String assignedQueue) {
super(maps, reduces, autoComplete, testName, cleanOnStart, assignedQueue);
}
@Override
protected EventHandler<JobHistoryEvent> createJobHistoryHandler(
AppContext context) {
return new JobHistoryEventHandler(
context, getStartCount());
}
}
/**
* MRapp with special HistoryEventHandler that writes events only during stop.
* This is to simulate events that don't get written by the eventHandling
* thread due to say a slow DFS and verify that they are flushed during stop.
*/
private static class MRAppWithSpecialHistoryHandler extends MRApp {
public MRAppWithSpecialHistoryHandler(int maps, int reduces,
boolean autoComplete, String testName, boolean cleanOnStart) {
super(maps, reduces, autoComplete, testName, cleanOnStart);
}
@Override
protected EventHandler<JobHistoryEvent> createJobHistoryHandler(
AppContext context) {
return new JobHistoryEventHandler(context, getStartCount()) {
@Override
protected void serviceStart() {
// Don't start any event draining thread.
super.eventHandlingThread = new Thread();
super.eventHandlingThread.start();
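// The replacement thread has an empty run(), so the normal event drain
// loop never executes; every event stays queued until serviceStop()
// flushes it, which is exactly what this test exercises.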
}
};
}
}
public static void main(String[] args) throws Exception {
TestJobHistoryEvents t = new TestJobHistoryEvents();
t.testHistoryEvents();
t.testEventsFlushOnStop();
t.testJobHistoryEventHandlerIsFirstServiceToStop();
}
}
| 10,337
| 37.431227
| 87
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/MockHistoryJobs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.TaskCompletionEvent;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.MockJobs;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import com.google.common.collect.Maps;
public class MockHistoryJobs extends MockJobs {
public static class JobsPair {
public Map<JobId, Job> partial;
public Map<JobId, Job> full;
}
public static JobsPair newHistoryJobs(int numJobs, int numTasksPerJob,
int numAttemptsPerTask) throws IOException {
Map<JobId, Job> mocked = newJobs(numJobs, numTasksPerJob, numAttemptsPerTask);
return split(mocked);
}
public static JobsPair newHistoryJobs(ApplicationId appID, int numJobsPerApp,
int numTasksPerJob, int numAttemptsPerTask) throws IOException {
Map<JobId, Job> mocked = newJobs(appID, numJobsPerApp, numTasksPerJob,
numAttemptsPerTask);
return split(mocked);
}
public static JobsPair newHistoryJobs(ApplicationId appID, int numJobsPerApp,
int numTasksPerJob, int numAttemptsPerTask, boolean hasFailedTasks)
throws IOException {
Map<JobId, Job> mocked = newJobs(appID, numJobsPerApp, numTasksPerJob,
numAttemptsPerTask, hasFailedTasks);
return split(mocked);
}
private static JobsPair split(Map<JobId, Job> mocked) throws IOException {
JobsPair ret = new JobsPair();
ret.full = Maps.newHashMap();
ret.partial = Maps.newHashMap();
for(Map.Entry<JobId, Job> entry: mocked.entrySet()) {
JobId id = entry.getKey();
Job j = entry.getValue();
MockCompletedJob mockJob = new MockCompletedJob(j);
// use MockCompletedJob to set everything below to make sure
// consistent with what history server would do
ret.full.put(id, mockJob);
JobReport report = mockJob.getReport();
JobIndexInfo info = new JobIndexInfo(report.getStartTime(),
report.getFinishTime(), mockJob.getUserName(), mockJob.getName(), id,
mockJob.getCompletedMaps(), mockJob.getCompletedReduces(),
String.valueOf(mockJob.getState()));
info.setJobStartTime(report.getStartTime());
info.setQueueName(mockJob.getQueueName());
ret.partial.put(id, new PartialJob(info, id));
}
return ret;
}
private static class MockCompletedJob extends CompletedJob {
private Job job;
public MockCompletedJob(Job job) throws IOException {
super(new Configuration(), job.getID(), null, true, job.getUserName(),
null, null);
this.job = job;
}
@Override
public int getCompletedMaps() {
// we always return total since this is history server
// and PartialJob also assumes completed == total
return job.getTotalMaps();
}
@Override
public int getCompletedReduces() {
// we always return total since this is history server
// and PartialJob also assumes completed == total
return job.getTotalReduces();
}
@Override
public org.apache.hadoop.mapreduce.Counters getAllCounters() {
return job.getAllCounters();
}
@Override
public JobId getID() {
return job.getID();
}
@Override
public JobReport getReport() {
return job.getReport();
}
@Override
public float getProgress() {
return job.getProgress();
}
@Override
public JobState getState() {
return job.getState();
}
@Override
public Task getTask(TaskId taskId) {
return job.getTask(taskId);
}
@Override
public TaskAttemptCompletionEvent[] getTaskAttemptCompletionEvents(
int fromEventId, int maxEvents) {
return job.getTaskAttemptCompletionEvents(fromEventId, maxEvents);
}
@Override
public TaskCompletionEvent[] getMapAttemptCompletionEvents(
int startIndex, int maxEvents) {
return job.getMapAttemptCompletionEvents(startIndex, maxEvents);
}
@Override
public Map<TaskId, Task> getTasks() {
return job.getTasks();
}
@Override
protected void loadFullHistoryData(boolean loadTasks,
Path historyFileAbsolute) throws IOException {
//Empty
}
@Override
public List<String> getDiagnostics() {
return job.getDiagnostics();
}
@Override
public String getName() {
return job.getName();
}
@Override
public String getQueueName() {
return job.getQueueName();
}
@Override
public int getTotalMaps() {
return job.getTotalMaps();
}
@Override
public int getTotalReduces() {
return job.getTotalReduces();
}
@Override
public boolean isUber() {
return job.isUber();
}
@Override
public Map<TaskId, Task> getTasks(TaskType taskType) {
return job.getTasks();
}
@Override
public
boolean checkAccess(UserGroupInformation callerUGI, JobACL jobOperation) {
return job.checkAccess(callerUGI, jobOperation);
}
@Override
public Map<JobACL, AccessControlList> getJobACLs() {
return job.getJobACLs();
}
@Override
public String getUserName() {
return job.getUserName();
}
@Override
public Path getConfFile() {
return job.getConfFile();
}
@Override
public List<AMInfo> getAMInfos() {
return job.getAMInfos();
}
}
}
| 7,054
| 29.021277
| 82
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerMemStateStoreService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.v2.api.MRDelegationTokenIdentifier;
import org.apache.hadoop.security.token.delegation.DelegationKey;
/**
* A state store backed by memory for unit tests
*/
class HistoryServerMemStateStoreService
extends HistoryServerStateStoreService {
HistoryServerState state;
@Override
protected void initStorage(Configuration conf) throws IOException {
}
@Override
protected void startStorage() throws IOException {
state = new HistoryServerState();
}
@Override
protected void closeStorage() throws IOException {
state = null;
}
@Override
public HistoryServerState loadState() throws IOException {
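// Hand back a snapshot rather than the live maps so callers cannot
// observe or cause concurrent mutation of the in-memory state.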
HistoryServerState result = new HistoryServerState();
result.tokenState.putAll(state.tokenState);
result.tokenMasterKeyState.addAll(state.tokenMasterKeyState);
return result;
}
@Override
public void storeToken(MRDelegationTokenIdentifier tokenId, Long renewDate)
throws IOException {
if (state.tokenState.containsKey(tokenId)) {
throw new IOException("token " + tokenId + " was stored twice");
}
state.tokenState.put(tokenId, renewDate);
}
@Override
public void updateToken(MRDelegationTokenIdentifier tokenId, Long renewDate)
throws IOException {
if (!state.tokenState.containsKey(tokenId)) {
throw new IOException("token " + tokenId + " not in store");
}
state.tokenState.put(tokenId, renewDate);
}
@Override
public void removeToken(MRDelegationTokenIdentifier tokenId)
throws IOException {
state.tokenState.remove(tokenId);
}
@Override
public void storeTokenMasterKey(DelegationKey key) throws IOException {
if (state.tokenMasterKeyState.contains(key)) {
throw new IOException("token master key " + key + " was stored twice");
}
state.tokenMasterKeyState.add(key);
}
@Override
public void removeTokenMasterKey(DelegationKey key) throws IOException {
state.tokenMasterKeyState.remove(key);
}
}
| 2,920
| 30.408602
| 78
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestHistoryServerLeveldbStateStoreService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.v2.api.MRDelegationTokenIdentifier;
import org.apache.hadoop.mapreduce.v2.hs.HistoryServerStateStoreService.HistoryServerState;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.apache.hadoop.security.token.delegation.DelegationKey;
import org.apache.hadoop.service.ServiceStateException;
import org.apache.hadoop.yarn.server.records.Version;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestHistoryServerLeveldbStateStoreService {
private static final File testDir = new File(
System.getProperty("test.build.data",
System.getProperty("java.io.tmpdir")),
"TestHistoryServerLeveldbSystemStateStoreService");
private Configuration conf;
@Before
public void setup() {
FileUtil.fullyDelete(testDir);
testDir.mkdirs();
conf = new Configuration();
conf.setBoolean(JHAdminConfig.MR_HS_RECOVERY_ENABLE, true);
conf.setClass(JHAdminConfig.MR_HS_STATE_STORE,
HistoryServerLeveldbStateStoreService.class,
HistoryServerStateStoreService.class);
conf.set(JHAdminConfig.MR_HS_LEVELDB_STATE_STORE_PATH,
testDir.getAbsoluteFile().toString());
}
@After
public void cleanup() {
FileUtil.fullyDelete(testDir);
}
private HistoryServerStateStoreService createAndStartStore()
throws IOException {
HistoryServerStateStoreService store =
HistoryServerStateStoreServiceFactory.getStore(conf);
assertTrue("Factory did not create a leveldb store",
store instanceof HistoryServerLeveldbStateStoreService);
store.init(conf);
store.start();
return store;
}
@Test
public void testCheckVersion() throws IOException {
HistoryServerLeveldbStateStoreService store =
new HistoryServerLeveldbStateStoreService();
store.init(conf);
store.start();
// default version
Version defaultVersion = store.getCurrentVersion();
assertEquals(defaultVersion, store.loadVersion());
// compatible version
Version compatibleVersion =
Version.newInstance(defaultVersion.getMajorVersion(),
defaultVersion.getMinorVersion() + 2);
store.dbStoreVersion(compatibleVersion);
assertEquals(compatibleVersion, store.loadVersion());
store.close();
store = new HistoryServerLeveldbStateStoreService();
store.init(conf);
store.start();
// overwrite the compatible version
assertEquals(defaultVersion, store.loadVersion());
// incompatible version
Version incompatibleVersion =
Version.newInstance(defaultVersion.getMajorVersion() + 1,
defaultVersion.getMinorVersion());
store.dbStoreVersion(incompatibleVersion);
store.close();
store = new HistoryServerLeveldbStateStoreService();
try {
store.init(conf);
store.start();
fail("Incompatible version, should have thrown before here.");
} catch (ServiceStateException e) {
assertTrue("Exception message mismatch",
e.getMessage().contains("Incompatible version for state:"));
}
store.close();
}
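// --- Illustrative sketch, our reading of the check exercised above ---
// A state store typically treats a differing minor version as compatible
// and a differing major version as incompatible; a minimal predicate for
// that rule might look like this (hypothetical helper, not the store's
// actual API):
private static boolean isCompatibleVersion(Version stored, Version current) {
return stored.getMajorVersion() == current.getMajorVersion();
}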
@Test
public void testTokenStore() throws IOException {
HistoryServerStateStoreService store = createAndStartStore();
// verify initially the store is empty
HistoryServerState state = store.loadState();
assertTrue("token state not empty", state.tokenState.isEmpty());
assertTrue("key state not empty", state.tokenMasterKeyState.isEmpty());
// store a key and some tokens
final DelegationKey key1 = new DelegationKey(1, 2, "keyData1".getBytes());
final MRDelegationTokenIdentifier token1 =
new MRDelegationTokenIdentifier(new Text("tokenOwner1"),
new Text("tokenRenewer1"), new Text("tokenUser1"));
token1.setSequenceNumber(1);
final Long tokenDate1 = 1L;
final MRDelegationTokenIdentifier token2 =
new MRDelegationTokenIdentifier(new Text("tokenOwner2"),
new Text("tokenRenewer2"), new Text("tokenUser2"));
token2.setSequenceNumber(12345678);
final Long tokenDate2 = 87654321L;
store.storeTokenMasterKey(key1);
store.storeToken(token1, tokenDate1);
store.storeToken(token2, tokenDate2);
store.close();
// verify the key and tokens can be recovered
store = createAndStartStore();
state = store.loadState();
assertEquals("incorrect loaded token count", 2, state.tokenState.size());
assertTrue("missing token 1", state.tokenState.containsKey(token1));
assertEquals("incorrect token 1 date", tokenDate1,
state.tokenState.get(token1));
assertTrue("missing token 2", state.tokenState.containsKey(token2));
assertEquals("incorrect token 2 date", tokenDate2,
state.tokenState.get(token2));
assertEquals("incorrect master key count", 1,
state.tokenMasterKeyState.size());
assertTrue("missing master key 1",
state.tokenMasterKeyState.contains(key1));
// store some more keys and tokens, remove the previous key and one
// of the tokens, and renew a previous token
final DelegationKey key2 = new DelegationKey(3, 4, "keyData2".getBytes());
final DelegationKey key3 = new DelegationKey(5, 6, "keyData3".getBytes());
final MRDelegationTokenIdentifier token3 =
new MRDelegationTokenIdentifier(new Text("tokenOwner3"),
new Text("tokenRenewer3"), new Text("tokenUser3"));
token3.setSequenceNumber(12345679);
final Long tokenDate3 = 87654321L;
store.removeToken(token1);
store.storeTokenMasterKey(key2);
final Long newTokenDate2 = 975318642L;
store.updateToken(token2, newTokenDate2);
store.removeTokenMasterKey(key1);
store.storeTokenMasterKey(key3);
store.storeToken(token3, tokenDate3);
store.close();
// verify the new keys and tokens are recovered, the removed key and
// token are no longer present, and the renewed token has the updated
// expiration date
store = createAndStartStore();
state = store.loadState();
assertEquals("incorrect loaded token count", 2, state.tokenState.size());
assertFalse("token 1 not removed", state.tokenState.containsKey(token1));
assertTrue("missing token 2", state.tokenState.containsKey(token2));
assertEquals("incorrect token 2 date", newTokenDate2,
state.tokenState.get(token2));
assertTrue("missing token 3", state.tokenState.containsKey(token3));
assertEquals("incorrect token 3 date", tokenDate3,
state.tokenState.get(token3));
assertEquals("incorrect master key count", 2,
state.tokenMasterKeyState.size());
assertFalse("master key 1 not removed",
state.tokenMasterKeyState.contains(key1));
assertTrue("missing master key 2",
state.tokenMasterKeyState.contains(key2));
assertTrue("missing master key 3",
state.tokenMasterKeyState.contains(key3));
store.close();
}
}
| 8,072
| 37.8125
| 91
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs;
import java.util.Map;
import java.io.IOException;
import java.util.LinkedList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo;
import org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.JobListCache;
import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobsInfo;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
import org.junit.After;
import org.junit.Test;
import org.mockito.Mockito;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.*;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
public class TestJobHistory {
JobHistory jobHistory = null;
@Test
public void testRefreshLoadedJobCache() throws Exception {
HistoryFileManager historyManager = mock(HistoryFileManager.class);
jobHistory = spy(new JobHistory());
doReturn(historyManager).when(jobHistory).createHistoryFileManager();
Configuration conf = new Configuration();
// Set the cache size to 2
conf.set(JHAdminConfig.MR_HISTORY_LOADED_JOB_CACHE_SIZE, "2");
jobHistory.init(conf);
jobHistory.start();
CachedHistoryStorage storage = spy((CachedHistoryStorage) jobHistory
.getHistoryStorage());
Job[] jobs = new Job[3];
JobId[] jobIds = new JobId[3];
for (int i = 0; i < 3; i++) {
jobs[i] = mock(Job.class);
jobIds[i] = mock(JobId.class);
when(jobs[i].getID()).thenReturn(jobIds[i]);
}
HistoryFileInfo fileInfo = mock(HistoryFileInfo.class);
when(historyManager.getFileInfo(any(JobId.class))).thenReturn(fileInfo);
when(fileInfo.loadJob()).thenReturn(jobs[0]).thenReturn(jobs[1])
.thenReturn(jobs[2]);
// getFullJob will put the job in the cache if it isn't there
for (int i = 0; i < 3; i++) {
storage.getFullJob(jobs[i].getID());
}
Map<JobId, Job> jobCache = storage.getLoadedJobCache();
// job0 should have been purged since cache size is 2
assertFalse(jobCache.containsKey(jobs[0].getID()));
assertTrue(jobCache.containsKey(jobs[1].getID())
&& jobCache.containsKey(jobs[2].getID()));
// Setting cache size to 3
conf.set(JHAdminConfig.MR_HISTORY_LOADED_JOB_CACHE_SIZE, "3");
doReturn(conf).when(storage).createConf();
when(fileInfo.loadJob()).thenReturn(jobs[0]).thenReturn(jobs[1])
.thenReturn(jobs[2]);
jobHistory.refreshLoadedJobCache();
for (int i = 0; i < 3; i++) {
storage.getFullJob(jobs[i].getID());
}
jobCache = storage.getLoadedJobCache();
// All three jobs should be in cache since its size is now 3
for (int i = 0; i < 3; i++) {
assertTrue(jobCache.containsKey(jobs[i].getID()));
}
}
@Test
public void testRefreshJobRetentionSettings() throws IOException,
InterruptedException {
String root = "mockfs://foo/";
String historyDoneDir = root + "mapred/history/done";
long now = System.currentTimeMillis();
long someTimeYesterday = now - (25L * 3600 * 1000);
long timeBefore200Secs = now - (200L * 1000);
// Get yesterday's date in YYYY/MM/DD format
String timestampComponent = JobHistoryUtils
.timestampDirectoryComponent(someTimeYesterday);
// Create a folder under yesterday's done dir
Path donePathYesterday = new Path(historyDoneDir, timestampComponent + "/"
+ "000000");
FileStatus dirCreatedYesterdayStatus = new FileStatus(0, true, 0, 0,
someTimeYesterday, donePathYesterday);
// Get today's date in YYYY/MM/DD format
timestampComponent = JobHistoryUtils
.timestampDirectoryComponent(timeBefore200Secs);
// Create a folder under today's done dir
Path donePathToday = new Path(historyDoneDir, timestampComponent + "/"
+ "000000");
FileStatus dirCreatedTodayStatus = new FileStatus(0, true, 0, 0,
timeBefore200Secs, donePathToday);
// Create a jhist file with yesterday's timestamp under yesterday's done dir
Path fileUnderYesterdayDir = new Path(donePathYesterday.toString(),
"job_1372363578825_0015-" + someTimeYesterday + "-user-Sleep+job-"
+ someTimeYesterday + "-1-1-SUCCEEDED-default.jhist");
FileStatus fileUnderYesterdayDirStatus = new FileStatus(10, false, 0, 0,
someTimeYesterday, fileUnderYesterdayDir);
// Create a jhist file with today's timestamp under today's done dir
Path fileUnderTodayDir = new Path(donePathToday.toString(),
"job_1372363578825_0016-" + timeBefore200Secs + "-user-Sleep+job-"
+ timeBefore200Secs + "-1-1-SUCCEEDED-default.jhist");
FileStatus fileUnderTodayDirStatus = new FileStatus(10, false, 0, 0,
timeBefore200Secs, fileUnderTodayDir);
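// At this point there are two done-dir folders (yesterday's and today's)
// with one jhist file each; the retention decisions below are driven by
// the modification times mocked into these FileStatus objects.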
HistoryFileManager historyManager = spy(new HistoryFileManager());
jobHistory = spy(new JobHistory());
List<FileStatus> fileStatusList = new LinkedList<FileStatus>();
fileStatusList.add(dirCreatedYesterdayStatus);
fileStatusList.add(dirCreatedTodayStatus);
// Make the initial delay of history job cleaner as 4 secs
doReturn(4).when(jobHistory).getInitDelaySecs();
doReturn(historyManager).when(jobHistory).createHistoryFileManager();
List<FileStatus> list1 = new LinkedList<FileStatus>();
list1.add(fileUnderYesterdayDirStatus);
doReturn(list1).when(historyManager).scanDirectoryForHistoryFiles(
eq(donePathYesterday), any(FileContext.class));
List<FileStatus> list2 = new LinkedList<FileStatus>();
list2.add(fileUnderTodayDirStatus);
doReturn(list2).when(historyManager).scanDirectoryForHistoryFiles(
eq(donePathToday), any(FileContext.class));
doReturn(fileStatusList).when(historyManager)
.getHistoryDirsForCleaning(Mockito.anyLong());
doReturn(true).when(historyManager).deleteDir(any(FileStatus.class));
JobListCache jobListCache = mock(JobListCache.class);
HistoryFileInfo fileInfo = mock(HistoryFileInfo.class);
doReturn(jobListCache).when(historyManager).createJobListCache();
when(jobListCache.get(any(JobId.class))).thenReturn(fileInfo);
doNothing().when(fileInfo).delete();
// Set job retention time to 24 hrs and cleaner interval to 2 secs
Configuration conf = new Configuration();
conf.setLong(JHAdminConfig.MR_HISTORY_MAX_AGE_MS, 24L * 3600 * 1000);
conf.setLong(JHAdminConfig.MR_HISTORY_CLEANER_INTERVAL_MS, 2 * 1000);
jobHistory.init(conf);
jobHistory.start();
assertEquals(2 * 1000L, jobHistory.getCleanerInterval());
// Only yesterday's jhist file should get deleted
verify(fileInfo, timeout(20000).times(1)).delete();
fileStatusList.remove(dirCreatedYesterdayStatus);
// Now reset job retention time to 10 secs
conf.setLong(JHAdminConfig.MR_HISTORY_MAX_AGE_MS, 10 * 1000);
// Set cleaner interval to 1 sec
conf.setLong(JHAdminConfig.MR_HISTORY_CLEANER_INTERVAL_MS, 1 * 1000);
doReturn(conf).when(jobHistory).createConf();
// Do refresh job retention settings
jobHistory.refreshJobRetentionSettings();
// Cleaner interval should be updated
assertEquals(1 * 1000L, jobHistory.getCleanerInterval());
// Today's jhist file will also be deleted now since it falls below the
// retention threshold
verify(fileInfo, timeout(20000).times(2)).delete();
}
@Test
public void testRefreshLoadedJobCacheUnSupportedOperation() {
jobHistory = spy(new JobHistory());
HistoryStorage storage = new HistoryStorage() {
// a bare-bones storage stub; only the type matters for this test
@Override
public void setHistoryFileManager(HistoryFileManager hsManager) {
// no-op
}
@Override
public JobsInfo getPartialJobs(Long offset, Long count, String user,
String queue, Long sBegin, Long sEnd, Long fBegin, Long fEnd,
JobState jobState) {
return null;
}
@Override
public Job getFullJob(JobId jobId) {
return null;
}
@Override
public Map<JobId, Job> getAllPartialJobs() {
return null;
}
};
doReturn(storage).when(jobHistory).createHistoryStorage();
jobHistory.init(new Configuration());
jobHistory.start();
Throwable th = null;
try {
jobHistory.refreshLoadedJobCache();
} catch (Exception e) {
th = e;
}
assertTrue(th instanceof UnsupportedOperationException);
}
@After
public void cleanUp() {
if (jobHistory != null) {
jobHistory.stop();
}
}
}
| 9,829
| 34.615942
| 80
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestHistoryFileManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileNotFoundException;
import java.util.UUID;
import java.util.List;
import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo;
import org.apache.hadoop.test.CoreTestDriver;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.util.Clock;
import org.apache.hadoop.yarn.util.ControlledClock;
import org.apache.hadoop.yarn.util.SystemClock;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;
import static org.mockito.Mockito.*;
public class TestHistoryFileManager {
private static MiniDFSCluster dfsCluster = null;
private static MiniDFSCluster dfsCluster2 = null;
private static String coreSitePath;
@Rule
public TestName name = new TestName();
@BeforeClass
public static void setUpClass() throws Exception {
coreSitePath = "." + File.separator + "target" + File.separator +
"test-classes" + File.separator + "core-site.xml";
Configuration conf = new HdfsConfiguration();
Configuration conf2 = new HdfsConfiguration();
dfsCluster = new MiniDFSCluster.Builder(conf).build();
conf2.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,
conf.get(MiniDFSCluster.HDFS_MINIDFS_BASEDIR) + "_2");
dfsCluster2 = new MiniDFSCluster.Builder(conf2).build();
}
@AfterClass
public static void cleanUpClass() throws Exception {
dfsCluster.shutdown();
dfsCluster2.shutdown();
}
@After
public void cleanTest() throws Exception {
new File(coreSitePath).delete();
}
private String getDoneDirNameForTest() {
return "/" + name.getMethodName();
}
private String getIntermediateDoneDirNameForTest() {
return "/intermediate_" + name.getMethodName();
}
private void testTryCreateHistoryDirs(Configuration conf, boolean expected)
throws Exception {
conf.set(JHAdminConfig.MR_HISTORY_DONE_DIR, getDoneDirNameForTest());
conf.set(JHAdminConfig.MR_HISTORY_INTERMEDIATE_DONE_DIR, getIntermediateDoneDirNameForTest());
HistoryFileManager hfm = new HistoryFileManager();
hfm.conf = conf;
Assert.assertEquals(expected, hfm.tryCreatingHistoryDirs(false));
}
@Test
public void testCreateDirsWithoutFileSystem() throws Exception {
Configuration conf = new YarnConfiguration();
conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://localhost:1");
testTryCreateHistoryDirs(conf, false);
}
@Test
public void testCreateDirsWithFileSystem() throws Exception {
dfsCluster.getFileSystem().setSafeMode(
HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
Assert.assertFalse(dfsCluster.getFileSystem().isInSafeMode());
testTryCreateHistoryDirs(dfsCluster.getConfiguration(0), true);
}
@Test
public void testCreateDirsWithAdditionalFileSystem() throws Exception {
dfsCluster.getFileSystem().setSafeMode(
HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
dfsCluster2.getFileSystem().setSafeMode(
HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
Assert.assertFalse(dfsCluster.getFileSystem().isInSafeMode());
Assert.assertFalse(dfsCluster2.getFileSystem().isInSafeMode());
// Set default configuration to the first cluster
Configuration conf = new Configuration(false);
conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
dfsCluster.getURI().toString());
FileOutputStream os = new FileOutputStream(coreSitePath);
conf.writeXml(os);
os.close();
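    // Writing core-site.xml onto the test classpath makes dfsCluster the
    // default file system, even though the history configuration passed in
    // below comes from dfsCluster2.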
testTryCreateHistoryDirs(dfsCluster2.getConfiguration(0), true);
// Directories should be created only in the default file system (dfsCluster)
Assert.assertTrue(dfsCluster.getFileSystem()
.exists(new Path(getDoneDirNameForTest())));
Assert.assertTrue(dfsCluster.getFileSystem()
.exists(new Path(getIntermediateDoneDirNameForTest())));
Assert.assertFalse(dfsCluster2.getFileSystem()
.exists(new Path(getDoneDirNameForTest())));
Assert.assertFalse(dfsCluster2.getFileSystem()
.exists(new Path(getIntermediateDoneDirNameForTest())));
}
@Test
public void testCreateDirsWithFileSystemInSafeMode() throws Exception {
dfsCluster.getFileSystem().setSafeMode(
HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
Assert.assertTrue(dfsCluster.getFileSystem().isInSafeMode());
testTryCreateHistoryDirs(dfsCluster.getConfiguration(0), false);
}
private void testCreateHistoryDirs(Configuration conf, Clock clock)
throws Exception {
conf.set(JHAdminConfig.MR_HISTORY_DONE_DIR, "/" + UUID.randomUUID());
conf.set(JHAdminConfig.MR_HISTORY_INTERMEDIATE_DONE_DIR, "/" + UUID.randomUUID());
HistoryFileManager hfm = new HistoryFileManager();
hfm.conf = conf;
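    // Poll for the directories every 500 ms and give up after 2000 ms of
    // (possibly simulated) clock time.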
hfm.createHistoryDirs(clock, 500, 2000);
}
@Test
public void testCreateDirsWithFileSystemBecomingAvailBeforeTimeout()
throws Exception {
dfsCluster.getFileSystem().setSafeMode(
HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
Assert.assertTrue(dfsCluster.getFileSystem().isInSafeMode());
new Thread() {
@Override
public void run() {
try {
Thread.sleep(500);
dfsCluster.getFileSystem().setSafeMode(
HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
          Assert.assertFalse(dfsCluster.getFileSystem().isInSafeMode());
} catch (Exception ex) {
Assert.fail(ex.toString());
}
}
}.start();
testCreateHistoryDirs(dfsCluster.getConfiguration(0), new SystemClock());
}
@Test(expected = YarnRuntimeException.class)
public void testCreateDirsWithFileSystemNotBecomingAvailBeforeTimeout()
throws Exception {
dfsCluster.getFileSystem().setSafeMode(
HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
Assert.assertTrue(dfsCluster.getFileSystem().isInSafeMode());
final ControlledClock clock = new ControlledClock(new SystemClock());
clock.setTime(1);
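    // A background thread advances the controlled clock past the 2000 ms
    // timeout, so createHistoryDirs should give up with a YarnRuntimeException
    // while the file system is still in safe mode.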
new Thread() {
@Override
public void run() {
try {
Thread.sleep(500);
clock.setTime(3000);
} catch (Exception ex) {
Assert.fail(ex.toString());
}
}
}.start();
testCreateHistoryDirs(dfsCluster.getConfiguration(0), clock);
}
@Test
public void testScanDirectory() throws Exception {
Path p = new Path("any");
FileContext fc = mock(FileContext.class);
when(fc.makeQualified(p)).thenReturn(p);
when(fc.listStatus(p)).thenThrow(new FileNotFoundException());
List<FileStatus> lfs = HistoryFileManager.scanDirectory(p, fc, null);
    // Primarily, success is that no exception was thrown. It is also nice
    // to check that the result is non-null.
Assert.assertNotNull(lfs);
}
@Test
public void testHistoryFileInfoSummaryFileNotExist() throws Exception {
HistoryFileManagerTest hmTest = new HistoryFileManagerTest();
String job = "job_1410889000000_123456";
Path summaryFile = new Path(job + ".summary");
JobIndexInfo jobIndexInfo = new JobIndexInfo();
jobIndexInfo.setJobId(TypeConverter.toYarn(JobID.forName(job)));
Configuration conf = dfsCluster.getConfiguration(0);
conf.set(JHAdminConfig.MR_HISTORY_DONE_DIR,
"/" + UUID.randomUUID());
conf.set(JHAdminConfig.MR_HISTORY_INTERMEDIATE_DONE_DIR,
"/" + UUID.randomUUID());
hmTest.serviceInit(conf);
HistoryFileInfo info = hmTest.getHistoryFileInfo(null, null,
summaryFile, jobIndexInfo, false);
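    // Moving the files to the done directory must tolerate a summary file
    // that was never created.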
info.moveToDone();
Assert.assertFalse(info.didMoveFail());
}
static class HistoryFileManagerTest extends HistoryFileManager {
public HistoryFileManagerTest() {
super();
}
public HistoryFileInfo getHistoryFileInfo(Path historyFile,
Path confFile, Path summaryFile, JobIndexInfo jobIndexInfo,
boolean isInDone) {
return new HistoryFileInfo(historyFile, confFile, summaryFile,
jobIndexInfo, isInDone);
}
}
}
| 9,628
| 36.034615
| 98
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestCompletedTask.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs;
import java.util.Map;
import java.util.TreeMap;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo;
import org.apache.hadoop.mapreduce.v2.api.records.Phase;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
import org.apache.hadoop.mapreduce.v2.hs.CompletedTask;
import org.junit.Test;
import static org.mockito.Mockito.*;
import static org.junit.Assert.*;
public class TestCompletedTask{
@Test (timeout=5000)
public void testTaskStartTimes() {
TaskId taskId = mock(TaskId.class);
TaskInfo taskInfo = mock(TaskInfo.class);
Map<TaskAttemptID, TaskAttemptInfo> taskAttempts
= new TreeMap<TaskAttemptID, TaskAttemptInfo>();
TaskAttemptID id = new TaskAttemptID("0", 0, TaskType.MAP, 0, 0);
TaskAttemptInfo info = mock(TaskAttemptInfo.class);
when(info.getAttemptId()).thenReturn(id);
    when(info.getStartTime()).thenReturn(10L);
taskAttempts.put(id, info);
id = new TaskAttemptID("1", 0, TaskType.MAP, 1, 1);
info = mock(TaskAttemptInfo.class);
when(info.getAttemptId()).thenReturn(id);
    when(info.getStartTime()).thenReturn(20L);
taskAttempts.put(id, info);
when(taskInfo.getAllTaskAttempts()).thenReturn(taskAttempts);
CompletedTask task = new CompletedTask(taskId, taskInfo);
TaskReport report = task.getReport();
    // Make sure the startTime returned by the report is the lesser of the
    // attempt launch times
    assertEquals(10, report.getStartTime());
}
/**
* test some methods of CompletedTaskAttempt
*/
@Test (timeout=5000)
  public void testCompletedTaskAttempt() {
    TaskAttemptInfo attemptInfo = mock(TaskAttemptInfo.class);
    when(attemptInfo.getRackname()).thenReturn("Rackname");
    when(attemptInfo.getShuffleFinishTime()).thenReturn(11L);
    when(attemptInfo.getSortFinishTime()).thenReturn(12L);
    when(attemptInfo.getShufflePort()).thenReturn(10);
    JobID jobId = new JobID("12345", 0);
    TaskID taskId = new TaskID(jobId, TaskType.REDUCE, 0);
    TaskAttemptID taskAttemptId = new TaskAttemptID(taskId, 0);
    when(attemptInfo.getAttemptId()).thenReturn(taskAttemptId);
    CompletedTaskAttempt taskAttempt =
        new CompletedTaskAttempt(null, attemptInfo);
    assertEquals("Rackname", taskAttempt.getNodeRackName());
    assertEquals(Phase.CLEANUP, taskAttempt.getPhase());
    assertTrue(taskAttempt.isFinished());
    assertEquals(11L, taskAttempt.getShuffleFinishTime());
    assertEquals(12L, taskAttempt.getSortFinishTime());
    assertEquals(10, taskAttempt.getShufflePort());
  }
}
| 3,741
| 37.979167
| 80
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestHistoryServerFileSystemStateStoreService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.argThat;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.isA;
import static org.mockito.Mockito.spy;
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.v2.api.MRDelegationTokenIdentifier;
import org.apache.hadoop.mapreduce.v2.hs.HistoryServerStateStoreService.HistoryServerState;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.apache.hadoop.security.token.delegation.DelegationKey;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.mockito.ArgumentMatcher;
public class TestHistoryServerFileSystemStateStoreService {
private static final File testDir = new File(
System.getProperty("test.build.data",
System.getProperty("java.io.tmpdir")),
"TestHistoryServerFileSystemStateStoreService");
private Configuration conf;
@Before
public void setup() {
FileUtil.fullyDelete(testDir);
testDir.mkdirs();
conf = new Configuration();
conf.setBoolean(JHAdminConfig.MR_HS_RECOVERY_ENABLE, true);
conf.setClass(JHAdminConfig.MR_HS_STATE_STORE,
HistoryServerFileSystemStateStoreService.class,
HistoryServerStateStoreService.class);
conf.set(JHAdminConfig.MR_HS_FS_STATE_STORE_URI,
testDir.getAbsoluteFile().toURI().toString());
}
@After
public void cleanup() {
FileUtil.fullyDelete(testDir);
}
private HistoryServerStateStoreService createAndStartStore()
throws IOException {
HistoryServerStateStoreService store =
HistoryServerStateStoreServiceFactory.getStore(conf);
assertTrue("Factory did not create a filesystem store",
store instanceof HistoryServerFileSystemStateStoreService);
store.init(conf);
store.start();
return store;
}
private void testTokenStore(String stateStoreUri) throws IOException {
conf.set(JHAdminConfig.MR_HS_FS_STATE_STORE_URI, stateStoreUri);
HistoryServerStateStoreService store = createAndStartStore();
HistoryServerState state = store.loadState();
assertTrue("token state not empty", state.tokenState.isEmpty());
assertTrue("key state not empty", state.tokenMasterKeyState.isEmpty());
final DelegationKey key1 = new DelegationKey(1, 2, "keyData1".getBytes());
final MRDelegationTokenIdentifier token1 =
new MRDelegationTokenIdentifier(new Text("tokenOwner1"),
new Text("tokenRenewer1"), new Text("tokenUser1"));
token1.setSequenceNumber(1);
final Long tokenDate1 = 1L;
final MRDelegationTokenIdentifier token2 =
new MRDelegationTokenIdentifier(new Text("tokenOwner2"),
new Text("tokenRenewer2"), new Text("tokenUser2"));
token2.setSequenceNumber(12345678);
final Long tokenDate2 = 87654321L;
store.storeTokenMasterKey(key1);
try {
store.storeTokenMasterKey(key1);
fail("redundant store of key undetected");
} catch (IOException e) {
// expected
}
store.storeToken(token1, tokenDate1);
store.storeToken(token2, tokenDate2);
try {
store.storeToken(token1, tokenDate1);
fail("redundant store of token undetected");
} catch (IOException e) {
// expected
}
store.close();
store = createAndStartStore();
state = store.loadState();
assertEquals("incorrect loaded token count", 2, state.tokenState.size());
assertTrue("missing token 1", state.tokenState.containsKey(token1));
assertEquals("incorrect token 1 date", tokenDate1,
state.tokenState.get(token1));
assertTrue("missing token 2", state.tokenState.containsKey(token2));
assertEquals("incorrect token 2 date", tokenDate2,
state.tokenState.get(token2));
assertEquals("incorrect master key count", 1,
state.tokenMasterKeyState.size());
assertTrue("missing master key 1",
state.tokenMasterKeyState.contains(key1));
final DelegationKey key2 = new DelegationKey(3, 4, "keyData2".getBytes());
final DelegationKey key3 = new DelegationKey(5, 6, "keyData3".getBytes());
final MRDelegationTokenIdentifier token3 =
new MRDelegationTokenIdentifier(new Text("tokenOwner3"),
new Text("tokenRenewer3"), new Text("tokenUser3"));
token3.setSequenceNumber(12345679);
final Long tokenDate3 = 87654321L;
store.removeToken(token1);
store.storeTokenMasterKey(key2);
final Long newTokenDate2 = 975318642L;
store.updateToken(token2, newTokenDate2);
store.removeTokenMasterKey(key1);
store.storeTokenMasterKey(key3);
store.storeToken(token3, tokenDate3);
store.close();
store = createAndStartStore();
state = store.loadState();
assertEquals("incorrect loaded token count", 2, state.tokenState.size());
assertFalse("token 1 not removed", state.tokenState.containsKey(token1));
assertTrue("missing token 2", state.tokenState.containsKey(token2));
assertEquals("incorrect token 2 date", newTokenDate2,
state.tokenState.get(token2));
assertTrue("missing token 3", state.tokenState.containsKey(token3));
assertEquals("incorrect token 3 date", tokenDate3,
state.tokenState.get(token3));
assertEquals("incorrect master key count", 2,
state.tokenMasterKeyState.size());
assertFalse("master key 1 not removed",
state.tokenMasterKeyState.contains(key1));
assertTrue("missing master key 2",
state.tokenMasterKeyState.contains(key2));
assertTrue("missing master key 3",
state.tokenMasterKeyState.contains(key3));
}
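  // Hypothetical helper (not used above): the token-identifier construction
  // pattern in testTokenStore repeats, so a sketch like this could reduce
  // the duplication.
  private static MRDelegationTokenIdentifier newTokenId(String suffix,
      int sequenceNumber) {
    // Build owner/renewer/user names from a common suffix, as the test does.
    MRDelegationTokenIdentifier token = new MRDelegationTokenIdentifier(
        new Text("tokenOwner" + suffix), new Text("tokenRenewer" + suffix),
        new Text("tokenUser" + suffix));
    token.setSequenceNumber(sequenceNumber);
    return token;
  }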
@Test
public void testTokenStore() throws IOException {
testTokenStore(testDir.getAbsoluteFile().toURI().toString());
}
@Test
public void testTokenStoreHdfs() throws IOException {
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
conf = cluster.getConfiguration(0);
try {
testTokenStore("/tmp/historystore");
} finally {
cluster.shutdown();
}
}
@Test
public void testUpdatedTokenRecovery() throws IOException {
IOException intentionalErr = new IOException("intentional error");
FileSystem fs = FileSystem.getLocal(conf);
final FileSystem spyfs = spy(fs);
// make the update token process fail halfway through where we're left
// with just the temporary update file and no token file
ArgumentMatcher<Path> updateTmpMatcher = new ArgumentMatcher<Path>() {
@Override
public boolean matches(Object argument) {
if (argument instanceof Path) {
return ((Path) argument).getName().startsWith("update");
}
return false;
}
};
doThrow(intentionalErr)
.when(spyfs).rename(argThat(updateTmpMatcher), isA(Path.class));
conf.set(JHAdminConfig.MR_HS_FS_STATE_STORE_URI,
testDir.getAbsoluteFile().toURI().toString());
HistoryServerStateStoreService store =
new HistoryServerFileSystemStateStoreService() {
@Override
FileSystem createFileSystem() throws IOException {
return spyfs;
}
};
store.init(conf);
store.start();
final MRDelegationTokenIdentifier token1 =
new MRDelegationTokenIdentifier(new Text("tokenOwner1"),
new Text("tokenRenewer1"), new Text("tokenUser1"));
token1.setSequenceNumber(1);
final Long tokenDate1 = 1L;
store.storeToken(token1, tokenDate1);
final Long newTokenDate1 = 975318642L;
try {
store.updateToken(token1, newTokenDate1);
fail("intentional error not thrown");
} catch (IOException e) {
assertEquals(intentionalErr, e);
}
store.close();
// verify the update file is seen and parsed upon recovery when
// original token file is missing
store = createAndStartStore();
HistoryServerState state = store.loadState();
assertEquals("incorrect loaded token count", 1, state.tokenState.size());
assertTrue("missing token 1", state.tokenState.containsKey(token1));
assertEquals("incorrect token 1 date", newTokenDate1,
state.tokenState.get(token1));
store.close();
}
}
| 9,348
| 37.004065
| 91
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEntities.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs;
import static org.junit.Assert.assertEquals;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobACLsManager;
import org.apache.hadoop.mapred.TaskCompletionEvent;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
@RunWith(value = Parameterized.class)
public class TestJobHistoryEntities {
private final String historyFileName =
"job_1329348432655_0001-1329348443227-user-Sleep+job-1329348468601-10-1-SUCCEEDED-default.jhist";
private final String historyFileNameZeroReduceTasks =
"job_1416424547277_0002-1416424775281-root-TeraGen-1416424785433-2-0-SUCCEEDED-default-1416424779349.jhist";
private final String confFileName = "job_1329348432655_0001_conf.xml";
private final Configuration conf = new Configuration();
private final JobACLsManager jobAclsManager = new JobACLsManager(conf);
private boolean loadTasks;
  private JobId jobId = MRBuilderUtils.newJobId(1329348432655L, 1, 1);
Path fullHistoryPath =
new Path(this.getClass().getClassLoader().getResource(historyFileName)
.getFile());
Path fullHistoryPathZeroReduces =
new Path(this.getClass().getClassLoader().getResource(historyFileNameZeroReduceTasks)
.getFile());
Path fullConfPath =
new Path(this.getClass().getClassLoader().getResource(confFileName)
.getFile());
private CompletedJob completedJob;
public TestJobHistoryEntities(boolean loadTasks) throws Exception {
this.loadTasks = loadTasks;
}
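  // The Parameterized runner below executes every test twice: once with the
  // task data loaded eagerly (loadTasks == true) and once loaded lazily on
  // first access.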
@Parameters
public static Collection<Object[]> data() {
List<Object[]> list = new ArrayList<Object[]>(2);
list.add(new Object[] { true });
list.add(new Object[] { false });
return list;
}
/* Verify some expected values based on the history file */
@Test (timeout=100000)
public void testCompletedJob() throws Exception {
HistoryFileInfo info = mock(HistoryFileInfo.class);
when(info.getConfFile()).thenReturn(fullConfPath);
//Re-initialize to verify the delayed load.
completedJob =
new CompletedJob(conf, jobId, fullHistoryPath, loadTasks, "user",
info, jobAclsManager);
    //Verify tasks loaded based on the loadTasks parameter.
assertEquals(loadTasks, completedJob.tasksLoaded.get());
assertEquals(1, completedJob.getAMInfos().size());
assertEquals(10, completedJob.getCompletedMaps());
assertEquals(1, completedJob.getCompletedReduces());
assertEquals(12, completedJob.getTasks().size());
//Verify tasks loaded at this point.
assertEquals(true, completedJob.tasksLoaded.get());
assertEquals(10, completedJob.getTasks(TaskType.MAP).size());
assertEquals(2, completedJob.getTasks(TaskType.REDUCE).size());
assertEquals("user", completedJob.getUserName());
assertEquals(JobState.SUCCEEDED, completedJob.getState());
JobReport jobReport = completedJob.getReport();
assertEquals("user", jobReport.getUser());
assertEquals(JobState.SUCCEEDED, jobReport.getJobState());
}
@Test (timeout=100000)
  public void testCompletedJobReportWithZeroTasks() throws Exception {
HistoryFileInfo info = mock(HistoryFileInfo.class);
when(info.getConfFile()).thenReturn(fullConfPath);
completedJob =
new CompletedJob(conf, jobId, fullHistoryPathZeroReduces, loadTasks, "user",
info, jobAclsManager);
JobReport jobReport = completedJob.getReport();
    // Make sure that the number of reduces (completed and total) equals zero.
assertEquals(0, completedJob.getTotalReduces());
assertEquals(0, completedJob.getCompletedReduces());
// Verify that the reduce progress is 1.0 (not NaN)
assertEquals(1.0, jobReport.getReduceProgress(), 0.001);
}
@Test (timeout=10000)
public void testCompletedTask() throws Exception {
HistoryFileInfo info = mock(HistoryFileInfo.class);
when(info.getConfFile()).thenReturn(fullConfPath);
completedJob =
new CompletedJob(conf, jobId, fullHistoryPath, loadTasks, "user",
info, jobAclsManager);
TaskId mt1Id = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
TaskId rt1Id = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
Map<TaskId, Task> mapTasks = completedJob.getTasks(TaskType.MAP);
Map<TaskId, Task> reduceTasks = completedJob.getTasks(TaskType.REDUCE);
assertEquals(10, mapTasks.size());
assertEquals(2, reduceTasks.size());
Task mt1 = mapTasks.get(mt1Id);
assertEquals(1, mt1.getAttempts().size());
assertEquals(TaskState.SUCCEEDED, mt1.getState());
TaskReport mt1Report = mt1.getReport();
assertEquals(TaskState.SUCCEEDED, mt1Report.getTaskState());
assertEquals(mt1Id, mt1Report.getTaskId());
Task rt1 = reduceTasks.get(rt1Id);
assertEquals(1, rt1.getAttempts().size());
assertEquals(TaskState.SUCCEEDED, rt1.getState());
TaskReport rt1Report = rt1.getReport();
assertEquals(TaskState.SUCCEEDED, rt1Report.getTaskState());
assertEquals(rt1Id, rt1Report.getTaskId());
}
@Test (timeout=10000)
public void testCompletedTaskAttempt() throws Exception {
HistoryFileInfo info = mock(HistoryFileInfo.class);
when(info.getConfFile()).thenReturn(fullConfPath);
completedJob =
new CompletedJob(conf, jobId, fullHistoryPath, loadTasks, "user",
info, jobAclsManager);
TaskId mt1Id = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
TaskId rt1Id = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
TaskAttemptId mta1Id = MRBuilderUtils.newTaskAttemptId(mt1Id, 0);
TaskAttemptId rta1Id = MRBuilderUtils.newTaskAttemptId(rt1Id, 0);
Task mt1 = completedJob.getTask(mt1Id);
Task rt1 = completedJob.getTask(rt1Id);
TaskAttempt mta1 = mt1.getAttempt(mta1Id);
assertEquals(TaskAttemptState.SUCCEEDED, mta1.getState());
assertEquals("localhost:45454", mta1.getAssignedContainerMgrAddress());
assertEquals("localhost:9999", mta1.getNodeHttpAddress());
TaskAttemptReport mta1Report = mta1.getReport();
assertEquals(TaskAttemptState.SUCCEEDED, mta1Report.getTaskAttemptState());
assertEquals("localhost", mta1Report.getNodeManagerHost());
assertEquals(45454, mta1Report.getNodeManagerPort());
assertEquals(9999, mta1Report.getNodeManagerHttpPort());
TaskAttempt rta1 = rt1.getAttempt(rta1Id);
assertEquals(TaskAttemptState.SUCCEEDED, rta1.getState());
assertEquals("localhost:45454", rta1.getAssignedContainerMgrAddress());
assertEquals("localhost:9999", rta1.getNodeHttpAddress());
TaskAttemptReport rta1Report = rta1.getReport();
assertEquals(TaskAttemptState.SUCCEEDED, rta1Report.getTaskAttemptState());
assertEquals("localhost", rta1Report.getNodeManagerHost());
assertEquals(45454, rta1Report.getNodeManagerPort());
assertEquals(9999, rta1Report.getNodeManagerHttpPort());
}
/**
* Simple test of some methods of CompletedJob
* @throws Exception
*/
@Test (timeout=30000)
public void testGetTaskAttemptCompletionEvent() throws Exception{
HistoryFileInfo info = mock(HistoryFileInfo.class);
when(info.getConfFile()).thenReturn(fullConfPath);
completedJob =
new CompletedJob(conf, jobId, fullHistoryPath, loadTasks, "user",
info, jobAclsManager);
    TaskCompletionEvent[] events =
        completedJob.getMapAttemptCompletionEvents(0, 1000);
    assertEquals(10, completedJob.getMapAttemptCompletionEvents(0, 10).length);
    int currentEventId = 0;
    for (TaskCompletionEvent taskAttemptCompletionEvent : events) {
      int eventId = taskAttemptCompletionEvent.getEventId();
      assertTrue(eventId >= currentEventId);
      currentEventId = eventId;
    }
    assertNull(completedJob.loadConfFile());
    // job name
    assertEquals("Sleep job", completedJob.getName());
    // queue name
    assertEquals("default", completedJob.getQueueName());
    // progress
    assertEquals(1.0, completedJob.getProgress(), 0.001);
    // 12 completion events in total
    assertEquals(12, completedJob.getTaskAttemptCompletionEvents(0, 1000).length);
    // select the first 10 events
    assertEquals(10, completedJob.getTaskAttemptCompletionEvents(0, 10).length);
    // select events 5 through 11 (7 events starting at index 5)
    assertEquals(7, completedJob.getTaskAttemptCompletionEvents(5, 10).length);
    // a single, empty diagnostics entry means there were no errors
    assertEquals(1, completedJob.getDiagnostics().size());
    assertEquals("", completedJob.getDiagnostics().get(0));
    assertEquals(0, completedJob.getJobACLs().size());
}
}
| 10,399
| 43.255319
| 112
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHSWebApp.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs.webapp;
import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.APP_ID;
import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.ATTEMPT_STATE;
import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.JOB_ID;
import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.TASK_TYPE;
import static org.apache.hadoop.yarn.webapp.YarnWebParams.APP_OWNER;
import static org.apache.hadoop.yarn.webapp.YarnWebParams.CONTAINER_ID;
import static org.apache.hadoop.yarn.webapp.YarnWebParams.CONTAINER_LOG_TYPE;
import static org.apache.hadoop.yarn.webapp.YarnWebParams.ENTITY_STRING;
import static org.apache.hadoop.yarn.webapp.YarnWebParams.NM_NODENAME;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.verify;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.MRApp;
import org.apache.hadoop.mapreduce.v2.app.MockAppContext;
import org.apache.hadoop.mapreduce.v2.app.MockJobs;
import org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebApp;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.webapp.log.AggregatedLogsPage;
import org.apache.hadoop.yarn.webapp.test.WebAppTests;
import org.junit.Test;
import com.google.inject.AbstractModule;
import com.google.inject.Injector;
public class TestHSWebApp {
private static final Log LOG = LogFactory.getLog(TestHSWebApp.class);
@Test public void testAppControllerIndex() {
MockAppContext ctx = new MockAppContext(0, 1, 1, 1);
Injector injector = WebAppTests.createMockInjector(AppContext.class, ctx);
HsController controller = injector.getInstance(HsController.class);
controller.index();
assertEquals(ctx.getApplicationID().toString(), controller.get(APP_ID,""));
}
@Test public void testJobView() {
LOG.info("HsJobPage");
AppContext appContext = new MockAppContext(0, 1, 1, 1);
Map<String, String> params = TestAMWebApp.getJobParams(appContext);
WebAppTests.testPage(HsJobPage.class, AppContext.class, appContext, params);
}
@Test
public void testTasksView() {
LOG.info("HsTasksPage");
AppContext appContext = new MockAppContext(0, 1, 1, 1);
Map<String, String> params = TestAMWebApp.getTaskParams(appContext);
WebAppTests.testPage(HsTasksPage.class, AppContext.class, appContext,
params);
}
@Test
public void testTaskView() {
LOG.info("HsTaskPage");
AppContext appContext = new MockAppContext(0, 1, 1, 1);
Map<String, String> params = TestAMWebApp.getTaskParams(appContext);
WebAppTests
.testPage(HsTaskPage.class, AppContext.class, appContext, params);
}
@Test public void testAttemptsWithJobView() {
LOG.info("HsAttemptsPage with data");
MockAppContext ctx = new MockAppContext(0, 1, 1, 1);
JobId id = ctx.getAllJobs().keySet().iterator().next();
Map<String, String> params = new HashMap<String,String>();
params.put(JOB_ID, id.toString());
params.put(TASK_TYPE, "m");
params.put(ATTEMPT_STATE, "SUCCESSFUL");
WebAppTests.testPage(HsAttemptsPage.class, AppContext.class,
ctx, params);
}
@Test public void testAttemptsView() {
LOG.info("HsAttemptsPage");
AppContext appContext = new MockAppContext(0, 1, 1, 1);
Map<String, String> params = TestAMWebApp.getTaskParams(appContext);
WebAppTests.testPage(HsAttemptsPage.class, AppContext.class,
appContext, params);
}
@Test public void testConfView() {
LOG.info("HsConfPage");
WebAppTests.testPage(HsConfPage.class, AppContext.class,
new MockAppContext(0, 1, 1, 1));
}
@Test public void testAboutView() {
LOG.info("HsAboutPage");
WebAppTests.testPage(HsAboutPage.class, AppContext.class,
new MockAppContext(0, 1, 1, 1));
}
@Test public void testJobCounterView() {
LOG.info("JobCounterView");
AppContext appContext = new MockAppContext(0, 1, 1, 1);
Map<String, String> params = TestAMWebApp.getJobParams(appContext);
WebAppTests.testPage(HsCountersPage.class, AppContext.class,
appContext, params);
}
@Test public void testJobCounterViewForKilledJob() {
LOG.info("JobCounterViewForKilledJob");
AppContext appContext = new MockAppContext(0, 1, 1, 1, true);
Map<String, String> params = TestAMWebApp.getJobParams(appContext);
WebAppTests.testPage(HsCountersPage.class, AppContext.class,
appContext, params);
}
@Test public void testSingleCounterView() {
LOG.info("HsSingleCounterPage");
WebAppTests.testPage(HsSingleCounterPage.class, AppContext.class,
new MockAppContext(0, 1, 1, 1));
}
@Test
public void testLogsView1() throws IOException {
LOG.info("HsLogsPage");
Injector injector =
WebAppTests.testPage(AggregatedLogsPage.class, AppContext.class,
new MockAppContext(0, 1, 1, 1));
PrintWriter spyPw = WebAppTests.getPrintWriter(injector);
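    // With no container, node, or owner parameters supplied, the page should
    // render one error line per missing parameter.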
verify(spyPw).write("Cannot get container logs without a ContainerId");
verify(spyPw).write("Cannot get container logs without a NodeId");
verify(spyPw).write("Cannot get container logs without an app owner");
}
@Test
public void testLogsView2() throws IOException {
LOG.info("HsLogsPage with data");
MockAppContext ctx = new MockAppContext(0, 1, 1, 1);
Map<String, String> params = new HashMap<String, String>();
params.put(CONTAINER_ID, MRApp.newContainerId(1, 1, 333, 1)
.toString());
params.put(NM_NODENAME,
NodeId.newInstance(MockJobs.NM_HOST, MockJobs.NM_PORT).toString());
params.put(ENTITY_STRING, "container_10_0001_01_000001");
params.put(APP_OWNER, "owner");
Injector injector =
WebAppTests.testPage(AggregatedLogsPage.class, AppContext.class, ctx,
params);
PrintWriter spyPw = WebAppTests.getPrintWriter(injector);
verify(spyPw).write(
"Aggregation is not enabled. Try the nodemanager at "
+ MockJobs.NM_HOST + ":" + MockJobs.NM_PORT);
}
@Test
public void testLogsViewSingle() throws IOException {
LOG.info("HsLogsPage with params for single log and data limits");
MockAppContext ctx = new MockAppContext(0, 1, 1, 1);
Map<String, String> params = new HashMap<String, String>();
final Configuration conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, true);
params.put("start", "-2048");
params.put("end", "-1024");
params.put(CONTAINER_LOG_TYPE, "syslog");
params.put(CONTAINER_ID, MRApp.newContainerId(1, 1, 333, 1)
.toString());
params.put(NM_NODENAME,
NodeId.newInstance(MockJobs.NM_HOST, MockJobs.NM_PORT).toString());
params.put(ENTITY_STRING, "container_10_0001_01_000001");
params.put(APP_OWNER, "owner");
Injector injector =
WebAppTests.testPage(AggregatedLogsPage.class, AppContext.class, ctx,
params, new AbstractModule() {
@Override
protected void configure() {
bind(Configuration.class).toInstance(conf);
}
});
PrintWriter spyPw = WebAppTests.getPrintWriter(injector);
verify(spyPw).write(
"Logs not available for container_10_0001_01_000001."
+ " Aggregation may not be complete, "
+ "Check back later or try the nodemanager at "
+ MockJobs.NM_HOST + ":" + MockJobs.NM_PORT);
}
@Test
public void testLogsViewBadStartEnd() throws IOException {
LOG.info("HsLogsPage with bad start/end params");
MockAppContext ctx = new MockAppContext(0, 1, 1, 1);
Map<String, String> params = new HashMap<String, String>();
params.put("start", "foo");
params.put("end", "bar");
params.put(CONTAINER_ID, MRApp.newContainerId(1, 1, 333, 1)
.toString());
params.put(NM_NODENAME,
NodeId.newInstance(MockJobs.NM_HOST, MockJobs.NM_PORT).toString());
params.put(ENTITY_STRING, "container_10_0001_01_000001");
params.put(APP_OWNER, "owner");
Injector injector =
WebAppTests.testPage(AggregatedLogsPage.class, AppContext.class, ctx,
params);
PrintWriter spyPw = WebAppTests.getPrintWriter(injector);
verify(spyPw).write("Invalid log start value: foo");
verify(spyPw).write("Invalid log end value: bar");
}
}
| 9,553
| 38.808333
| 80
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesJobsQuery.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs.webapp;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Map;
import javax.ws.rs.core.MediaType;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.hs.HistoryContext;
import org.apache.hadoop.mapreduce.v2.hs.MockHistoryContext;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
import org.apache.hadoop.yarn.webapp.WebApp;
import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
import org.codehaus.jettison.json.JSONArray;
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;
import org.junit.Before;
import org.junit.Test;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.servlet.GuiceServletContextListener;
import com.google.inject.servlet.ServletModule;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.ClientResponse.Status;
import com.sun.jersey.api.client.WebResource;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
import com.sun.jersey.test.framework.JerseyTest;
import com.sun.jersey.test.framework.WebAppDescriptor;
/**
* Test the history server Rest API for getting jobs with various query
* parameters.
*
* /ws/v1/history/mapreduce/jobs?{query=value}
*/
public class TestHsWebServicesJobsQuery extends JerseyTest {
private static Configuration conf = new Configuration();
private static MockHistoryContext appContext;
private static HsWebApp webApp;
private Injector injector = Guice.createInjector(new ServletModule() {
@Override
protected void configureServlets() {
appContext = new MockHistoryContext(3, 2, 1);
webApp = mock(HsWebApp.class);
when(webApp.name()).thenReturn("hsmockwebapp");
bind(JAXBContextResolver.class);
bind(HsWebServices.class);
bind(GenericExceptionHandler.class);
bind(WebApp.class).toInstance(webApp);
bind(AppContext.class).toInstance(appContext);
bind(HistoryContext.class).toInstance(appContext);
bind(Configuration.class).toInstance(conf);
serve("/*").with(GuiceContainer.class);
}
});
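  // The ServletModule above binds a MockHistoryContext with three mock jobs
  // into the Jersey test container, so every query below runs against the
  // same small, in-memory job set.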
public class GuiceServletConfig extends GuiceServletContextListener {
@Override
protected Injector getInjector() {
return injector;
}
}
@Before
@Override
public void setUp() throws Exception {
super.setUp();
}
public TestHsWebServicesJobsQuery() {
super(new WebAppDescriptor.Builder(
"org.apache.hadoop.mapreduce.v2.hs.webapp")
.contextListenerClass(GuiceServletConfig.class)
.filterClass(com.google.inject.servlet.GuiceFilter.class)
.contextPath("jersey-guice-filter").servletPath("/").build());
}
@Test
public void testJobsQueryStateNone() throws JSONException, Exception {
WebResource r = resource();
ArrayList<JobState> JOB_STATES =
new ArrayList<JobState>(Arrays.asList(JobState.values()));
// find a state that isn't in use
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
JOB_STATES.remove(entry.getValue().getState());
}
assertTrue("No unused job states", JOB_STATES.size() > 0);
JobState notInUse = JOB_STATES.get(0);
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").queryParam("state", notInUse.toString())
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
assertEquals("jobs is not null", JSONObject.NULL, json.get("jobs"));
}
@Test
public void testJobsQueryState() throws JSONException, Exception {
WebResource r = resource();
// we only create 3 jobs and it cycles through states so we should have 3 unique states
Map<JobId, Job> jobsMap = appContext.getAllJobs();
String queryState = "BOGUS";
JobId jid = null;
for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
jid = entry.getValue().getID();
queryState = entry.getValue().getState().toString();
break;
}
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").queryParam("state", queryState)
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject jobs = json.getJSONObject("jobs");
JSONArray arr = jobs.getJSONArray("job");
assertEquals("incorrect number of elements", 1, arr.length());
JSONObject info = arr.getJSONObject(0);
Job job = appContext.getPartialJob(jid);
VerifyJobsUtils.verifyHsJobPartial(info, job);
}
@Test
public void testJobsQueryStateInvalid() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").queryParam("state", "InvalidState")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject msg = response.getEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements", 3, exception.length());
String message = exception.getString("message");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
WebServicesTestUtils
.checkStringContains(
"exception message",
"org.apache.hadoop.mapreduce.v2.api.records.JobState.InvalidState",
message);
WebServicesTestUtils.checkStringMatch("exception type",
"IllegalArgumentException", type);
WebServicesTestUtils.checkStringMatch("exception classname",
"java.lang.IllegalArgumentException", classname);
}
@Test
public void testJobsQueryUserNone() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").queryParam("user", "bogus")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
assertEquals("jobs is not null", JSONObject.NULL, json.get("jobs"));
}
@Test
public void testJobsQueryUser() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").queryParam("user", "mock")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
System.out.println(json.toString());
assertEquals("incorrect number of elements", 1, json.length());
JSONObject jobs = json.getJSONObject("jobs");
JSONArray arr = jobs.getJSONArray("job");
assertEquals("incorrect number of elements", 3, arr.length());
// just verify one of them.
JSONObject info = arr.getJSONObject(0);
Job job = appContext.getPartialJob(MRApps.toJobID(info.getString("id")));
VerifyJobsUtils.verifyHsJobPartial(info, job);
}
@Test
public void testJobsQueryLimit() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").queryParam("limit", "2")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject jobs = json.getJSONObject("jobs");
JSONArray arr = jobs.getJSONArray("job");
// make sure we get 2 back
assertEquals("incorrect number of elements", 2, arr.length());
}
@Test
public void testJobsQueryLimitInvalid() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").queryParam("limit", "-1")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject msg = response.getEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements", 3, exception.length());
String message = exception.getString("message");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
WebServicesTestUtils.checkStringMatch("exception message",
"java.lang.Exception: limit value must be greater then 0", message);
WebServicesTestUtils.checkStringMatch("exception type",
"BadRequestException", type);
WebServicesTestUtils.checkStringMatch("exception classname",
"org.apache.hadoop.yarn.webapp.BadRequestException", classname);
}
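  // Hypothetical refactoring sketch (not called by the tests above): the
  // BAD_REQUEST verification repeated across the negative tests could be
  // collapsed into a helper along these lines.
  private void verifyBadRequest(ClientResponse response, String expectedMessage)
      throws JSONException {
    assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject exception = response.getEntity(JSONObject.class)
        .getJSONObject("RemoteException");
    assertEquals("incorrect number of elements", 3, exception.length());
    WebServicesTestUtils.checkStringMatch("exception message", expectedMessage,
        exception.getString("message"));
    WebServicesTestUtils.checkStringMatch("exception type",
        "BadRequestException", exception.getString("exception"));
    WebServicesTestUtils.checkStringMatch("exception classname",
        "org.apache.hadoop.yarn.webapp.BadRequestException",
        exception.getString("javaClassName"));
  }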
@Test
public void testJobsQueryQueue() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").queryParam("queue", "mockqueue")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject jobs = json.getJSONObject("jobs");
JSONArray arr = jobs.getJSONArray("job");
assertEquals("incorrect number of elements", 3, arr.length());
}
@Test
public void testJobsQueryQueueNonExist() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").queryParam("queue", "bogus")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
assertEquals("jobs is not null", JSONObject.NULL, json.get("jobs"));
}
@Test
public void testJobsQueryStartTimeEnd() throws JSONException, Exception {
WebResource r = resource();
// the mockJobs start time is the current time - some random amount
Long now = System.currentTimeMillis();
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs")
.queryParam("startedTimeEnd", String.valueOf(now))
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject jobs = json.getJSONObject("jobs");
JSONArray arr = jobs.getJSONArray("job");
assertEquals("incorrect number of elements", 3, arr.length());
}
@Test
public void testJobsQueryStartTimeBegin() throws JSONException, Exception {
WebResource r = resource();
// the mockJobs start time is the current time - some random amount
Long now = System.currentTimeMillis();
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs")
.queryParam("startedTimeBegin", String.valueOf(now))
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
assertEquals("jobs is not null", JSONObject.NULL, json.get("jobs"));
}
@Test
public void testJobsQueryStartTimeBeginEnd() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
int size = jobsMap.size();
ArrayList<Long> startTime = new ArrayList<Long>(size);
// figure out the middle start Time
for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
startTime.add(entry.getValue().getReport().getStartTime());
}
Collections.sort(startTime);
    assertTrue("Error: we must have at least 3 jobs", size >= 3);
long midStartTime = startTime.get(size - 2);
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs")
.queryParam("startedTimeBegin", String.valueOf(40000))
.queryParam("startedTimeEnd", String.valueOf(midStartTime))
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject jobs = json.getJSONObject("jobs");
JSONArray arr = jobs.getJSONArray("job");
assertEquals("incorrect number of elements", size - 1, arr.length());
}
@Test
public void testJobsQueryStartTimeBeginEndInvalid() throws JSONException,
Exception {
WebResource r = resource();
Long now = System.currentTimeMillis();
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs")
.queryParam("startedTimeBegin", String.valueOf(now))
.queryParam("startedTimeEnd", String.valueOf(40000))
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject msg = response.getEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements", 3, exception.length());
String message = exception.getString("message");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
WebServicesTestUtils
.checkStringMatch(
"exception message",
"java.lang.Exception: startedTimeEnd must be greater than startTimeBegin",
message);
WebServicesTestUtils.checkStringMatch("exception type",
"BadRequestException", type);
WebServicesTestUtils.checkStringMatch("exception classname",
"org.apache.hadoop.yarn.webapp.BadRequestException", classname);
}
@Test
public void testJobsQueryStartTimeInvalidformat() throws JSONException,
Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").queryParam("startedTimeBegin", "efsd")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject msg = response.getEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements", 3, exception.length());
String message = exception.getString("message");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
WebServicesTestUtils
.checkStringMatch(
"exception message",
"java.lang.Exception: Invalid number format: For input string: \"efsd\"",
message);
WebServicesTestUtils.checkStringMatch("exception type",
"BadRequestException", type);
WebServicesTestUtils.checkStringMatch("exception classname",
"org.apache.hadoop.yarn.webapp.BadRequestException", classname);
}
@Test
public void testJobsQueryStartTimeEndInvalidformat() throws JSONException,
Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").queryParam("startedTimeEnd", "efsd")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject msg = response.getEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements", 3, exception.length());
String message = exception.getString("message");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
WebServicesTestUtils
.checkStringMatch(
"exception message",
"java.lang.Exception: Invalid number format: For input string: \"efsd\"",
message);
WebServicesTestUtils.checkStringMatch("exception type",
"BadRequestException", type);
WebServicesTestUtils.checkStringMatch("exception classname",
"org.apache.hadoop.yarn.webapp.BadRequestException", classname);
}
@Test
public void testJobsQueryStartTimeNegative() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs")
.queryParam("startedTimeBegin", String.valueOf(-1000))
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject msg = response.getEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements", 3, exception.length());
String message = exception.getString("message");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
WebServicesTestUtils
.checkStringMatch("exception message",
"java.lang.Exception: startedTimeBegin must be greater than 0",
message);
WebServicesTestUtils.checkStringMatch("exception type",
"BadRequestException", type);
WebServicesTestUtils.checkStringMatch("exception classname",
"org.apache.hadoop.yarn.webapp.BadRequestException", classname);
}
@Test
public void testJobsQueryStartTimeEndNegative() throws JSONException,
Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs")
.queryParam("startedTimeEnd", String.valueOf(-1000))
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject msg = response.getEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements", 3, exception.length());
String message = exception.getString("message");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
WebServicesTestUtils.checkStringMatch("exception message",
"java.lang.Exception: startedTimeEnd must be greater than 0", message);
WebServicesTestUtils.checkStringMatch("exception type",
"BadRequestException", type);
WebServicesTestUtils.checkStringMatch("exception classname",
"org.apache.hadoop.yarn.webapp.BadRequestException", classname);
}
@Test
public void testJobsQueryFinishTimeEndNegative() throws JSONException,
Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs")
.queryParam("finishedTimeEnd", String.valueOf(-1000))
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject msg = response.getEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements", 3, exception.length());
String message = exception.getString("message");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
WebServicesTestUtils.checkStringMatch("exception message",
"java.lang.Exception: finishedTimeEnd must be greater than 0", message);
WebServicesTestUtils.checkStringMatch("exception type",
"BadRequestException", type);
WebServicesTestUtils.checkStringMatch("exception classname",
"org.apache.hadoop.yarn.webapp.BadRequestException", classname);
}
@Test
public void testJobsQueryFinishTimeBeginNegative() throws JSONException,
Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs")
.queryParam("finishedTimeBegin", String.valueOf(-1000))
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject msg = response.getEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements", 3, exception.length());
String message = exception.getString("message");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
WebServicesTestUtils.checkStringMatch("exception message",
"java.lang.Exception: finishedTimeBegin must be greater than 0",
message);
WebServicesTestUtils.checkStringMatch("exception type",
"BadRequestException", type);
WebServicesTestUtils.checkStringMatch("exception classname",
"org.apache.hadoop.yarn.webapp.BadRequestException", classname);
}
@Test
public void testJobsQueryFinishTimeBeginEndInvalid() throws JSONException,
Exception {
WebResource r = resource();
    long now = System.currentTimeMillis();
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs")
.queryParam("finishedTimeBegin", String.valueOf(now))
.queryParam("finishedTimeEnd", String.valueOf(40000))
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject msg = response.getEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements", 3, exception.length());
String message = exception.getString("message");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
WebServicesTestUtils
.checkStringMatch(
"exception message",
"java.lang.Exception: finishedTimeEnd must be greater than finishedTimeBegin",
message);
WebServicesTestUtils.checkStringMatch("exception type",
"BadRequestException", type);
WebServicesTestUtils.checkStringMatch("exception classname",
"org.apache.hadoop.yarn.webapp.BadRequestException", classname);
}
@Test
public void testJobsQueryFinishTimeInvalidformat() throws JSONException,
Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").queryParam("finishedTimeBegin", "efsd")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject msg = response.getEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements", 3, exception.length());
String message = exception.getString("message");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
WebServicesTestUtils
.checkStringMatch(
"exception message",
"java.lang.Exception: Invalid number format: For input string: \"efsd\"",
message);
WebServicesTestUtils.checkStringMatch("exception type",
"BadRequestException", type);
WebServicesTestUtils.checkStringMatch("exception classname",
"org.apache.hadoop.yarn.webapp.BadRequestException", classname);
}
@Test
public void testJobsQueryFinishTimeEndInvalidformat() throws JSONException,
Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").queryParam("finishedTimeEnd", "efsd")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject msg = response.getEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements", 3, exception.length());
String message = exception.getString("message");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
WebServicesTestUtils
.checkStringMatch(
"exception message",
"java.lang.Exception: Invalid number format: For input string: \"efsd\"",
message);
WebServicesTestUtils.checkStringMatch("exception type",
"BadRequestException", type);
WebServicesTestUtils.checkStringMatch("exception classname",
"org.apache.hadoop.yarn.webapp.BadRequestException", classname);
}
@Test
public void testJobsQueryFinishTimeBegin() throws JSONException, Exception {
WebResource r = resource();
// the mockJobs finish time is the current time + some random amount
    long now = System.currentTimeMillis();
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs")
.queryParam("finishedTimeBegin", String.valueOf(now))
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject jobs = json.getJSONObject("jobs");
JSONArray arr = jobs.getJSONArray("job");
assertEquals("incorrect number of elements", 3, arr.length());
}
@Test
public void testJobsQueryFinishTimeEnd() throws JSONException, Exception {
WebResource r = resource();
// the mockJobs finish time is the current time + some random amount
    long now = System.currentTimeMillis();
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs")
.queryParam("finishedTimeEnd", String.valueOf(now))
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
assertEquals("jobs is not null", JSONObject.NULL, json.get("jobs"));
}
@Test
public void testJobsQueryFinishTimeBeginEnd() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
int size = jobsMap.size();
    // figure out the mid end time - we expect at least 3 jobs
ArrayList<Long> finishTime = new ArrayList<Long>(size);
for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
finishTime.add(entry.getValue().getReport().getFinishTime());
}
Collections.sort(finishTime);
assertTrue("Error we must have atleast 3 jobs", size >= 3);
long midFinishTime = finishTime.get(size - 2);
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs")
.queryParam("finishedTimeBegin", String.valueOf(40000))
.queryParam("finishedTimeEnd", String.valueOf(midFinishTime))
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject jobs = json.getJSONObject("jobs");
JSONArray arr = jobs.getJSONArray("job");
assertEquals("incorrect number of elements", size - 1, arr.length());
}
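  // For reference, the request built above is equivalent to something like
  // the following (hypothetical host/port; illustrative values only):
  //
  //   GET /ws/v1/history/mapreduce/jobs?finishedTimeBegin=40000&finishedTimeEnd=<midFinishTime>
  //   Accept: application/json
  //
  // Given the size - 1 expectation, the finishedTimeEnd bound appears to be
  // inclusive: every job except the one with the largest finish time is
  // returned.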
}
| 31,214
| 45.244444
| 91
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesJobs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs.webapp;
import static org.apache.hadoop.yarn.util.StringHelper.join;
import static org.apache.hadoop.yarn.util.StringHelper.ujoin;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.StringReader;
import java.util.Map;
import javax.ws.rs.core.MediaType;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.hs.HistoryContext;
import org.apache.hadoop.mapreduce.v2.hs.MockHistoryContext;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
import org.apache.hadoop.yarn.webapp.WebApp;
import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
import org.codehaus.jettison.json.JSONArray;
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;
import org.junit.Before;
import org.junit.Test;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.NodeList;
import org.xml.sax.InputSource;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.servlet.GuiceServletContextListener;
import com.google.inject.servlet.ServletModule;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.ClientResponse.Status;
import com.sun.jersey.api.client.UniformInterfaceException;
import com.sun.jersey.api.client.WebResource;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
import com.sun.jersey.test.framework.JerseyTest;
import com.sun.jersey.test.framework.WebAppDescriptor;
/**
 * Test the history server REST API for getting jobs, a specific job, job
* counters, and job attempts.
*
* /ws/v1/history/mapreduce/jobs /ws/v1/history/mapreduce/jobs/{jobid}
* /ws/v1/history/mapreduce/jobs/{jobid}/counters
* /ws/v1/history/mapreduce/jobs/{jobid}/jobattempts
*/
public class TestHsWebServicesJobs extends JerseyTest {
private static Configuration conf = new Configuration();
private static MockHistoryContext appContext;
private static HsWebApp webApp;
private Injector injector = Guice.createInjector(new ServletModule() {
@Override
protected void configureServlets() {
appContext = new MockHistoryContext(0, 1, 2, 1, false);
webApp = mock(HsWebApp.class);
when(webApp.name()).thenReturn("hsmockwebapp");
bind(JAXBContextResolver.class);
bind(HsWebServices.class);
bind(GenericExceptionHandler.class);
bind(WebApp.class).toInstance(webApp);
bind(AppContext.class).toInstance(appContext);
bind(HistoryContext.class).toInstance(appContext);
bind(Configuration.class).toInstance(conf);
serve("/*").with(GuiceContainer.class);
}
});
public class GuiceServletConfig extends GuiceServletContextListener {
@Override
protected Injector getInjector() {
return injector;
}
}
@Before
@Override
public void setUp() throws Exception {
super.setUp();
}
public TestHsWebServicesJobs() {
super(new WebAppDescriptor.Builder(
"org.apache.hadoop.mapreduce.v2.hs.webapp")
.contextListenerClass(GuiceServletConfig.class)
.filterClass(com.google.inject.servlet.GuiceFilter.class)
.contextPath("jersey-guice-filter").servletPath("/").build());
}
@Test
public void testJobs() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject jobs = json.getJSONObject("jobs");
JSONArray arr = jobs.getJSONArray("job");
assertEquals("incorrect number of elements", 1, arr.length());
JSONObject info = arr.getJSONObject(0);
Job job = appContext.getPartialJob(MRApps.toJobID(info.getString("id")));
VerifyJobsUtils.verifyHsJobPartial(info, job);
}
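  // Sketch of the response shape asserted above; field values are
  // illustrative, not taken from a real run:
  //
  //   {"jobs": {"job": [
  //     {"id": "job_1326381300833_0001", "user": "user1", "name": "mockjob",
  //      "state": "SUCCEEDED", "queue": "default",
  //      "startTime": 1326381344489, "finishTime": 1326381356010,
  //      "mapsTotal": 1, "mapsCompleted": 1,
  //      "reducesTotal": 1, "reducesCompleted": 1}]}}
  //
  // VerifyJobsUtils.verifyHsJobPartial compares these "partial view" fields
  // against the mock job held by the history context.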
@Test
public void testJobsSlash() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs/").accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject jobs = json.getJSONObject("jobs");
JSONArray arr = jobs.getJSONArray("job");
assertEquals("incorrect number of elements", 1, arr.length());
JSONObject info = arr.getJSONObject(0);
Job job = appContext.getPartialJob(MRApps.toJobID(info.getString("id")));
VerifyJobsUtils.verifyHsJobPartial(info, job);
}
@Test
public void testJobsDefault() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject jobs = json.getJSONObject("jobs");
JSONArray arr = jobs.getJSONArray("job");
assertEquals("incorrect number of elements", 1, arr.length());
JSONObject info = arr.getJSONObject(0);
Job job = appContext.getPartialJob(MRApps.toJobID(info.getString("id")));
VerifyJobsUtils.verifyHsJobPartial(info, job);
}
@Test
public void testJobsXML() throws Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").accept(MediaType.APPLICATION_XML)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
String xml = response.getEntity(String.class);
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
InputSource is = new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom = db.parse(is);
NodeList jobs = dom.getElementsByTagName("jobs");
assertEquals("incorrect number of elements", 1, jobs.getLength());
NodeList job = dom.getElementsByTagName("job");
assertEquals("incorrect number of elements", 1, job.getLength());
verifyHsJobPartialXML(job, appContext);
}
public void verifyHsJobPartialXML(NodeList nodes, MockHistoryContext appContext) {
assertEquals("incorrect number of elements", 1, nodes.getLength());
for (int i = 0; i < nodes.getLength(); i++) {
Element element = (Element) nodes.item(i);
Job job = appContext.getPartialJob(MRApps.toJobID(WebServicesTestUtils
.getXmlString(element, "id")));
assertNotNull("Job not found - output incorrect", job);
VerifyJobsUtils.verifyHsJobGeneric(job,
WebServicesTestUtils.getXmlString(element, "id"),
WebServicesTestUtils.getXmlString(element, "user"),
WebServicesTestUtils.getXmlString(element, "name"),
WebServicesTestUtils.getXmlString(element, "state"),
WebServicesTestUtils.getXmlString(element, "queue"),
WebServicesTestUtils.getXmlLong(element, "startTime"),
WebServicesTestUtils.getXmlLong(element, "finishTime"),
WebServicesTestUtils.getXmlInt(element, "mapsTotal"),
WebServicesTestUtils.getXmlInt(element, "mapsCompleted"),
WebServicesTestUtils.getXmlInt(element, "reducesTotal"),
WebServicesTestUtils.getXmlInt(element, "reducesCompleted"));
}
}
public void verifyHsJobXML(NodeList nodes, AppContext appContext) {
assertEquals("incorrect number of elements", 1, nodes.getLength());
for (int i = 0; i < nodes.getLength(); i++) {
Element element = (Element) nodes.item(i);
Job job = appContext.getJob(MRApps.toJobID(WebServicesTestUtils
.getXmlString(element, "id")));
assertNotNull("Job not found - output incorrect", job);
VerifyJobsUtils.verifyHsJobGeneric(job,
WebServicesTestUtils.getXmlString(element, "id"),
WebServicesTestUtils.getXmlString(element, "user"),
WebServicesTestUtils.getXmlString(element, "name"),
WebServicesTestUtils.getXmlString(element, "state"),
WebServicesTestUtils.getXmlString(element, "queue"),
WebServicesTestUtils.getXmlLong(element, "startTime"),
WebServicesTestUtils.getXmlLong(element, "finishTime"),
WebServicesTestUtils.getXmlInt(element, "mapsTotal"),
WebServicesTestUtils.getXmlInt(element, "mapsCompleted"),
WebServicesTestUtils.getXmlInt(element, "reducesTotal"),
WebServicesTestUtils.getXmlInt(element, "reducesCompleted"));
      // restricted access fields - present only if security and ACLs are set
VerifyJobsUtils.verifyHsJobGenericSecure(job,
WebServicesTestUtils.getXmlBoolean(element, "uberized"),
WebServicesTestUtils.getXmlString(element, "diagnostics"),
WebServicesTestUtils.getXmlLong(element, "avgMapTime"),
WebServicesTestUtils.getXmlLong(element, "avgReduceTime"),
WebServicesTestUtils.getXmlLong(element, "avgShuffleTime"),
WebServicesTestUtils.getXmlLong(element, "avgMergeTime"),
WebServicesTestUtils.getXmlInt(element, "failedReduceAttempts"),
WebServicesTestUtils.getXmlInt(element, "killedReduceAttempts"),
WebServicesTestUtils.getXmlInt(element, "successfulReduceAttempts"),
WebServicesTestUtils.getXmlInt(element, "failedMapAttempts"),
WebServicesTestUtils.getXmlInt(element, "killedMapAttempts"),
WebServicesTestUtils.getXmlInt(element, "successfulMapAttempts"));
}
}
@Test
public void testJobId() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").path(jobId)
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("job");
VerifyJobsUtils.verifyHsJob(info, appContext.getJob(id));
}
}
@Test
public void testJobIdSlash() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").path(jobId + "/")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("job");
VerifyJobsUtils.verifyHsJob(info, appContext.getJob(id));
}
}
@Test
public void testJobIdDefault() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").path(jobId).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("job");
VerifyJobsUtils.verifyHsJob(info, appContext.getJob(id));
}
}
@Test
public void testJobIdNonExist() throws JSONException, Exception {
WebResource r = resource();
try {
r.path("ws").path("v1").path("history").path("mapreduce").path("jobs")
.path("job_0_1234").get(JSONObject.class);
fail("should have thrown exception on invalid uri");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject msg = response.getEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements", 3, exception.length());
String message = exception.getString("message");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
WebServicesTestUtils.checkStringMatch("exception message",
"java.lang.Exception: job, job_0_1234, is not found", message);
WebServicesTestUtils.checkStringMatch("exception type",
"NotFoundException", type);
WebServicesTestUtils.checkStringMatch("exception classname",
"org.apache.hadoop.yarn.webapp.NotFoundException", classname);
}
}
@Test
public void testJobIdInvalid() throws JSONException, Exception {
WebResource r = resource();
try {
r.path("ws").path("v1").path("history").path("mapreduce").path("jobs")
.path("job_foo").accept(MediaType.APPLICATION_JSON)
.get(JSONObject.class);
fail("should have thrown exception on invalid uri");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject msg = response.getEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements", 3, exception.length());
String message = exception.getString("message");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
verifyJobIdInvalid(message, type, classname);
}
}
  // verify that the default exception output format is JSON
@Test
public void testJobIdInvalidDefault() throws JSONException, Exception {
WebResource r = resource();
try {
r.path("ws").path("v1").path("history").path("mapreduce").path("jobs")
.path("job_foo").get(JSONObject.class);
fail("should have thrown exception on invalid uri");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject msg = response.getEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements", 3, exception.length());
String message = exception.getString("message");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
verifyJobIdInvalid(message, type, classname);
}
}
// test that the exception output works in XML
@Test
public void testJobIdInvalidXML() throws JSONException, Exception {
WebResource r = resource();
try {
r.path("ws").path("v1").path("history").path("mapreduce").path("jobs")
.path("job_foo").accept(MediaType.APPLICATION_XML)
.get(JSONObject.class);
fail("should have thrown exception on invalid uri");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
String msg = response.getEntity(String.class);
System.out.println(msg);
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
InputSource is = new InputSource();
is.setCharacterStream(new StringReader(msg));
Document dom = db.parse(is);
NodeList nodes = dom.getElementsByTagName("RemoteException");
Element element = (Element) nodes.item(0);
String message = WebServicesTestUtils.getXmlString(element, "message");
String type = WebServicesTestUtils.getXmlString(element, "exception");
String classname = WebServicesTestUtils.getXmlString(element,
"javaClassName");
verifyJobIdInvalid(message, type, classname);
}
}
private void verifyJobIdInvalid(String message, String type, String classname) {
WebServicesTestUtils.checkStringMatch("exception message",
"java.lang.Exception: JobId string : job_foo is not properly formed",
message);
WebServicesTestUtils.checkStringMatch("exception type",
"NotFoundException", type);
WebServicesTestUtils.checkStringMatch("exception classname",
"org.apache.hadoop.yarn.webapp.NotFoundException", classname);
}
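  // The error payload decoded by the invalid-id tests has this general shape
  // (values illustrative):
  //
  //   {"RemoteException": {
  //     "message": "java.lang.Exception: JobId string : job_foo is not properly formed",
  //     "exception": "NotFoundException",
  //     "javaClassName": "org.apache.hadoop.yarn.webapp.NotFoundException"}}
  //
  // The XML variant wraps the same three fields in a <RemoteException>
  // element, which testJobIdInvalidXML walks with the DOM parser.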
@Test
public void testJobIdInvalidBogus() throws JSONException, Exception {
WebResource r = resource();
try {
r.path("ws").path("v1").path("history").path("mapreduce").path("jobs")
.path("bogusfoo").get(JSONObject.class);
fail("should have thrown exception on invalid uri");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject msg = response.getEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements", 3, exception.length());
String message = exception.getString("message");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
WebServicesTestUtils.checkStringMatch("exception message",
"java.lang.Exception: JobId string : "
+ "bogusfoo is not properly formed", message);
WebServicesTestUtils.checkStringMatch("exception type",
"NotFoundException", type);
WebServicesTestUtils.checkStringMatch("exception classname",
"org.apache.hadoop.yarn.webapp.NotFoundException", classname);
}
}
@Test
public void testJobIdXML() throws Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").path(jobId)
.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
String xml = response.getEntity(String.class);
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
InputSource is = new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom = db.parse(is);
NodeList job = dom.getElementsByTagName("job");
verifyHsJobXML(job, appContext);
}
}
@Test
public void testJobCounters() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").path(jobId).path("counters")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("jobCounters");
verifyHsJobCounters(info, appContext.getJob(id));
}
}
@Test
public void testJobCountersSlash() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").path(jobId).path("counters/")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("jobCounters");
verifyHsJobCounters(info, appContext.getJob(id));
}
}
@Test
public void testJobCountersForKilledJob() throws Exception {
WebResource r = resource();
appContext = new MockHistoryContext(0, 1, 1, 1, true);
injector = Guice.createInjector(new ServletModule() {
@Override
protected void configureServlets() {
webApp = mock(HsWebApp.class);
when(webApp.name()).thenReturn("hsmockwebapp");
bind(JAXBContextResolver.class);
bind(HsWebServices.class);
bind(GenericExceptionHandler.class);
bind(WebApp.class).toInstance(webApp);
bind(AppContext.class).toInstance(appContext);
bind(HistoryContext.class).toInstance(appContext);
bind(Configuration.class).toInstance(conf);
serve("/*").with(GuiceContainer.class);
}
});
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").path(jobId).path("counters/")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("jobCounters");
WebServicesTestUtils.checkStringMatch("id", MRApps.toString(id),
info.getString("id"));
assertTrue("Job shouldn't contain any counters", info.length() == 1);
}
}
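  // In other words, a killed job's counters resource is expected to reduce
  // to just the id, roughly: {"jobCounters": {"id": "job_..."}} with no
  // "counterGroup" array present.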
@Test
public void testJobCountersDefault() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").path(jobId).path("counters/")
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("jobCounters");
verifyHsJobCounters(info, appContext.getJob(id));
}
}
@Test
public void testJobCountersXML() throws Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").path(jobId).path("counters")
.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
String xml = response.getEntity(String.class);
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
InputSource is = new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom = db.parse(is);
NodeList info = dom.getElementsByTagName("jobCounters");
verifyHsJobCountersXML(info, appContext.getJob(id));
}
}
public void verifyHsJobCounters(JSONObject info, Job job)
throws JSONException {
assertEquals("incorrect number of elements", 2, info.length());
WebServicesTestUtils.checkStringMatch("id", MRApps.toString(job.getID()),
info.getString("id"));
    // just do simple verification of the fields - not that the data
    // in the fields is correct
JSONArray counterGroups = info.getJSONArray("counterGroup");
for (int i = 0; i < counterGroups.length(); i++) {
JSONObject counterGroup = counterGroups.getJSONObject(i);
String name = counterGroup.getString("counterGroupName");
assertTrue("name not set", (name != null && !name.isEmpty()));
JSONArray counters = counterGroup.getJSONArray("counter");
for (int j = 0; j < counters.length(); j++) {
JSONObject counter = counters.getJSONObject(j);
String counterName = counter.getString("name");
assertTrue("counter name not set",
(counterName != null && !counterName.isEmpty()));
long mapValue = counter.getLong("mapCounterValue");
assertTrue("mapCounterValue >= 0", mapValue >= 0);
long reduceValue = counter.getLong("reduceCounterValue");
assertTrue("reduceCounterValue >= 0", reduceValue >= 0);
long totalValue = counter.getLong("totalCounterValue");
assertTrue("totalCounterValue >= 0", totalValue >= 0);
}
}
}
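  // Sketch of the "jobCounters" payload walked above (group and counter
  // names are illustrative examples, not assertions about this mock's
  // content):
  //
  //   {"jobCounters": {"id": "job_...", "counterGroup": [
  //     {"counterGroupName": "org.apache.hadoop.mapreduce.FileSystemCounter",
  //      "counter": [{"name": "FILE_BYTES_READ", "mapCounterValue": 0,
  //                   "reduceCounterValue": 0, "totalCounterValue": 0}]}]}}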
public void verifyHsJobCountersXML(NodeList nodes, Job job) {
for (int i = 0; i < nodes.getLength(); i++) {
Element element = (Element) nodes.item(i);
assertNotNull("Job not found - output incorrect", job);
WebServicesTestUtils.checkStringMatch("id", MRApps.toString(job.getID()),
WebServicesTestUtils.getXmlString(element, "id"));
      // just do simple verification of the fields - not that the data
      // in the fields is correct
NodeList groups = element.getElementsByTagName("counterGroup");
for (int j = 0; j < groups.getLength(); j++) {
Element counters = (Element) groups.item(j);
assertNotNull("should have counters in the web service info", counters);
String name = WebServicesTestUtils.getXmlString(counters,
"counterGroupName");
assertTrue("name not set", (name != null && !name.isEmpty()));
NodeList counterArr = counters.getElementsByTagName("counter");
for (int z = 0; z < counterArr.getLength(); z++) {
Element counter = (Element) counterArr.item(z);
String counterName = WebServicesTestUtils.getXmlString(counter,
"name");
assertTrue("counter name not set",
(counterName != null && !counterName.isEmpty()));
long mapValue = WebServicesTestUtils.getXmlLong(counter,
"mapCounterValue");
assertTrue("mapCounterValue not >= 0", mapValue >= 0);
long reduceValue = WebServicesTestUtils.getXmlLong(counter,
"reduceCounterValue");
assertTrue("reduceCounterValue >= 0", reduceValue >= 0);
long totalValue = WebServicesTestUtils.getXmlLong(counter,
"totalCounterValue");
assertTrue("totalCounterValue >= 0", totalValue >= 0);
}
}
}
}
@Test
public void testJobAttempts() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").path(jobId).path("jobattempts")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("jobAttempts");
verifyHsJobAttempts(info, appContext.getJob(id));
}
}
@Test
public void testJobAttemptsSlash() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").path(jobId).path("jobattempts/")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("jobAttempts");
verifyHsJobAttempts(info, appContext.getJob(id));
}
}
@Test
public void testJobAttemptsDefault() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").path(jobId).path("jobattempts")
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("jobAttempts");
verifyHsJobAttempts(info, appContext.getJob(id));
}
}
@Test
public void testJobAttemptsXML() throws Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").path(jobId).path("jobattempts")
.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
String xml = response.getEntity(String.class);
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
InputSource is = new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom = db.parse(is);
NodeList attempts = dom.getElementsByTagName("jobAttempts");
assertEquals("incorrect number of elements", 1, attempts.getLength());
NodeList info = dom.getElementsByTagName("jobAttempt");
verifyHsJobAttemptsXML(info, appContext.getJob(id));
}
}
public void verifyHsJobAttempts(JSONObject info, Job job)
throws JSONException {
JSONArray attempts = info.getJSONArray("jobAttempt");
assertEquals("incorrect number of elements", 2, attempts.length());
for (int i = 0; i < attempts.length(); i++) {
JSONObject attempt = attempts.getJSONObject(i);
verifyHsJobAttemptsGeneric(job, attempt.getString("nodeHttpAddress"),
attempt.getString("nodeId"), attempt.getInt("id"),
attempt.getLong("startTime"), attempt.getString("containerId"),
attempt.getString("logsLink"));
}
}
public void verifyHsJobAttemptsXML(NodeList nodes, Job job) {
assertEquals("incorrect number of elements", 2, nodes.getLength());
for (int i = 0; i < nodes.getLength(); i++) {
Element element = (Element) nodes.item(i);
verifyHsJobAttemptsGeneric(job,
WebServicesTestUtils.getXmlString(element, "nodeHttpAddress"),
WebServicesTestUtils.getXmlString(element, "nodeId"),
WebServicesTestUtils.getXmlInt(element, "id"),
WebServicesTestUtils.getXmlLong(element, "startTime"),
WebServicesTestUtils.getXmlString(element, "containerId"),
WebServicesTestUtils.getXmlString(element, "logsLink"));
}
}
public void verifyHsJobAttemptsGeneric(Job job, String nodeHttpAddress,
String nodeId, int id, long startTime, String containerId, String logsLink) {
boolean attemptFound = false;
for (AMInfo amInfo : job.getAMInfos()) {
if (amInfo.getAppAttemptId().getAttemptId() == id) {
attemptFound = true;
String nmHost = amInfo.getNodeManagerHost();
int nmHttpPort = amInfo.getNodeManagerHttpPort();
int nmPort = amInfo.getNodeManagerPort();
WebServicesTestUtils.checkStringMatch("nodeHttpAddress", nmHost + ":"
+ nmHttpPort, nodeHttpAddress);
WebServicesTestUtils.checkStringMatch("nodeId",
NodeId.newInstance(nmHost, nmPort).toString(), nodeId);
assertTrue("startime not greater than 0", startTime > 0);
WebServicesTestUtils.checkStringMatch("containerId", amInfo
.getContainerId().toString(), containerId);
String localLogsLink = join(
"hsmockwebapp",
ujoin("logs", nodeId, containerId, MRApps.toString(job.getID()),
job.getUserName()));
assertTrue("logsLink", logsLink.contains(localLogsLink));
}
}
assertTrue("attempt: " + id + " was not found", attemptFound);
}
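  // With the mock web app name bound in this test, the logs link checked
  // above is a slash-joined path of roughly this form (illustrative ids):
  //
  //   hsmockwebapp/logs/<nmHost>:<nmPort>/<containerId>/<jobId>/<user>
  //
  // i.e. whatever join("hsmockwebapp", ujoin("logs", nodeId, containerId,
  // jobId, user)) produces; the assertion only requires containment.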
}
| 35,339
| 42.150183
| 84
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesAttempts.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs.webapp;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.StringReader;
import java.util.List;
import java.util.Map;
import javax.ws.rs.core.MediaType;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.hs.HistoryContext;
import org.apache.hadoop.mapreduce.v2.hs.MockHistoryContext;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
import org.apache.hadoop.yarn.webapp.WebApp;
import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
import org.codehaus.jettison.json.JSONArray;
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;
import org.junit.Before;
import org.junit.Test;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.NodeList;
import org.xml.sax.InputSource;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.servlet.GuiceServletContextListener;
import com.google.inject.servlet.ServletModule;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.ClientResponse.Status;
import com.sun.jersey.api.client.UniformInterfaceException;
import com.sun.jersey.api.client.WebResource;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
import com.sun.jersey.test.framework.JerseyTest;
import com.sun.jersey.test.framework.WebAppDescriptor;
/**
 * Test the history server REST API for getting task attempts, a
* specific task attempt, and task attempt counters
*
* /ws/v1/history/mapreduce/jobs/{jobid}/tasks/{taskid}/attempts
* /ws/v1/history/mapreduce/jobs/{jobid}/tasks/{taskid}/attempts/{attemptid}
* /ws/v1/history/mapreduce/jobs/{jobid}/tasks/{taskid}/attempts/{attemptid}/
* counters
*/
public class TestHsWebServicesAttempts extends JerseyTest {
private static Configuration conf = new Configuration();
private static HistoryContext appContext;
private static HsWebApp webApp;
private Injector injector = Guice.createInjector(new ServletModule() {
@Override
protected void configureServlets() {
appContext = new MockHistoryContext(0, 1, 2, 1);
webApp = mock(HsWebApp.class);
when(webApp.name()).thenReturn("hsmockwebapp");
bind(JAXBContextResolver.class);
bind(HsWebServices.class);
bind(GenericExceptionHandler.class);
bind(WebApp.class).toInstance(webApp);
bind(AppContext.class).toInstance(appContext);
bind(HistoryContext.class).toInstance(appContext);
bind(Configuration.class).toInstance(conf);
serve("/*").with(GuiceContainer.class);
}
});
public class GuiceServletConfig extends GuiceServletContextListener {
@Override
protected Injector getInjector() {
return injector;
}
}
@Before
@Override
public void setUp() throws Exception {
super.setUp();
}
public TestHsWebServicesAttempts() {
super(new WebAppDescriptor.Builder(
"org.apache.hadoop.mapreduce.v2.hs.webapp")
.contextListenerClass(GuiceServletConfig.class)
.filterClass(com.google.inject.servlet.GuiceFilter.class)
.contextPath("jersey-guice-filter").servletPath("/").build());
}
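  // The attempt tests below issue requests of this general form (hypothetical
  // host/port; illustrative ids) in JSON, XML, and default-Accept variants:
  //
  //   GET /ws/v1/history/mapreduce/jobs/<jobid>/tasks/<taskid>/attempts
  //   GET /ws/v1/history/mapreduce/jobs/<jobid>/tasks/<taskid>/attempts/<attemptid>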
@Test
public void testTaskAttempts() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
for (Task task : jobsMap.get(id).getTasks().values()) {
String tid = MRApps.toString(task.getID());
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid)
.path("attempts").accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
verifyHsTaskAttempts(json, task);
}
}
}
@Test
public void testTaskAttemptsSlash() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
for (Task task : jobsMap.get(id).getTasks().values()) {
String tid = MRApps.toString(task.getID());
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid)
.path("attempts/").accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
verifyHsTaskAttempts(json, task);
}
}
}
@Test
public void testTaskAttemptsDefault() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
for (Task task : jobsMap.get(id).getTasks().values()) {
String tid = MRApps.toString(task.getID());
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid)
.path("attempts").get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
verifyHsTaskAttempts(json, task);
}
}
}
@Test
public void testTaskAttemptsXML() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
for (Task task : jobsMap.get(id).getTasks().values()) {
String tid = MRApps.toString(task.getID());
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid)
.path("attempts").accept(MediaType.APPLICATION_XML)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
String xml = response.getEntity(String.class);
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
InputSource is = new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom = db.parse(is);
NodeList attempts = dom.getElementsByTagName("taskAttempts");
assertEquals("incorrect number of elements", 1, attempts.getLength());
NodeList nodes = dom.getElementsByTagName("taskAttempt");
verifyHsTaskAttemptsXML(nodes, task);
}
}
}
@Test
public void testTaskAttemptId() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
for (Task task : jobsMap.get(id).getTasks().values()) {
String tid = MRApps.toString(task.getID());
for (TaskAttempt att : task.getAttempts().values()) {
TaskAttemptId attemptid = att.getID();
String attid = MRApps.toString(attemptid);
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").path(jobId).path("tasks")
.path(tid).path("attempts").path(attid)
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("taskAttempt");
verifyHsTaskAttempt(info, att, task.getType());
}
}
}
}
@Test
public void testTaskAttemptIdSlash() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
for (Task task : jobsMap.get(id).getTasks().values()) {
String tid = MRApps.toString(task.getID());
for (TaskAttempt att : task.getAttempts().values()) {
TaskAttemptId attemptid = att.getID();
String attid = MRApps.toString(attemptid);
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").path(jobId).path("tasks")
.path(tid).path("attempts").path(attid + "/")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("taskAttempt");
verifyHsTaskAttempt(info, att, task.getType());
}
}
}
}
@Test
public void testTaskAttemptIdDefault() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
for (Task task : jobsMap.get(id).getTasks().values()) {
String tid = MRApps.toString(task.getID());
for (TaskAttempt att : task.getAttempts().values()) {
TaskAttemptId attemptid = att.getID();
String attid = MRApps.toString(attemptid);
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").path(jobId).path("tasks")
.path(tid).path("attempts").path(attid).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("taskAttempt");
verifyHsTaskAttempt(info, att, task.getType());
}
}
}
}
@Test
public void testTaskAttemptIdXML() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
for (Task task : jobsMap.get(id).getTasks().values()) {
String tid = MRApps.toString(task.getID());
for (TaskAttempt att : task.getAttempts().values()) {
TaskAttemptId attemptid = att.getID();
String attid = MRApps.toString(attemptid);
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").path(jobId).path("tasks")
.path(tid).path("attempts").path(attid)
.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
String xml = response.getEntity(String.class);
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
InputSource is = new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom = db.parse(is);
NodeList nodes = dom.getElementsByTagName("taskAttempt");
for (int i = 0; i < nodes.getLength(); i++) {
Element element = (Element) nodes.item(i);
verifyHsTaskAttemptXML(element, att, task.getType());
}
}
}
}
}
@Test
public void testTaskAttemptIdBogus() throws JSONException, Exception {
testTaskAttemptIdErrorGeneric("bogusid",
"java.lang.Exception: TaskAttemptId string : "
+ "bogusid is not properly formed");
}
@Test
public void testTaskAttemptIdNonExist() throws JSONException, Exception {
testTaskAttemptIdErrorGeneric(
"attempt_0_1234_m_000000_0",
"java.lang.Exception: Error getting info on task attempt id attempt_0_1234_m_000000_0");
}
@Test
public void testTaskAttemptIdInvalid() throws JSONException, Exception {
testTaskAttemptIdErrorGeneric("attempt_0_1234_d_000000_0",
"java.lang.Exception: Bad TaskType identifier. TaskAttemptId string : "
+ "attempt_0_1234_d_000000_0 is not properly formed.");
}
@Test
public void testTaskAttemptIdInvalid2() throws JSONException, Exception {
testTaskAttemptIdErrorGeneric("attempt_1234_m_000000_0",
"java.lang.Exception: TaskAttemptId string : "
+ "attempt_1234_m_000000_0 is not properly formed");
}
@Test
public void testTaskAttemptIdInvalid3() throws JSONException, Exception {
testTaskAttemptIdErrorGeneric("attempt_0_1234_m_000000",
"java.lang.Exception: TaskAttemptId string : "
+ "attempt_0_1234_m_000000 is not properly formed");
}
private void testTaskAttemptIdErrorGeneric(String attid, String error)
throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
for (Task task : jobsMap.get(id).getTasks().values()) {
String tid = MRApps.toString(task.getID());
try {
r.path("ws").path("v1").path("history").path("mapreduce")
.path("jobs").path(jobId).path("tasks").path(tid)
.path("attempts").path(attid).accept(MediaType.APPLICATION_JSON)
.get(JSONObject.class);
fail("should have thrown exception on invalid uri");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject msg = response.getEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements", 3, exception.length());
String message = exception.getString("message");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
WebServicesTestUtils.checkStringMatch("exception message", error,
message);
WebServicesTestUtils.checkStringMatch("exception type",
"NotFoundException", type);
WebServicesTestUtils.checkStringMatch("exception classname",
"org.apache.hadoop.yarn.webapp.NotFoundException", classname);
}
}
}
}
public void verifyHsTaskAttemptXML(Element element, TaskAttempt att,
TaskType ttype) {
verifyTaskAttemptGeneric(att, ttype,
WebServicesTestUtils.getXmlString(element, "id"),
WebServicesTestUtils.getXmlString(element, "state"),
WebServicesTestUtils.getXmlString(element, "type"),
WebServicesTestUtils.getXmlString(element, "rack"),
WebServicesTestUtils.getXmlString(element, "nodeHttpAddress"),
WebServicesTestUtils.getXmlString(element, "diagnostics"),
WebServicesTestUtils.getXmlString(element, "assignedContainerId"),
WebServicesTestUtils.getXmlLong(element, "startTime"),
WebServicesTestUtils.getXmlLong(element, "finishTime"),
WebServicesTestUtils.getXmlLong(element, "elapsedTime"),
WebServicesTestUtils.getXmlFloat(element, "progress"));
if (ttype == TaskType.REDUCE) {
verifyReduceTaskAttemptGeneric(att,
WebServicesTestUtils.getXmlLong(element, "shuffleFinishTime"),
WebServicesTestUtils.getXmlLong(element, "mergeFinishTime"),
WebServicesTestUtils.getXmlLong(element, "elapsedShuffleTime"),
WebServicesTestUtils.getXmlLong(element, "elapsedMergeTime"),
WebServicesTestUtils.getXmlLong(element, "elapsedReduceTime"));
}
}
public void verifyHsTaskAttempt(JSONObject info, TaskAttempt att,
TaskType ttype) throws JSONException {
if (ttype == TaskType.REDUCE) {
assertEquals("incorrect number of elements", 17, info.length());
} else {
assertEquals("incorrect number of elements", 12, info.length());
}
verifyTaskAttemptGeneric(att, ttype, info.getString("id"),
info.getString("state"), info.getString("type"),
info.getString("rack"), info.getString("nodeHttpAddress"),
info.getString("diagnostics"), info.getString("assignedContainerId"),
info.getLong("startTime"), info.getLong("finishTime"),
info.getLong("elapsedTime"), (float) info.getDouble("progress"));
if (ttype == TaskType.REDUCE) {
verifyReduceTaskAttemptGeneric(att, info.getLong("shuffleFinishTime"),
info.getLong("mergeFinishTime"), info.getLong("elapsedShuffleTime"),
info.getLong("elapsedMergeTime"), info.getLong("elapsedReduceTime"));
}
}
public void verifyHsTaskAttempts(JSONObject json, Task task)
throws JSONException {
assertEquals("incorrect number of elements", 1, json.length());
JSONObject attempts = json.getJSONObject("taskAttempts");
    assertEquals("incorrect number of elements", 1, attempts.length());
JSONArray arr = attempts.getJSONArray("taskAttempt");
for (TaskAttempt att : task.getAttempts().values()) {
TaskAttemptId id = att.getID();
String attid = MRApps.toString(id);
      boolean found = false;
      for (int i = 0; i < arr.length(); i++) {
        JSONObject info = arr.getJSONObject(i);
        if (attid.equals(info.getString("id"))) {
found = true;
verifyHsTaskAttempt(info, att, task.getType());
}
}
assertTrue("task attempt with id: " + attid
+ " not in web service output", found);
}
}
public void verifyHsTaskAttemptsXML(NodeList nodes, Task task) {
assertEquals("incorrect number of elements", 1, nodes.getLength());
for (TaskAttempt att : task.getAttempts().values()) {
TaskAttemptId id = att.getID();
String attid = MRApps.toString(id);
      boolean found = false;
      for (int i = 0; i < nodes.getLength(); i++) {
        Element element = (Element) nodes.item(i);
        if (attid.equals(WebServicesTestUtils.getXmlString(element, "id"))) {
found = true;
verifyHsTaskAttemptXML(element, att, task.getType());
}
}
assertTrue("task with id: " + attid + " not in web service output", found);
}
}
public void verifyTaskAttemptGeneric(TaskAttempt ta, TaskType ttype,
String id, String state, String type, String rack,
String nodeHttpAddress, String diagnostics, String assignedContainerId,
long startTime, long finishTime, long elapsedTime, float progress) {
TaskAttemptId attid = ta.getID();
String attemptId = MRApps.toString(attid);
WebServicesTestUtils.checkStringMatch("id", attemptId, id);
WebServicesTestUtils.checkStringMatch("type", ttype.toString(), type);
WebServicesTestUtils.checkStringMatch("state", ta.getState().toString(),
state);
WebServicesTestUtils.checkStringMatch("rack", ta.getNodeRackName(), rack);
WebServicesTestUtils.checkStringMatch("nodeHttpAddress",
ta.getNodeHttpAddress(), nodeHttpAddress);
String expectDiag = "";
List<String> diagnosticsList = ta.getDiagnostics();
if (diagnosticsList != null && !diagnostics.isEmpty()) {
StringBuffer b = new StringBuffer();
for (String diag : diagnosticsList) {
b.append(diag);
}
expectDiag = b.toString();
}
WebServicesTestUtils.checkStringMatch("diagnostics", expectDiag,
diagnostics);
WebServicesTestUtils.checkStringMatch("assignedContainerId",
ConverterUtils.toString(ta.getAssignedContainerID()),
assignedContainerId);
assertEquals("startTime wrong", ta.getLaunchTime(), startTime);
assertEquals("finishTime wrong", ta.getFinishTime(), finishTime);
assertEquals("elapsedTime wrong", finishTime - startTime, elapsedTime);
assertEquals("progress wrong", ta.getProgress() * 100, progress, 1e-3f);
}
public void verifyReduceTaskAttemptGeneric(TaskAttempt ta,
long shuffleFinishTime, long mergeFinishTime, long elapsedShuffleTime,
long elapsedMergeTime, long elapsedReduceTime) {
assertEquals("shuffleFinishTime wrong", ta.getShuffleFinishTime(),
shuffleFinishTime);
assertEquals("mergeFinishTime wrong", ta.getSortFinishTime(),
mergeFinishTime);
assertEquals("elapsedShuffleTime wrong",
ta.getShuffleFinishTime() - ta.getLaunchTime(), elapsedShuffleTime);
assertEquals("elapsedMergeTime wrong",
ta.getSortFinishTime() - ta.getShuffleFinishTime(), elapsedMergeTime);
assertEquals("elapsedReduceTime wrong",
ta.getFinishTime() - ta.getSortFinishTime(), elapsedReduceTime);
}
@Test
public void testTaskAttemptIdCounters() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
for (Task task : jobsMap.get(id).getTasks().values()) {
String tid = MRApps.toString(task.getID());
for (TaskAttempt att : task.getAttempts().values()) {
TaskAttemptId attemptid = att.getID();
String attid = MRApps.toString(attemptid);
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").path(jobId).path("tasks")
.path(tid).path("attempts").path(attid).path("counters")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("jobTaskAttemptCounters");
verifyHsJobTaskAttemptCounters(info, att);
}
}
}
}
@Test
public void testTaskAttemptIdXMLCounters() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
for (Task task : jobsMap.get(id).getTasks().values()) {
String tid = MRApps.toString(task.getID());
for (TaskAttempt att : task.getAttempts().values()) {
TaskAttemptId attemptid = att.getID();
String attid = MRApps.toString(attemptid);
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").path(jobId).path("tasks")
.path(tid).path("attempts").path(attid).path("counters")
.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
String xml = response.getEntity(String.class);
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
InputSource is = new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom = db.parse(is);
NodeList nodes = dom.getElementsByTagName("jobTaskAttemptCounters");
verifyHsTaskCountersXML(nodes, att);
}
}
}
}
public void verifyHsJobTaskAttemptCounters(JSONObject info, TaskAttempt att)
throws JSONException {
assertEquals("incorrect number of elements", 2, info.length());
WebServicesTestUtils.checkStringMatch("id", MRApps.toString(att.getID()),
info.getString("id"));
    // just do simple verification that the fields are present and sane -
    // not that the data in them is correct
JSONArray counterGroups = info.getJSONArray("taskAttemptCounterGroup");
for (int i = 0; i < counterGroups.length(); i++) {
JSONObject counterGroup = counterGroups.getJSONObject(i);
String name = counterGroup.getString("counterGroupName");
assertTrue("name not set", (name != null && !name.isEmpty()));
JSONArray counters = counterGroup.getJSONArray("counter");
for (int j = 0; j < counters.length(); j++) {
JSONObject counter = counters.getJSONObject(j);
String counterName = counter.getString("name");
assertTrue("name not set",
(counterName != null && !counterName.isEmpty()));
long value = counter.getLong("value");
assertTrue("value >= 0", value >= 0);
}
}
}
public void verifyHsTaskCountersXML(NodeList nodes, TaskAttempt att) {
for (int i = 0; i < nodes.getLength(); i++) {
Element element = (Element) nodes.item(i);
WebServicesTestUtils.checkStringMatch("id", MRApps.toString(att.getID()),
WebServicesTestUtils.getXmlString(element, "id"));
      // just do simple verification that the fields are present and sane -
      // not that the data in them is correct
NodeList groups = element.getElementsByTagName("taskAttemptCounterGroup");
for (int j = 0; j < groups.getLength(); j++) {
Element counters = (Element) groups.item(j);
assertNotNull("should have counters in the web service info", counters);
String name = WebServicesTestUtils.getXmlString(counters,
"counterGroupName");
assertTrue("name not set", (name != null && !name.isEmpty()));
NodeList counterArr = counters.getElementsByTagName("counter");
for (int z = 0; z < counterArr.getLength(); z++) {
Element counter = (Element) counterArr.item(z);
String counterName = WebServicesTestUtils.getXmlString(counter,
"name");
assertTrue("counter name not set",
(counterName != null && !counterName.isEmpty()));
long value = WebServicesTestUtils.getXmlLong(counter, "value");
assertTrue("value not >= 0", value >= 0);
}
}
}
}
}
| 27,998
| 39.934211
| 96
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesAcls.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs.webapp;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.Response.Status;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobACLsManager;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TaskCompletionEvent;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.hs.HistoryContext;
import org.apache.hadoop.mapreduce.v2.hs.MockHistoryContext;
import org.apache.hadoop.security.GroupMappingServiceProvider;
import org.apache.hadoop.security.Groups;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.yarn.webapp.WebApp;
import org.junit.Before;
import org.junit.Test;
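/**
 * Test that the history server web services enforce job ACLs: a user who is
 * not on a job's VIEW_JOB ACL gets UNAUTHORIZED, while a user on the ACL can
 * read the same resources.
 */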
public class TestHsWebServicesAcls {
  private static final String FRIENDLY_USER = "friendly";
  private static final String ENEMY_USER = "enemy";
private JobConf conf;
private HistoryContext ctx;
private String jobIdStr;
private String taskIdStr;
private String taskAttemptIdStr;
private HsWebServices hsWebServices;
@Before
public void setup() throws IOException {
this.conf = new JobConf();
this.conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
NullGroupsProvider.class.getName());
this.conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
Groups.getUserToGroupsMappingService(conf);
this.ctx = buildHistoryContext(this.conf);
WebApp webApp = mock(HsWebApp.class);
when(webApp.name()).thenReturn("hsmockwebapp");
    this.hsWebServices = new HsWebServices(ctx, conf, webApp);
this.hsWebServices.setResponse(mock(HttpServletResponse.class));
Job job = ctx.getAllJobs().values().iterator().next();
this.jobIdStr = job.getID().toString();
Task task = job.getTasks().values().iterator().next();
this.taskIdStr = task.getID().toString();
this.taskAttemptIdStr =
task.getAttempts().keySet().iterator().next().toString();
}
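  // Every test below follows the same pattern: issue the request as
  // ENEMY_USER and expect HTTP 401 (UNAUTHORIZED), then repeat it as
  // FRIENDLY_USER, who is on the job's view ACL, and expect it to succeed.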
@Test
public void testGetJobAcls() {
HttpServletRequest hsr = mock(HttpServletRequest.class);
when(hsr.getRemoteUser()).thenReturn(ENEMY_USER);
try {
hsWebServices.getJob(hsr, jobIdStr);
fail("enemy can access job");
} catch (WebApplicationException e) {
assertEquals(Status.UNAUTHORIZED,
Status.fromStatusCode(e.getResponse().getStatus()));
}
when(hsr.getRemoteUser()).thenReturn(FRIENDLY_USER);
hsWebServices.getJob(hsr, jobIdStr);
}
@Test
public void testGetJobCountersAcls() {
HttpServletRequest hsr = mock(HttpServletRequest.class);
when(hsr.getRemoteUser()).thenReturn(ENEMY_USER);
try {
hsWebServices.getJobCounters(hsr, jobIdStr);
fail("enemy can access job");
} catch (WebApplicationException e) {
assertEquals(Status.UNAUTHORIZED,
Status.fromStatusCode(e.getResponse().getStatus()));
}
when(hsr.getRemoteUser()).thenReturn(FRIENDLY_USER);
hsWebServices.getJobCounters(hsr, jobIdStr);
}
@Test
public void testGetJobConfAcls() {
HttpServletRequest hsr = mock(HttpServletRequest.class);
when(hsr.getRemoteUser()).thenReturn(ENEMY_USER);
try {
hsWebServices.getJobConf(hsr, jobIdStr);
fail("enemy can access job");
} catch (WebApplicationException e) {
assertEquals(Status.UNAUTHORIZED,
Status.fromStatusCode(e.getResponse().getStatus()));
}
when(hsr.getRemoteUser()).thenReturn(FRIENDLY_USER);
hsWebServices.getJobConf(hsr, jobIdStr);
}
@Test
public void testGetJobTasksAcls() {
HttpServletRequest hsr = mock(HttpServletRequest.class);
when(hsr.getRemoteUser()).thenReturn(ENEMY_USER);
try {
hsWebServices.getJobTasks(hsr, jobIdStr, "m");
fail("enemy can access job");
} catch (WebApplicationException e) {
assertEquals(Status.UNAUTHORIZED,
Status.fromStatusCode(e.getResponse().getStatus()));
}
when(hsr.getRemoteUser()).thenReturn(FRIENDLY_USER);
hsWebServices.getJobTasks(hsr, jobIdStr, "m");
}
@Test
public void testGetJobTaskAcls() {
HttpServletRequest hsr = mock(HttpServletRequest.class);
when(hsr.getRemoteUser()).thenReturn(ENEMY_USER);
try {
hsWebServices.getJobTask(hsr, jobIdStr, this.taskIdStr);
fail("enemy can access job");
} catch (WebApplicationException e) {
assertEquals(Status.UNAUTHORIZED,
Status.fromStatusCode(e.getResponse().getStatus()));
}
when(hsr.getRemoteUser()).thenReturn(FRIENDLY_USER);
hsWebServices.getJobTask(hsr, this.jobIdStr, this.taskIdStr);
}
@Test
public void testGetSingleTaskCountersAcls() {
HttpServletRequest hsr = mock(HttpServletRequest.class);
when(hsr.getRemoteUser()).thenReturn(ENEMY_USER);
try {
hsWebServices.getSingleTaskCounters(hsr, this.jobIdStr, this.taskIdStr);
fail("enemy can access job");
} catch (WebApplicationException e) {
assertEquals(Status.UNAUTHORIZED,
Status.fromStatusCode(e.getResponse().getStatus()));
}
when(hsr.getRemoteUser()).thenReturn(FRIENDLY_USER);
hsWebServices.getSingleTaskCounters(hsr, this.jobIdStr, this.taskIdStr);
}
@Test
public void testGetJobTaskAttemptsAcls() {
HttpServletRequest hsr = mock(HttpServletRequest.class);
when(hsr.getRemoteUser()).thenReturn(ENEMY_USER);
try {
hsWebServices.getJobTaskAttempts(hsr, this.jobIdStr, this.taskIdStr);
fail("enemy can access job");
} catch (WebApplicationException e) {
assertEquals(Status.UNAUTHORIZED,
Status.fromStatusCode(e.getResponse().getStatus()));
}
when(hsr.getRemoteUser()).thenReturn(FRIENDLY_USER);
hsWebServices.getJobTaskAttempts(hsr, this.jobIdStr, this.taskIdStr);
}
@Test
public void testGetJobTaskAttemptIdAcls() {
HttpServletRequest hsr = mock(HttpServletRequest.class);
when(hsr.getRemoteUser()).thenReturn(ENEMY_USER);
try {
hsWebServices.getJobTaskAttemptId(hsr, this.jobIdStr, this.taskIdStr,
this.taskAttemptIdStr);
fail("enemy can access job");
} catch (WebApplicationException e) {
assertEquals(Status.UNAUTHORIZED,
Status.fromStatusCode(e.getResponse().getStatus()));
}
when(hsr.getRemoteUser()).thenReturn(FRIENDLY_USER);
hsWebServices.getJobTaskAttemptId(hsr, this.jobIdStr, this.taskIdStr,
this.taskAttemptIdStr);
}
@Test
public void testGetJobTaskAttemptIdCountersAcls() {
HttpServletRequest hsr = mock(HttpServletRequest.class);
when(hsr.getRemoteUser()).thenReturn(ENEMY_USER);
try {
hsWebServices.getJobTaskAttemptIdCounters(hsr, this.jobIdStr,
this.taskIdStr, this.taskAttemptIdStr);
fail("enemy can access job");
} catch (WebApplicationException e) {
assertEquals(Status.UNAUTHORIZED,
Status.fromStatusCode(e.getResponse().getStatus()));
}
when(hsr.getRemoteUser()).thenReturn(FRIENDLY_USER);
hsWebServices.getJobTaskAttemptIdCounters(hsr, this.jobIdStr,
this.taskIdStr, this.taskAttemptIdStr);
}
private static HistoryContext buildHistoryContext(final Configuration conf)
throws IOException {
HistoryContext ctx = new MockHistoryContext(1, 1, 1);
Map<JobId, Job> jobs = ctx.getAllJobs();
JobId jobId = jobs.keySet().iterator().next();
Job mockJob = new MockJobForAcls(jobs.get(jobId), conf);
jobs.put(jobId, mockJob);
return ctx;
}
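  // Maps every user to an empty group list, so the ACL checks in these tests
  // depend only on the user name and never on group membership.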
private static class NullGroupsProvider
implements GroupMappingServiceProvider {
@Override
public List<String> getGroups(String user) throws IOException {
return Collections.emptyList();
}
@Override
public void cacheGroupsRefresh() throws IOException {
}
@Override
public void cacheGroupsAdd(List<String> groups) throws IOException {
}
}
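  // Wraps a mock Job and overrides getJobACLs()/checkAccess() so that only
  // FRIENDLY_USER holds VIEW_JOB access; everything else delegates to the
  // wrapped job.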
private static class MockJobForAcls implements Job {
private Job mockJob;
private Configuration conf;
private Map<JobACL, AccessControlList> jobAcls;
private JobACLsManager aclsMgr;
public MockJobForAcls(Job mockJob, Configuration conf) {
this.mockJob = mockJob;
this.conf = conf;
AccessControlList viewAcl = new AccessControlList(FRIENDLY_USER);
this.jobAcls = new HashMap<JobACL, AccessControlList>();
this.jobAcls.put(JobACL.VIEW_JOB, viewAcl);
this.aclsMgr = new JobACLsManager(conf);
}
@Override
public JobId getID() {
return mockJob.getID();
}
@Override
public String getName() {
return mockJob.getName();
}
@Override
public JobState getState() {
return mockJob.getState();
}
@Override
public JobReport getReport() {
return mockJob.getReport();
}
@Override
public Counters getAllCounters() {
return mockJob.getAllCounters();
}
@Override
public Map<TaskId, Task> getTasks() {
return mockJob.getTasks();
}
@Override
public Map<TaskId, Task> getTasks(TaskType taskType) {
return mockJob.getTasks(taskType);
}
@Override
public Task getTask(TaskId taskID) {
return mockJob.getTask(taskID);
}
@Override
public List<String> getDiagnostics() {
return mockJob.getDiagnostics();
}
@Override
public int getTotalMaps() {
return mockJob.getTotalMaps();
}
@Override
public int getTotalReduces() {
return mockJob.getTotalReduces();
}
@Override
public int getCompletedMaps() {
return mockJob.getCompletedMaps();
}
@Override
public int getCompletedReduces() {
return mockJob.getCompletedReduces();
}
@Override
public float getProgress() {
return mockJob.getProgress();
}
@Override
public boolean isUber() {
return mockJob.isUber();
}
@Override
public String getUserName() {
return mockJob.getUserName();
}
@Override
public String getQueueName() {
return mockJob.getQueueName();
}
@Override
public Path getConfFile() {
return new Path("/some/path/to/conf");
}
@Override
public Configuration loadConfFile() throws IOException {
return conf;
}
@Override
public Map<JobACL, AccessControlList> getJobACLs() {
return jobAcls;
}
@Override
public TaskAttemptCompletionEvent[] getTaskAttemptCompletionEvents(
int fromEventId, int maxEvents) {
return mockJob.getTaskAttemptCompletionEvents(fromEventId, maxEvents);
}
@Override
public TaskCompletionEvent[] getMapAttemptCompletionEvents(
int startIndex, int maxEvents) {
return mockJob.getMapAttemptCompletionEvents(startIndex, maxEvents);
}
@Override
public List<AMInfo> getAMInfos() {
return mockJob.getAMInfos();
}
@Override
public boolean checkAccess(UserGroupInformation callerUGI,
JobACL jobOperation) {
return aclsMgr.checkAccess(callerUGI, jobOperation,
this.getUserName(), jobAcls.get(jobOperation));
}
@Override
public void setQueueName(String queueName) {
}
}
}
| 13,049
| 29.778302
| 78
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesTasks.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs.webapp;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.StringReader;
import java.util.Map;
import javax.ws.rs.core.MediaType;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.hs.HistoryContext;
import org.apache.hadoop.mapreduce.v2.hs.MockHistoryContext;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
import org.apache.hadoop.yarn.webapp.WebApp;
import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
import org.codehaus.jettison.json.JSONArray;
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;
import org.junit.Before;
import org.junit.Test;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.NodeList;
import org.xml.sax.InputSource;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.servlet.GuiceServletContextListener;
import com.google.inject.servlet.ServletModule;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.ClientResponse.Status;
import com.sun.jersey.api.client.UniformInterfaceException;
import com.sun.jersey.api.client.WebResource;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
import com.sun.jersey.test.framework.JerseyTest;
import com.sun.jersey.test.framework.WebAppDescriptor;
/**
 * Test the history server REST API for getting tasks, a specific task,
* and task counters.
*
* /ws/v1/history/mapreduce/jobs/{jobid}/tasks
* /ws/v1/history/mapreduce/jobs/{jobid}/tasks/{taskid}
* /ws/v1/history/mapreduce/jobs/{jobid}/tasks/{taskid}/counters
*/
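// For reference, the JSON the tests below assert against is shaped roughly
// like the following (a sketch; ids and values are illustrative only):
//   {"tasks": {"task": [
//     {"id": "task_1234_0001_m_000000", "state": "...", "type": "MAP",
//      "successfulAttempt": "...", "startTime": 0, "finishTime": 0,
//      "elapsedTime": 0, "progress": 100.0, ...}
//   ]}}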
public class TestHsWebServicesTasks extends JerseyTest {
private static Configuration conf = new Configuration();
private static MockHistoryContext appContext;
private static HsWebApp webApp;
private Injector injector = Guice.createInjector(new ServletModule() {
@Override
protected void configureServlets() {
appContext = new MockHistoryContext(0, 1, 2, 1);
webApp = mock(HsWebApp.class);
when(webApp.name()).thenReturn("hsmockwebapp");
bind(JAXBContextResolver.class);
bind(HsWebServices.class);
bind(GenericExceptionHandler.class);
bind(WebApp.class).toInstance(webApp);
bind(AppContext.class).toInstance(appContext);
bind(HistoryContext.class).toInstance(appContext);
bind(Configuration.class).toInstance(conf);
serve("/*").with(GuiceContainer.class);
}
});
public class GuiceServletConfig extends GuiceServletContextListener {
@Override
protected Injector getInjector() {
return injector;
}
}
@Before
@Override
public void setUp() throws Exception {
super.setUp();
}
public TestHsWebServicesTasks() {
super(new WebAppDescriptor.Builder(
"org.apache.hadoop.mapreduce.v2.hs.webapp")
.contextListenerClass(GuiceServletConfig.class)
.filterClass(com.google.inject.servlet.GuiceFilter.class)
.contextPath("jersey-guice-filter").servletPath("/").build());
}
@Test
public void testTasks() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").path(jobId).path("tasks")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject tasks = json.getJSONObject("tasks");
JSONArray arr = tasks.getJSONArray("task");
assertEquals("incorrect number of elements", 2, arr.length());
verifyHsTask(arr, jobsMap.get(id), null);
}
}
@Test
public void testTasksDefault() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").path(jobId).path("tasks")
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject tasks = json.getJSONObject("tasks");
JSONArray arr = tasks.getJSONArray("task");
assertEquals("incorrect number of elements", 2, arr.length());
verifyHsTask(arr, jobsMap.get(id), null);
}
}
@Test
public void testTasksSlash() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").path(jobId).path("tasks/")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject tasks = json.getJSONObject("tasks");
JSONArray arr = tasks.getJSONArray("task");
assertEquals("incorrect number of elements", 2, arr.length());
verifyHsTask(arr, jobsMap.get(id), null);
}
}
@Test
public void testTasksXML() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").path(jobId).path("tasks")
.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
String xml = response.getEntity(String.class);
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
InputSource is = new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom = db.parse(is);
NodeList tasks = dom.getElementsByTagName("tasks");
assertEquals("incorrect number of elements", 1, tasks.getLength());
NodeList task = dom.getElementsByTagName("task");
verifyHsTaskXML(task, jobsMap.get(id));
}
}
@Test
public void testTasksQueryMap() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
String type = "m";
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").path(jobId).path("tasks")
.queryParam("type", type).accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject tasks = json.getJSONObject("tasks");
JSONArray arr = tasks.getJSONArray("task");
assertEquals("incorrect number of elements", 1, arr.length());
verifyHsTask(arr, jobsMap.get(id), type);
}
}
@Test
public void testTasksQueryReduce() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
String type = "r";
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").path(jobId).path("tasks")
.queryParam("type", type).accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject tasks = json.getJSONObject("tasks");
JSONArray arr = tasks.getJSONArray("task");
assertEquals("incorrect number of elements", 1, arr.length());
verifyHsTask(arr, jobsMap.get(id), type);
}
}
@Test
public void testTasksQueryInvalid() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
// tasktype must be exactly either "m" or "r"
String tasktype = "reduce";
try {
r.path("ws").path("v1").path("history").path("mapreduce").path("jobs")
.path(jobId).path("tasks").queryParam("type", tasktype)
.accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
fail("should have thrown exception on invalid uri");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject msg = response.getEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements", 3, exception.length());
String message = exception.getString("message");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
WebServicesTestUtils.checkStringMatch("exception message",
"java.lang.Exception: tasktype must be either m or r", message);
WebServicesTestUtils.checkStringMatch("exception type",
"BadRequestException", type);
WebServicesTestUtils.checkStringMatch("exception classname",
"org.apache.hadoop.yarn.webapp.BadRequestException", classname);
}
}
}
@Test
public void testTaskId() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
for (Task task : jobsMap.get(id).getTasks().values()) {
String tid = MRApps.toString(task.getID());
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid)
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("task");
verifyHsSingleTask(info, task);
}
}
}
@Test
public void testTaskIdSlash() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
for (Task task : jobsMap.get(id).getTasks().values()) {
String tid = MRApps.toString(task.getID());
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").path(jobId).path("tasks")
.path(tid + "/").accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("task");
verifyHsSingleTask(info, task);
}
}
}
@Test
public void testTaskIdDefault() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
for (Task task : jobsMap.get(id).getTasks().values()) {
String tid = MRApps.toString(task.getID());
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("task");
verifyHsSingleTask(info, task);
}
}
}
@Test
public void testTaskIdBogus() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
String tid = "bogustaskid";
try {
r.path("ws").path("v1").path("history").path("mapreduce").path("jobs")
.path(jobId).path("tasks").path(tid).get(JSONObject.class);
fail("should have thrown exception on invalid uri");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject msg = response.getEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements", 3, exception.length());
String message = exception.getString("message");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
WebServicesTestUtils.checkStringEqual("exception message",
"java.lang.Exception: TaskId string : "
+ "bogustaskid is not properly formed"
+ "\nReason: java.util.regex.Matcher[pattern=" +
TaskID.TASK_ID_REGEX + " region=0,11 lastmatch=]", message);
WebServicesTestUtils.checkStringMatch("exception type",
"NotFoundException", type);
WebServicesTestUtils.checkStringMatch("exception classname",
"org.apache.hadoop.yarn.webapp.NotFoundException", classname);
}
}
}
@Test
public void testTaskIdNonExist() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
String tid = "task_0_0000_m_000000";
try {
r.path("ws").path("v1").path("history").path("mapreduce").path("jobs")
.path(jobId).path("tasks").path(tid).get(JSONObject.class);
fail("should have thrown exception on invalid uri");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject msg = response.getEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements", 3, exception.length());
String message = exception.getString("message");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
WebServicesTestUtils.checkStringMatch("exception message",
"java.lang.Exception: task not found with id task_0_0000_m_000000",
message);
WebServicesTestUtils.checkStringMatch("exception type",
"NotFoundException", type);
WebServicesTestUtils.checkStringMatch("exception classname",
"org.apache.hadoop.yarn.webapp.NotFoundException", classname);
}
}
}
@Test
public void testTaskIdInvalid() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
String tid = "task_0_0000_d_000000";
try {
r.path("ws").path("v1").path("history").path("mapreduce").path("jobs")
.path(jobId).path("tasks").path(tid).get(JSONObject.class);
fail("should have thrown exception on invalid uri");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject msg = response.getEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements", 3, exception.length());
String message = exception.getString("message");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
WebServicesTestUtils.checkStringEqual("exception message",
"java.lang.Exception: TaskId string : "
+ "task_0_0000_d_000000 is not properly formed" +
"\nReason: java.util.regex.Matcher[pattern=" +
TaskID.TASK_ID_REGEX + " region=0,20 lastmatch=]", message);
WebServicesTestUtils.checkStringMatch("exception type",
"NotFoundException", type);
WebServicesTestUtils.checkStringMatch("exception classname",
"org.apache.hadoop.yarn.webapp.NotFoundException", classname);
}
}
}
@Test
public void testTaskIdInvalid2() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
String tid = "task_0000_m_000000";
try {
r.path("ws").path("v1").path("history").path("mapreduce").path("jobs")
.path(jobId).path("tasks").path(tid).get(JSONObject.class);
fail("should have thrown exception on invalid uri");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject msg = response.getEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements", 3, exception.length());
String message = exception.getString("message");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
WebServicesTestUtils.checkStringEqual("exception message",
"java.lang.Exception: TaskId string : "
+ "task_0000_m_000000 is not properly formed" +
"\nReason: java.util.regex.Matcher[pattern=" +
TaskID.TASK_ID_REGEX + " region=0,18 lastmatch=]", message);
WebServicesTestUtils.checkStringMatch("exception type",
"NotFoundException", type);
WebServicesTestUtils.checkStringMatch("exception classname",
"org.apache.hadoop.yarn.webapp.NotFoundException", classname);
}
}
}
@Test
public void testTaskIdInvalid3() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
String tid = "task_0_0000_m";
try {
r.path("ws").path("v1").path("history").path("mapreduce").path("jobs")
.path(jobId).path("tasks").path(tid).get(JSONObject.class);
fail("should have thrown exception on invalid uri");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject msg = response.getEntity(JSONObject.class);
JSONObject exception = msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements", 3, exception.length());
String message = exception.getString("message");
String type = exception.getString("exception");
String classname = exception.getString("javaClassName");
WebServicesTestUtils.checkStringEqual("exception message",
"java.lang.Exception: TaskId string : "
+ "task_0_0000_m is not properly formed" +
"\nReason: java.util.regex.Matcher[pattern=" +
TaskID.TASK_ID_REGEX + " region=0,13 lastmatch=]", message);
WebServicesTestUtils.checkStringMatch("exception type",
"NotFoundException", type);
WebServicesTestUtils.checkStringMatch("exception classname",
"org.apache.hadoop.yarn.webapp.NotFoundException", classname);
}
}
}
@Test
public void testTaskIdXML() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
for (Task task : jobsMap.get(id).getTasks().values()) {
String tid = MRApps.toString(task.getID());
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid)
.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
String xml = response.getEntity(String.class);
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
InputSource is = new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom = db.parse(is);
NodeList nodes = dom.getElementsByTagName("task");
for (int i = 0; i < nodes.getLength(); i++) {
Element element = (Element) nodes.item(i);
verifyHsSingleTaskXML(element, task);
}
}
}
}
public void verifyHsSingleTask(JSONObject info, Task task)
throws JSONException {
assertEquals("incorrect number of elements", 9, info.length());
verifyTaskGeneric(task, info.getString("id"), info.getString("state"),
info.getString("type"), info.getString("successfulAttempt"),
info.getLong("startTime"), info.getLong("finishTime"),
info.getLong("elapsedTime"), (float) info.getDouble("progress"));
}
public void verifyHsTask(JSONArray arr, Job job, String type)
throws JSONException {
for (Task task : job.getTasks().values()) {
TaskId id = task.getID();
String tid = MRApps.toString(id);
      boolean found = false;
      if (type == null || task.getType() == MRApps.taskType(type)) {
        for (int i = 0; i < arr.length(); i++) {
          JSONObject info = arr.getJSONObject(i);
          if (tid.equals(info.getString("id"))) {
found = true;
verifyHsSingleTask(info, task);
}
}
assertTrue("task with id: " + tid + " not in web service output", found);
}
}
}
public void verifyTaskGeneric(Task task, String id, String state,
String type, String successfulAttempt, long startTime, long finishTime,
long elapsedTime, float progress) {
TaskId taskid = task.getID();
String tid = MRApps.toString(taskid);
TaskReport report = task.getReport();
WebServicesTestUtils.checkStringMatch("id", tid, id);
WebServicesTestUtils.checkStringMatch("type", task.getType().toString(),
type);
WebServicesTestUtils.checkStringMatch("state", report.getTaskState()
.toString(), state);
    // not easily checked without duplicating logic, just make sure it's here
assertNotNull("successfulAttempt null", successfulAttempt);
assertEquals("startTime wrong", report.getStartTime(), startTime);
assertEquals("finishTime wrong", report.getFinishTime(), finishTime);
assertEquals("elapsedTime wrong", finishTime - startTime, elapsedTime);
assertEquals("progress wrong", report.getProgress() * 100, progress, 1e-3f);
}
public void verifyHsSingleTaskXML(Element element, Task task) {
verifyTaskGeneric(task, WebServicesTestUtils.getXmlString(element, "id"),
WebServicesTestUtils.getXmlString(element, "state"),
WebServicesTestUtils.getXmlString(element, "type"),
WebServicesTestUtils.getXmlString(element, "successfulAttempt"),
WebServicesTestUtils.getXmlLong(element, "startTime"),
WebServicesTestUtils.getXmlLong(element, "finishTime"),
WebServicesTestUtils.getXmlLong(element, "elapsedTime"),
WebServicesTestUtils.getXmlFloat(element, "progress"));
}
public void verifyHsTaskXML(NodeList nodes, Job job) {
assertEquals("incorrect number of elements", 2, nodes.getLength());
for (Task task : job.getTasks().values()) {
TaskId id = task.getID();
String tid = MRApps.toString(id);
      boolean found = false;
      for (int i = 0; i < nodes.getLength(); i++) {
        Element element = (Element) nodes.item(i);
        if (tid.equals(WebServicesTestUtils.getXmlString(element, "id"))) {
found = true;
verifyHsSingleTaskXML(element, task);
}
}
assertTrue("task with id: " + tid + " not in web service output", found);
}
}
@Test
public void testTaskIdCounters() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
for (Task task : jobsMap.get(id).getTasks().values()) {
String tid = MRApps.toString(task.getID());
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid)
.path("counters").accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("jobTaskCounters");
verifyHsJobTaskCounters(info, task);
}
}
}
@Test
public void testTaskIdCountersSlash() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
for (Task task : jobsMap.get(id).getTasks().values()) {
String tid = MRApps.toString(task.getID());
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid)
.path("counters/").accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("jobTaskCounters");
verifyHsJobTaskCounters(info, task);
}
}
}
@Test
public void testTaskIdCountersDefault() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
for (Task task : jobsMap.get(id).getTasks().values()) {
String tid = MRApps.toString(task.getID());
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid)
.path("counters").get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("jobTaskCounters");
verifyHsJobTaskCounters(info, task);
}
}
}
@Test
public void testJobTaskCountersXML() throws Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
for (Task task : jobsMap.get(id).getTasks().values()) {
String tid = MRApps.toString(task.getID());
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid)
.path("counters").accept(MediaType.APPLICATION_XML)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
String xml = response.getEntity(String.class);
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
InputSource is = new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom = db.parse(is);
NodeList info = dom.getElementsByTagName("jobTaskCounters");
verifyHsTaskCountersXML(info, task);
}
}
}
public void verifyHsJobTaskCounters(JSONObject info, Task task)
throws JSONException {
assertEquals("incorrect number of elements", 2, info.length());
WebServicesTestUtils.checkStringMatch("id", MRApps.toString(task.getID()),
info.getString("id"));
    // just do simple verification that the fields are present and sane -
    // not that the data in them is correct
JSONArray counterGroups = info.getJSONArray("taskCounterGroup");
for (int i = 0; i < counterGroups.length(); i++) {
JSONObject counterGroup = counterGroups.getJSONObject(i);
String name = counterGroup.getString("counterGroupName");
assertTrue("name not set", (name != null && !name.isEmpty()));
JSONArray counters = counterGroup.getJSONArray("counter");
for (int j = 0; j < counters.length(); j++) {
JSONObject counter = counters.getJSONObject(j);
String counterName = counter.getString("name");
assertTrue("name not set",
(counterName != null && !counterName.isEmpty()));
long value = counter.getLong("value");
assertTrue("value >= 0", value >= 0);
}
}
}
public void verifyHsTaskCountersXML(NodeList nodes, Task task) {
for (int i = 0; i < nodes.getLength(); i++) {
Element element = (Element) nodes.item(i);
WebServicesTestUtils.checkStringMatch("id",
MRApps.toString(task.getID()),
WebServicesTestUtils.getXmlString(element, "id"));
      // just do simple verification that the fields are present and sane -
      // not that the data in them is correct
NodeList groups = element.getElementsByTagName("taskCounterGroup");
for (int j = 0; j < groups.getLength(); j++) {
Element counters = (Element) groups.item(j);
assertNotNull("should have counters in the web service info", counters);
String name = WebServicesTestUtils.getXmlString(counters,
"counterGroupName");
assertTrue("name not set", (name != null && !name.isEmpty()));
NodeList counterArr = counters.getElementsByTagName("counter");
for (int z = 0; z < counterArr.getLength(); z++) {
Element counter = (Element) counterArr.item(z);
String counterName = WebServicesTestUtils.getXmlString(counter,
"name");
assertTrue("counter name not set",
(counterName != null && !counterName.isEmpty()));
long value = WebServicesTestUtils.getXmlLong(counter, "value");
assertTrue("value not >= 0", value >= 0);
}
}
}
}
}
| 34,231
| 42.774936
| 81
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServices.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs.webapp;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import java.io.StringReader;
import javax.ws.rs.core.MediaType;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.hs.HistoryContext;
import org.apache.hadoop.mapreduce.v2.hs.JobHistory;
import org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer;
import org.apache.hadoop.mapreduce.v2.hs.MockHistoryContext;
import org.apache.hadoop.util.VersionInfo;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
import org.apache.hadoop.yarn.webapp.WebApp;
import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;
import org.junit.Before;
import org.junit.Test;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.NodeList;
import org.xml.sax.InputSource;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.servlet.GuiceServletContextListener;
import com.google.inject.servlet.ServletModule;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.ClientResponse.Status;
import com.sun.jersey.api.client.UniformInterfaceException;
import com.sun.jersey.api.client.WebResource;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
import com.sun.jersey.test.framework.JerseyTest;
import com.sun.jersey.test.framework.WebAppDescriptor;
/**
 * Test the History Server info web services APIs. Also test non-existent URLs.
*
* /ws/v1/history
* /ws/v1/history/info
*/
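// For reference, the tests below expect the history info resource to return
// JSON shaped like the following (a sketch; values are illustrative only):
//   {"historyInfo": {"hadoopVersion": "...", "hadoopBuildVersion": "...",
//                    "hadoopVersionBuiltOn": "...", "startedOn": 1234567890}}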
public class TestHsWebServices extends JerseyTest {
private static Configuration conf = new Configuration();
private static HistoryContext appContext;
private static HsWebApp webApp;
private Injector injector = Guice.createInjector(new ServletModule() {
@Override
protected void configureServlets() {
appContext = new MockHistoryContext(0, 1, 1, 1);
JobHistory jobHistoryService = new JobHistory();
HistoryContext historyContext = (HistoryContext) jobHistoryService;
webApp = new HsWebApp(historyContext);
bind(JAXBContextResolver.class);
bind(HsWebServices.class);
bind(GenericExceptionHandler.class);
bind(WebApp.class).toInstance(webApp);
bind(AppContext.class).toInstance(appContext);
bind(HistoryContext.class).toInstance(appContext);
bind(Configuration.class).toInstance(conf);
serve("/*").with(GuiceContainer.class);
}
});
public class GuiceServletConfig extends GuiceServletContextListener {
@Override
protected Injector getInjector() {
return injector;
}
}
@Before
@Override
public void setUp() throws Exception {
super.setUp();
}
public TestHsWebServices() {
super(new WebAppDescriptor.Builder(
"org.apache.hadoop.mapreduce.v2.hs.webapp")
.contextListenerClass(GuiceServletConfig.class)
.filterClass(com.google.inject.servlet.GuiceFilter.class)
.contextPath("jersey-guice-filter").servletPath("/").build());
}
@Test
public void testHS() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("history")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
verifyHSInfo(json.getJSONObject("historyInfo"), appContext);
}
@Test
public void testHSSlash() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("history/")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
verifyHSInfo(json.getJSONObject("historyInfo"), appContext);
}
@Test
public void testHSDefault() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("history/")
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
verifyHSInfo(json.getJSONObject("historyInfo"), appContext);
}
@Test
public void testHSXML() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("history")
.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
String xml = response.getEntity(String.class);
verifyHSInfoXML(xml, appContext);
}
@Test
public void testInfo() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("history")
.path("info").accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
verifyHSInfo(json.getJSONObject("historyInfo"), appContext);
}
@Test
public void testInfoSlash() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("history")
.path("info/").accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
verifyHSInfo(json.getJSONObject("historyInfo"), appContext);
}
@Test
public void testInfoDefault() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("history")
.path("info/").get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
verifyHSInfo(json.getJSONObject("historyInfo"), appContext);
}
@Test
public void testInfoXML() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("history")
.path("info/").accept(MediaType.APPLICATION_XML)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
String xml = response.getEntity(String.class);
verifyHSInfoXML(xml, appContext);
}
@Test
public void testInvalidUri() throws JSONException, Exception {
WebResource r = resource();
String responseStr = "";
try {
responseStr = r.path("ws").path("v1").path("history").path("bogus")
.accept(MediaType.APPLICATION_JSON).get(String.class);
fail("should have thrown exception on invalid uri");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
WebServicesTestUtils.checkStringMatch(
"error string exists and shouldn't", "", responseStr);
}
}
@Test
public void testInvalidUri2() throws JSONException, Exception {
WebResource r = resource();
String responseStr = "";
try {
responseStr = r.path("ws").path("v1").path("invalid")
.accept(MediaType.APPLICATION_JSON).get(String.class);
fail("should have thrown exception on invalid uri");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
WebServicesTestUtils.checkStringMatch(
"error string exists and shouldn't", "", responseStr);
}
}
@Test
public void testInvalidAccept() throws JSONException, Exception {
WebResource r = resource();
String responseStr = "";
try {
responseStr = r.path("ws").path("v1").path("history")
.accept(MediaType.TEXT_PLAIN).get(String.class);
fail("should have thrown exception on invalid uri");
} catch (UniformInterfaceException ue) {
ClientResponse response = ue.getResponse();
assertEquals(Status.INTERNAL_SERVER_ERROR,
response.getClientResponseStatus());
WebServicesTestUtils.checkStringMatch(
"error string exists and shouldn't", "", responseStr);
}
}
public void verifyHsInfoGeneric(String hadoopVersionBuiltOn,
String hadoopBuildVersion, String hadoopVersion, long startedon) {
WebServicesTestUtils.checkStringMatch("hadoopVersionBuiltOn",
VersionInfo.getDate(), hadoopVersionBuiltOn);
WebServicesTestUtils.checkStringEqual("hadoopBuildVersion",
VersionInfo.getBuildVersion(), hadoopBuildVersion);
WebServicesTestUtils.checkStringMatch("hadoopVersion",
VersionInfo.getVersion(), hadoopVersion);
assertEquals("startedOn doesn't match: ",
JobHistoryServer.historyServerTimeStamp, startedon);
}
public void verifyHSInfo(JSONObject info, AppContext ctx)
throws JSONException {
assertEquals("incorrect number of elements", 4, info.length());
verifyHsInfoGeneric(info.getString("hadoopVersionBuiltOn"),
info.getString("hadoopBuildVersion"), info.getString("hadoopVersion"),
info.getLong("startedOn"));
}
public void verifyHSInfoXML(String xml, AppContext ctx)
throws JSONException, Exception {
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
InputSource is = new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom = db.parse(is);
NodeList nodes = dom.getElementsByTagName("historyInfo");
assertEquals("incorrect number of elements", 1, nodes.getLength());
for (int i = 0; i < nodes.getLength(); i++) {
Element element = (Element) nodes.item(i);
verifyHsInfoGeneric(
WebServicesTestUtils.getXmlString(element, "hadoopVersionBuiltOn"),
WebServicesTestUtils.getXmlString(element, "hadoopBuildVersion"),
WebServicesTestUtils.getXmlString(element, "hadoopVersion"),
WebServicesTestUtils.getXmlLong(element, "startedOn"));
}
}
}
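The tests above all drive an embedded Jersey container wired up through JerseyTest and Guice. For reference, the same endpoint can be probed against a running history server with nothing but java.net; this standalone sketch is illustrative only, is not part of the test suite, and assumes the conventional history server web port of 19888:
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class HsInfoProbe {
  public static void main(String[] args) throws Exception {
    // Hypothetical history server address; 19888 is the usual default web port.
    URL url = new URL("http://localhost:19888/ws/v1/history/info");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("GET");
    conn.setRequestProperty("Accept", "application/json");
    // The tests assert application/json for both explicit and default requests.
    System.out.println("Status: " + conn.getResponseCode());
    System.out.println("Content-Type: " + conn.getContentType());
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream()))) {
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line);
      }
    }
  }
}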
| 11,726
| 38.618243
| 80
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesJobConf.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs.webapp;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.io.StringReader;
import java.util.Map;
import javax.ws.rs.core.MediaType;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.hs.HistoryContext;
import org.apache.hadoop.mapreduce.v2.hs.MockHistoryContext;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
import org.apache.hadoop.yarn.webapp.WebApp;
import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
import org.codehaus.jettison.json.JSONArray;
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.Test;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.NodeList;
import org.xml.sax.InputSource;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.servlet.GuiceServletContextListener;
import com.google.inject.servlet.ServletModule;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.WebResource;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
import com.sun.jersey.test.framework.JerseyTest;
import com.sun.jersey.test.framework.WebAppDescriptor;
/**
* Test the history server Rest API for getting the job conf. This
 * requires creating a temporary configuration file.
*
* /ws/v1/history/mapreduce/jobs/{jobid}/conf
*/
public class TestHsWebServicesJobConf extends JerseyTest {
private static Configuration conf = new Configuration();
private static HistoryContext appContext;
private static HsWebApp webApp;
private static File testConfDir = new File("target",
TestHsWebServicesJobConf.class.getSimpleName() + "confDir");
private Injector injector = Guice.createInjector(new ServletModule() {
@Override
protected void configureServlets() {
Path confPath = new Path(testConfDir.toString(),
MRJobConfig.JOB_CONF_FILE);
Configuration config = new Configuration();
FileSystem localFs;
try {
localFs = FileSystem.getLocal(config);
confPath = localFs.makeQualified(confPath);
OutputStream out = localFs.create(confPath);
try {
conf.writeXml(out);
} finally {
out.close();
}
if (!localFs.exists(confPath)) {
fail("error creating config file: " + confPath);
}
} catch (IOException e) {
fail("error creating config file: " + e.getMessage());
}
appContext = new MockHistoryContext(0, 2, 1, confPath);
webApp = mock(HsWebApp.class);
when(webApp.name()).thenReturn("hsmockwebapp");
bind(JAXBContextResolver.class);
bind(HsWebServices.class);
bind(GenericExceptionHandler.class);
bind(WebApp.class).toInstance(webApp);
bind(AppContext.class).toInstance(appContext);
bind(HistoryContext.class).toInstance(appContext);
bind(Configuration.class).toInstance(conf);
serve("/*").with(GuiceContainer.class);
}
});
public class GuiceServletConfig extends GuiceServletContextListener {
@Override
protected Injector getInjector() {
return injector;
}
}
@Before
@Override
public void setUp() throws Exception {
super.setUp();
testConfDir.mkdir();
}
@AfterClass
static public void stop() {
FileUtil.fullyDelete(testConfDir);
}
public TestHsWebServicesJobConf() {
super(new WebAppDescriptor.Builder(
"org.apache.hadoop.mapreduce.v2.hs.webapp")
.contextListenerClass(GuiceServletConfig.class)
.filterClass(com.google.inject.servlet.GuiceFilter.class)
.contextPath("jersey-guice-filter").servletPath("/").build());
}
@Test
public void testJobConf() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
ClientResponse response = r.path("ws").path("v1").path("history")
.path("mapreduce")
.path("jobs").path(jobId).path("conf")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("conf");
verifyHsJobConf(info, jobsMap.get(id));
}
}
@Test
public void testJobConfSlash() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
ClientResponse response = r.path("ws").path("v1").path("history").path("mapreduce")
.path("jobs").path(jobId).path("conf/")
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("conf");
verifyHsJobConf(info, jobsMap.get(id));
}
}
@Test
public void testJobConfDefault() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
ClientResponse response = r.path("ws").path("v1").path("history").path("mapreduce")
.path("jobs").path(jobId).path("conf").get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
JSONObject json = response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements", 1, json.length());
JSONObject info = json.getJSONObject("conf");
verifyHsJobConf(info, jobsMap.get(id));
}
}
@Test
public void testJobConfXML() throws JSONException, Exception {
WebResource r = resource();
Map<JobId, Job> jobsMap = appContext.getAllJobs();
for (JobId id : jobsMap.keySet()) {
String jobId = MRApps.toString(id);
ClientResponse response = r.path("ws").path("v1").path("history").path("mapreduce")
.path("jobs").path(jobId).path("conf")
.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
String xml = response.getEntity(String.class);
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
InputSource is = new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom = db.parse(is);
NodeList info = dom.getElementsByTagName("conf");
verifyHsJobConfXML(info, jobsMap.get(id));
}
}
public void verifyHsJobConf(JSONObject info, Job job) throws JSONException {
assertEquals("incorrect number of elements", 2, info.length());
WebServicesTestUtils.checkStringMatch("path", job.getConfFile().toString(),
info.getString("path"));
    // just do simple verification that the fields are present - not that
    // the data in the fields is correct
JSONArray properties = info.getJSONArray("property");
for (int i = 0; i < properties.length(); i++) {
JSONObject prop = properties.getJSONObject(i);
String name = prop.getString("name");
String value = prop.getString("value");
assertTrue("name not set", (name != null && !name.isEmpty()));
assertTrue("value not set", (value != null && !value.isEmpty()));
}
}
public void verifyHsJobConfXML(NodeList nodes, Job job) {
assertEquals("incorrect number of elements", 1, nodes.getLength());
for (int i = 0; i < nodes.getLength(); i++) {
Element element = (Element) nodes.item(i);
WebServicesTestUtils.checkStringMatch("path", job.getConfFile()
.toString(), WebServicesTestUtils.getXmlString(element, "path"));
      // just do simple verification that the fields are present - not that
      // the data in the fields is correct
NodeList properties = element.getElementsByTagName("property");
for (int j = 0; j < properties.getLength(); j++) {
Element property = (Element) properties.item(j);
assertNotNull("should have counters in the web service info", property);
String name = WebServicesTestUtils.getXmlString(property, "name");
String value = WebServicesTestUtils.getXmlString(property, "value");
assertTrue("name not set", (name != null && !name.isEmpty()));
assertTrue("name not set", (value != null && !value.isEmpty()));
}
}
}
}
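The servlet setup above hinges on Configuration.writeXml(): the shared conf is serialized to a job conf file that MockHistoryContext then serves back through the /conf endpoint. A minimal, self-contained sketch of that XML round trip, using only the public Configuration API (the property name and target path below are illustrative):
import java.io.OutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ConfRoundTrip {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("test.property", "test.value");
    FileSystem fs = FileSystem.getLocal(conf);
    Path confPath = fs.makeQualified(new Path("target", "job.xml"));
    try (OutputStream out = fs.create(confPath)) {
      conf.writeXml(out);  // serialize every property as a <property> element
    }
    Configuration loaded = new Configuration(false); // skip default resources
    loaded.addResource(confPath);                    // re-read the XML we wrote
    System.out.println(loaded.get("test.property")); // prints test.value
  }
}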
| 10,384
| 36.089286
| 89
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/VerifyJobsUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs.webapp;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.util.List;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;
public class VerifyJobsUtils {
public static void verifyHsJobPartial(JSONObject info, Job job) throws JSONException {
assertEquals("incorrect number of elements", 12, info.length());
    // fields everyone can access
verifyHsJobGeneric(job, info.getString("id"), info.getString("user"),
info.getString("name"), info.getString("state"),
info.getString("queue"), info.getLong("startTime"),
info.getLong("finishTime"), info.getInt("mapsTotal"),
info.getInt("mapsCompleted"), info.getInt("reducesTotal"),
info.getInt("reducesCompleted"));
}
public static void verifyHsJob(JSONObject info, Job job) throws JSONException {
assertEquals("incorrect number of elements", 25, info.length());
    // fields everyone can access
verifyHsJobGeneric(job, info.getString("id"), info.getString("user"),
info.getString("name"), info.getString("state"),
info.getString("queue"), info.getLong("startTime"),
info.getLong("finishTime"), info.getInt("mapsTotal"),
info.getInt("mapsCompleted"), info.getInt("reducesTotal"),
info.getInt("reducesCompleted"));
String diagnostics = "";
if (info.has("diagnostics")) {
diagnostics = info.getString("diagnostics");
}
// restricted access fields - if security and acls set
verifyHsJobGenericSecure(job, info.getBoolean("uberized"), diagnostics,
info.getLong("avgMapTime"), info.getLong("avgReduceTime"),
info.getLong("avgShuffleTime"), info.getLong("avgMergeTime"),
info.getInt("failedReduceAttempts"),
info.getInt("killedReduceAttempts"),
info.getInt("successfulReduceAttempts"),
info.getInt("failedMapAttempts"), info.getInt("killedMapAttempts"),
info.getInt("successfulMapAttempts"));
    // ACLs are not being checked since
    // we are using a mock Job instead of CompletedJob
}
public static void verifyHsJobGeneric(Job job, String id, String user,
String name, String state, String queue, long startTime, long finishTime,
int mapsTotal, int mapsCompleted, int reducesTotal, int reducesCompleted) {
JobReport report = job.getReport();
WebServicesTestUtils.checkStringMatch("id", MRApps.toString(job.getID()),
id);
WebServicesTestUtils.checkStringMatch("user", job.getUserName().toString(),
user);
WebServicesTestUtils.checkStringMatch("name", job.getName(), name);
WebServicesTestUtils.checkStringMatch("state", job.getState().toString(),
state);
WebServicesTestUtils.checkStringMatch("queue", job.getQueueName(), queue);
assertEquals("startTime incorrect", report.getStartTime(), startTime);
assertEquals("finishTime incorrect", report.getFinishTime(), finishTime);
assertEquals("mapsTotal incorrect", job.getTotalMaps(), mapsTotal);
assertEquals("mapsCompleted incorrect", job.getCompletedMaps(),
mapsCompleted);
assertEquals("reducesTotal incorrect", job.getTotalReduces(), reducesTotal);
assertEquals("reducesCompleted incorrect", job.getCompletedReduces(),
reducesCompleted);
}
public static void verifyHsJobGenericSecure(Job job, Boolean uberized,
String diagnostics, long avgMapTime, long avgReduceTime,
long avgShuffleTime, long avgMergeTime, int failedReduceAttempts,
int killedReduceAttempts, int successfulReduceAttempts,
int failedMapAttempts, int killedMapAttempts, int successfulMapAttempts) {
String diagString = "";
List<String> diagList = job.getDiagnostics();
if (diagList != null && !diagList.isEmpty()) {
      StringBuilder b = new StringBuilder();
for (String diag : diagList) {
b.append(diag);
}
diagString = b.toString();
}
WebServicesTestUtils.checkStringMatch("diagnostics", diagString,
diagnostics);
assertEquals("isUber incorrect", job.isUber(), uberized);
// unfortunately the following fields are all calculated in JobInfo
// so not easily accessible without doing all the calculations again.
// For now just make sure they are present.
assertTrue("failedReduceAttempts not >= 0", failedReduceAttempts >= 0);
assertTrue("killedReduceAttempts not >= 0", killedReduceAttempts >= 0);
assertTrue("successfulReduceAttempts not >= 0",
successfulReduceAttempts >= 0);
assertTrue("failedMapAttempts not >= 0", failedMapAttempts >= 0);
assertTrue("killedMapAttempts not >= 0", killedMapAttempts >= 0);
assertTrue("successfulMapAttempts not >= 0", successfulMapAttempts >= 0);
assertTrue("avgMapTime not >= 0", avgMapTime >= 0);
assertTrue("avgReduceTime not >= 0", avgReduceTime >= 0);
assertTrue("avgShuffleTime not >= 0", avgShuffleTime >= 0);
assertTrue("avgMergeTime not >= 0", avgMergeTime >= 0);
}
}
| 6,085
| 41.559441
| 88
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestBlocks.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs.webapp;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.HashMap;
import java.util.Map;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.JobIdPBImpl;
import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.TaskAttemptIdPBImpl;
import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.TaskIdPBImpl;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.app.webapp.AMParams;
import org.apache.hadoop.mapreduce.v2.app.webapp.App;
import org.apache.hadoop.mapreduce.v2.app.webapp.AppForTest;
import org.apache.hadoop.mapreduce.v2.hs.webapp.HsTaskPage.AttemptsBlock;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl;
import org.apache.hadoop.yarn.webapp.Controller.RequestContext;
import org.apache.hadoop.yarn.webapp.Params;
import org.apache.hadoop.yarn.webapp.View;
import org.apache.hadoop.yarn.webapp.log.AggregatedLogsPage;
import org.apache.hadoop.yarn.webapp.view.BlockForTest;
import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
import org.apache.hadoop.yarn.webapp.view.HtmlBlock.Block;
import org.junit.Assert;
import org.junit.Test;
import static org.junit.Assert.*;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.*;
/**
* Test some HtmlBlock classes
*/
public class TestBlocks {
private ByteArrayOutputStream data = new ByteArrayOutputStream();
@Test
public void testPullTaskLink(){
Task task = getTask(0);
String taskId = task.getID().toString();
Assert.assertEquals("pull links doesn't work correctly",
"Task failed <a href=\"/jobhistory/task/" + taskId + "\">" +
taskId + "</a>"
, HsJobBlock.addTaskLinks("Task failed " + taskId));
Assert.assertEquals("pull links doesn't work correctly",
"Task failed <a href=\"/jobhistory/task/" + taskId + "\">" +
taskId + "</a>\n Job failed as tasks failed. failedMaps:1 failedReduces:0"
, HsJobBlock.addTaskLinks("Task failed " + taskId + "\n " +
"Job failed as tasks failed. failedMaps:1 failedReduces:0"));
}
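  // Illustration only (not the actual HsJobBlock implementation): one way a
  // rewrite like addTaskLinks can be done with java.util.regex. The helper
  // name and ID pattern below are simplified assumptions.
  private static final java.util.regex.Pattern TASK_ID_PATTERN =
      java.util.regex.Pattern.compile("(task_\\d+_\\d+_[mr]_\\d+)");

  static String addTaskLinksSketch(String text) {
    // $1 echoes the matched task ID into both the href and the anchor body,
    // e.g. "Task failed task_0_0001_r_000000" gains a /jobhistory/task link.
    return TASK_ID_PATTERN.matcher(text)
        .replaceAll("<a href=\"/jobhistory/task/$1\">$1</a>");
  }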
/**
* test HsTasksBlock's rendering.
*/
@Test
public void testHsTasksBlock() {
Task task = getTask(0);
Map<TaskId, Task> tasks = new HashMap<TaskId, Task>();
tasks.put(task.getID(), task);
AppContext ctx = mock(AppContext.class);
AppForTest app = new AppForTest(ctx);
Job job = mock(Job.class);
when(job.getTasks()).thenReturn(tasks);
app.setJob(job);
HsTasksBlockForTest block = new HsTasksBlockForTest(app);
block.addParameter(AMParams.TASK_TYPE, "r");
PrintWriter pWriter = new PrintWriter(data);
Block html = new BlockForTest(new HtmlBlockForTest(), pWriter, 0, false);
block.render(html);
pWriter.flush();
// should be printed information about task
assertTrue(data.toString().contains("task_0_0001_r_000000"));
assertTrue(data.toString().contains("SUCCEEDED"));
assertTrue(data.toString().contains("100001"));
assertTrue(data.toString().contains("100011"));
assertTrue(data.toString().contains(""));
}
/**
* test AttemptsBlock's rendering.
*/
@Test
public void testAttemptsBlock() {
AppContext ctx = mock(AppContext.class);
AppForTest app = new AppForTest(ctx);
Task task = getTask(0);
Map<TaskAttemptId, TaskAttempt> attempts = new HashMap<TaskAttemptId, TaskAttempt>();
TaskAttempt attempt = mock(TaskAttempt.class);
TaskAttemptId taId = new TaskAttemptIdPBImpl();
taId.setId(0);
taId.setTaskId(task.getID());
when(attempt.getID()).thenReturn(taId);
when(attempt.getNodeHttpAddress()).thenReturn("Node address");
ApplicationId appId = ApplicationIdPBImpl.newInstance(0, 5);
ApplicationAttemptId appAttemptId = ApplicationAttemptIdPBImpl.newInstance(appId, 1);
ContainerId containerId = ContainerIdPBImpl.newContainerId(appAttemptId, 1);
when(attempt.getAssignedContainerID()).thenReturn(containerId);
when(attempt.getAssignedContainerMgrAddress()).thenReturn(
"assignedContainerMgrAddress");
when(attempt.getNodeRackName()).thenReturn("nodeRackName");
final long taStartTime = 100002L;
final long taFinishTime = 100012L;
final long taShuffleFinishTime = 100010L;
final long taSortFinishTime = 100011L;
final TaskAttemptState taState = TaskAttemptState.SUCCEEDED;
when(attempt.getLaunchTime()).thenReturn(taStartTime);
when(attempt.getFinishTime()).thenReturn(taFinishTime);
when(attempt.getShuffleFinishTime()).thenReturn(taShuffleFinishTime);
when(attempt.getSortFinishTime()).thenReturn(taSortFinishTime);
when(attempt.getState()).thenReturn(taState);
TaskAttemptReport taReport = mock(TaskAttemptReport.class);
when(taReport.getStartTime()).thenReturn(taStartTime);
when(taReport.getFinishTime()).thenReturn(taFinishTime);
when(taReport.getShuffleFinishTime()).thenReturn(taShuffleFinishTime);
when(taReport.getSortFinishTime()).thenReturn(taSortFinishTime);
when(taReport.getContainerId()).thenReturn(containerId);
when(taReport.getProgress()).thenReturn(1.0f);
when(taReport.getStateString()).thenReturn("Processed 128/128 records <p> \n");
when(taReport.getTaskAttemptState()).thenReturn(taState);
when(taReport.getDiagnosticInfo()).thenReturn("");
when(attempt.getReport()).thenReturn(taReport);
attempts.put(taId, attempt);
when(task.getAttempts()).thenReturn(attempts);
app.setTask(task);
Job job = mock(Job.class);
when(job.getUserName()).thenReturn("User");
app.setJob(job);
AttemptsBlockForTest block = new AttemptsBlockForTest(app);
block.addParameter(AMParams.TASK_TYPE, "r");
PrintWriter pWriter = new PrintWriter(data);
Block html = new BlockForTest(new HtmlBlockForTest(), pWriter, 0, false);
block.render(html);
pWriter.flush();
// should be printed information about attempts
assertTrue(data.toString().contains("attempt_0_0001_r_000000_0"));
assertTrue(data.toString().contains("SUCCEEDED"));
assertFalse(data.toString().contains("Processed 128/128 records <p> \n"));
assertTrue(data.toString().contains("Processed 128\\/128 records <p> \\n"));
assertTrue(data.toString().contains(
"_0005_01_000001:attempt_0_0001_r_000000_0:User:"));
assertTrue(data.toString().contains("100002"));
assertTrue(data.toString().contains("100010"));
assertTrue(data.toString().contains("100011"));
assertTrue(data.toString().contains("100012"));
}
/**
* test HsJobsBlock's rendering.
*/
@Test
public void testHsJobsBlock() {
AppContext ctx = mock(AppContext.class);
Map<JobId, Job> jobs = new HashMap<JobId, Job>();
Job job = getJob();
jobs.put(job.getID(), job);
when(ctx.getAllJobs()).thenReturn(jobs);
HsJobsBlock block = new HsJobsBlockForTest(ctx);
PrintWriter pWriter = new PrintWriter(data);
Block html = new BlockForTest(new HtmlBlockForTest(), pWriter, 0, false);
block.render(html);
pWriter.flush();
assertTrue(data.toString().contains("JobName"));
assertTrue(data.toString().contains("UserName"));
assertTrue(data.toString().contains("QueueName"));
assertTrue(data.toString().contains("SUCCEEDED"));
}
/**
* test HsController
*/
@Test
public void testHsController() throws Exception {
AppContext ctx = mock(AppContext.class);
ApplicationId appId = ApplicationIdPBImpl.newInstance(0,5);
when(ctx.getApplicationID()).thenReturn(appId);
AppForTest app = new AppForTest(ctx);
Configuration config = new Configuration();
RequestContext requestCtx = mock(RequestContext.class);
HsControllerForTest controller = new HsControllerForTest(app, config,
requestCtx);
controller.index();
assertEquals("JobHistory", controller.get(Params.TITLE, ""));
assertEquals(HsJobPage.class, controller.jobPage());
assertEquals(HsCountersPage.class, controller.countersPage());
assertEquals(HsTasksPage.class, controller.tasksPage());
assertEquals(HsTaskPage.class, controller.taskPage());
assertEquals(HsAttemptsPage.class, controller.attemptsPage());
controller.set(AMParams.JOB_ID, "job_01_01");
controller.set(AMParams.TASK_ID, "task_01_01_m_01");
controller.set(AMParams.TASK_TYPE, "m");
controller.set(AMParams.ATTEMPT_STATE, "State");
Job job = mock(Job.class);
Task task = mock(Task.class);
when(job.getTask(any(TaskId.class))).thenReturn(task);
JobId jobID = MRApps.toJobID("job_01_01");
when(ctx.getJob(jobID)).thenReturn(job);
when(job.checkAccess(any(UserGroupInformation.class), any(JobACL.class)))
.thenReturn(true);
controller.job();
assertEquals(HsJobPage.class, controller.getClazz());
controller.jobCounters();
assertEquals(HsCountersPage.class, controller.getClazz());
controller.taskCounters();
assertEquals(HsCountersPage.class, controller.getClazz());
controller.tasks();
assertEquals(HsTasksPage.class, controller.getClazz());
controller.task();
assertEquals(HsTaskPage.class, controller.getClazz());
controller.attempts();
assertEquals(HsAttemptsPage.class, controller.getClazz());
assertEquals(HsConfPage.class, controller.confPage());
assertEquals(HsAboutPage.class, controller.aboutPage());
controller.about();
assertEquals(HsAboutPage.class, controller.getClazz());
controller.logs();
assertEquals(HsLogsPage.class, controller.getClazz());
controller.nmlogs();
assertEquals(AggregatedLogsPage.class, controller.getClazz());
assertEquals(HsSingleCounterPage.class, controller.singleCounterPage());
controller.singleJobCounter();
assertEquals(HsSingleCounterPage.class, controller.getClazz());
controller.singleTaskCounter();
assertEquals(HsSingleCounterPage.class, controller.getClazz());
}
private static class HsControllerForTest extends HsController {
    private static Map<String, String> params = new HashMap<String, String>();
private Class<?> clazz;
ByteArrayOutputStream data = new ByteArrayOutputStream();
public void set(String name, String value) {
params.put(name, value);
}
public String get(String key, String defaultValue) {
String value = params.get(key);
return value == null ? defaultValue : value;
}
HsControllerForTest(App app, Configuration configuration, RequestContext ctx) {
super(app, configuration, ctx);
}
@Override
public HttpServletRequest request() {
HttpServletRequest result = mock(HttpServletRequest.class);
when(result.getRemoteUser()).thenReturn("User");
return result;
}
public HttpServletResponse response() {
HttpServletResponse result = mock(HttpServletResponse.class);
try {
when(result.getWriter()).thenReturn(new PrintWriter(data));
} catch (IOException ignored) {
}
return result;
}
protected void render(Class<? extends View> cls) {
clazz = cls;
}
public Class<?> getClazz() {
return clazz;
}
}
private Job getJob() {
Job job = mock(Job.class);
JobId jobId = new JobIdPBImpl();
ApplicationId appId = ApplicationIdPBImpl.newInstance(System.currentTimeMillis(),4);
jobId.setAppId(appId);
jobId.setId(1);
when(job.getID()).thenReturn(jobId);
JobReport report = mock(JobReport.class);
when(report.getStartTime()).thenReturn(100010L);
when(report.getFinishTime()).thenReturn(100015L);
when(job.getReport()).thenReturn(report);
when(job.getName()).thenReturn("JobName");
when(job.getUserName()).thenReturn("UserName");
when(job.getQueueName()).thenReturn("QueueName");
when(job.getState()).thenReturn(JobState.SUCCEEDED);
when(job.getTotalMaps()).thenReturn(3);
when(job.getCompletedMaps()).thenReturn(2);
when(job.getTotalReduces()).thenReturn(2);
when(job.getCompletedReduces()).thenReturn(1);
return job;
}
private Task getTask(long timestamp) {
JobId jobId = new JobIdPBImpl();
jobId.setId(0);
jobId.setAppId(ApplicationIdPBImpl.newInstance(timestamp,1));
TaskId taskId = new TaskIdPBImpl();
taskId.setId(0);
taskId.setTaskType(TaskType.REDUCE);
taskId.setJobId(jobId);
Task task = mock(Task.class);
when(task.getID()).thenReturn(taskId);
TaskReport report = mock(TaskReport.class);
when(report.getProgress()).thenReturn(0.7f);
when(report.getTaskState()).thenReturn(TaskState.SUCCEEDED);
when(report.getStartTime()).thenReturn(100001L);
when(report.getFinishTime()).thenReturn(100011L);
when(task.getReport()).thenReturn(report);
when(task.getType()).thenReturn(TaskType.REDUCE);
return task;
}
private class HsJobsBlockForTest extends HsJobsBlock {
HsJobsBlockForTest(AppContext appCtx) {
super(appCtx);
}
@Override
public String url(String... parts) {
String result = "url://";
for (String string : parts) {
result += string + ":";
}
return result;
}
}
private class AttemptsBlockForTest extends AttemptsBlock {
private final Map<String, String> params = new HashMap<String, String>();
public void addParameter(String name, String value) {
params.put(name, value);
}
public String $(String key, String defaultValue) {
String value = params.get(key);
return value == null ? defaultValue : value;
}
public AttemptsBlockForTest(App ctx) {
super(ctx);
}
@Override
public String url(String... parts) {
String result = "url://";
for (String string : parts) {
result += string + ":";
}
return result;
}
}
private class HsTasksBlockForTest extends HsTasksBlock {
private final Map<String, String> params = new HashMap<String, String>();
public void addParameter(String name, String value) {
params.put(name, value);
}
public String $(String key, String defaultValue) {
String value = params.get(key);
return value == null ? defaultValue : value;
}
@Override
public String url(String... parts) {
String result = "url://";
for (String string : parts) {
result += string + ":";
}
return result;
}
public HsTasksBlockForTest(App app) {
super(app);
}
}
private class HtmlBlockForTest extends HtmlBlock {
@Override
protected void render(Block html) {
}
}
}
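Every rendering test in TestBlocks uses the same capture pattern: render the block into a PrintWriter backed by an in-memory stream, flush, then assert on the produced markup as a plain string. Stripped of the YARN webapp types, the pattern reduces to the following hypothetical sketch:
import java.io.ByteArrayOutputStream;
import java.io.PrintWriter;

public class RenderCaptureDemo {
  // Stand-in for an HtmlBlock: anything that writes markup to a writer.
  interface Renderable {
    void render(PrintWriter out);
  }

  public static void main(String[] args) {
    ByteArrayOutputStream data = new ByteArrayOutputStream();
    PrintWriter writer = new PrintWriter(data);
    Renderable block = out -> out.print("<td>SUCCEEDED</td>");
    block.render(writer);
    writer.flush(); // flush before reading, exactly as the tests do
    String html = data.toString();
    if (!html.contains("SUCCEEDED")) {
      throw new AssertionError("expected state in rendered output");
    }
  }
}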
| 16,873
| 34.154167
| 89
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/dao/TestJobInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs.webapp.dao;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.IOException;
import java.util.HashMap;
import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobACLsManager;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo;
import org.apache.hadoop.mapreduce.v2.hs.CompletedJob;
import org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEntities;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.junit.Test;
public class TestJobInfo {
@Test(timeout = 10000)
public void testAverageMergeTime() throws IOException {
String historyFileName =
"job_1329348432655_0001-1329348443227-user-Sleep+job-1329348468601-10-1-SUCCEEDED-default.jhist";
String confFileName =
"job_1329348432655_0001_conf.xml";
Configuration conf = new Configuration();
JobACLsManager jobAclsMgr = new JobACLsManager(conf);
    Path fullHistoryPath =
new Path(TestJobHistoryEntities.class.getClassLoader()
.getResource(historyFileName)
.getFile());
Path fullConfPath =
new Path(TestJobHistoryEntities.class.getClassLoader()
.getResource(confFileName)
.getFile());
HistoryFileInfo info = mock(HistoryFileInfo.class);
when(info.getConfFile()).thenReturn(fullConfPath);
    JobId jobId = MRBuilderUtils.newJobId(1329348432655L, 1, 1);
CompletedJob completedJob =
        new CompletedJob(conf, jobId, fullHistoryPath, true, "user",
info, jobAclsMgr);
JobInfo jobInfo = new JobInfo(completedJob);
// There are 2 tasks with merge time of 45 and 55 respectively. So average
// merge time should be 50.
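    // (Assuming merge time is measured per attempt as
    // sortFinishTime - shuffleFinishTime.)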
Assert.assertEquals(50L, jobInfo.getAvgMergeTime().longValue());
}
@Test
public void testAverageReduceTime() {
Job job = mock(CompletedJob.class);
final Task task1 = mock(Task.class);
final Task task2 = mock(Task.class);
JobId jobId = MRBuilderUtils.newJobId(1L, 1, 1);
final TaskId taskId1 = MRBuilderUtils.newTaskId(jobId, 1, TaskType.REDUCE);
final TaskId taskId2 = MRBuilderUtils.newTaskId(jobId, 2, TaskType.REDUCE);
final TaskAttemptId taskAttemptId1 = MRBuilderUtils.
newTaskAttemptId(taskId1, 1);
final TaskAttemptId taskAttemptId2 = MRBuilderUtils.
newTaskAttemptId(taskId2, 2);
final TaskAttempt taskAttempt1 = mock(TaskAttempt.class);
final TaskAttempt taskAttempt2 = mock(TaskAttempt.class);
JobReport jobReport = mock(JobReport.class);
when(taskAttempt1.getState()).thenReturn(TaskAttemptState.SUCCEEDED);
when(taskAttempt1.getLaunchTime()).thenReturn(0L);
when(taskAttempt1.getShuffleFinishTime()).thenReturn(4L);
when(taskAttempt1.getSortFinishTime()).thenReturn(6L);
when(taskAttempt1.getFinishTime()).thenReturn(8L);
when(taskAttempt2.getState()).thenReturn(TaskAttemptState.SUCCEEDED);
when(taskAttempt2.getLaunchTime()).thenReturn(5L);
when(taskAttempt2.getShuffleFinishTime()).thenReturn(10L);
when(taskAttempt2.getSortFinishTime()).thenReturn(22L);
when(taskAttempt2.getFinishTime()).thenReturn(42L);
when(task1.getType()).thenReturn(TaskType.REDUCE);
when(task2.getType()).thenReturn(TaskType.REDUCE);
when(task1.getAttempts()).thenReturn
(new HashMap<TaskAttemptId, TaskAttempt>()
{{put(taskAttemptId1,taskAttempt1); }});
when(task2.getAttempts()).thenReturn
(new HashMap<TaskAttemptId, TaskAttempt>()
{{put(taskAttemptId2,taskAttempt2); }});
when(job.getTasks()).thenReturn
(new HashMap<TaskId, Task>()
{{ put(taskId1,task1); put(taskId2, task2); }});
when(job.getID()).thenReturn(jobId);
when(job.getReport()).thenReturn(jobReport);
when(job.getName()).thenReturn("TestJobInfo");
when(job.getState()).thenReturn(JobState.SUCCEEDED);
JobInfo jobInfo = new JobInfo(job);
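    // Expected average, assuming JobInfo measures the reduce phase from sort
    // finish to attempt finish: attempt1 = 8 - 6 = 2, attempt2 = 42 - 22 = 20,
    // giving (2 + 20) / 2 = 11.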
Assert.assertEquals(11L, jobInfo.getAvgReduceTime().longValue());
}
}
| 5,541
| 38.585714
| 105
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/server/TestHSAdminServer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs.server;
import static org.junit.Assert.*;
import java.io.IOException;
import java.security.PrivilegedAction;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.v2.hs.JobHistory;
import org.apache.hadoop.mapreduce.v2.hs.client.HSAdmin;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.apache.hadoop.security.GroupMappingServiceProvider;
import org.apache.hadoop.security.Groups;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.reset;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import static org.mockito.Mockito.verify;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.yarn.logaggregation.AggregatedLogDeletionService;
@RunWith(Parameterized.class)
public class TestHSAdminServer {
private boolean securityEnabled = true;
private HSAdminServer hsAdminServer = null;
private HSAdmin hsAdminClient = null;
JobConf conf = null;
private static long groupRefreshTimeoutSec = 1;
JobHistory jobHistoryService = null;
AggregatedLogDeletionService alds = null;
public static class MockUnixGroupsMapping implements
GroupMappingServiceProvider {
private int i = 0;
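    // Bumped on every lookup, so successive (uncached) calls return different
    // group names; the refresh tests rely on this to detect whether the
    // Groups cache was actually cleared.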
@Override
public List<String> getGroups(String user) throws IOException {
System.out.println("Getting groups in MockUnixGroupsMapping");
String g1 = user + (10 * i + 1);
String g2 = user + (10 * i + 2);
List<String> l = new ArrayList<String>(2);
l.add(g1);
l.add(g2);
i++;
return l;
}
@Override
public void cacheGroupsRefresh() throws IOException {
System.out.println("Refreshing groups in MockUnixGroupsMapping");
}
@Override
public void cacheGroupsAdd(List<String> groups) throws IOException {
}
}
@Parameters
public static Collection<Object[]> testParameters() {
return Arrays.asList(new Object[][] { { false }, { true } });
}
public TestHSAdminServer(boolean enableSecurity) {
securityEnabled = enableSecurity;
}
@Before
public void init() throws HadoopIllegalArgumentException, IOException {
conf = new JobConf();
conf.set(JHAdminConfig.JHS_ADMIN_ADDRESS, "0.0.0.0:0");
conf.setClass("hadoop.security.group.mapping", MockUnixGroupsMapping.class,
GroupMappingServiceProvider.class);
conf.setLong("hadoop.security.groups.cache.secs", groupRefreshTimeoutSec);
conf.setBoolean(
CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION,
securityEnabled);
Groups.getUserToGroupsMappingService(conf);
jobHistoryService = mock(JobHistory.class);
alds = mock(AggregatedLogDeletionService.class);
hsAdminServer = new HSAdminServer(alds, jobHistoryService) {
@Override
protected Configuration createConf() {
return conf;
}
};
hsAdminServer.init(conf);
hsAdminServer.start();
conf.setSocketAddr(JHAdminConfig.JHS_ADMIN_ADDRESS,
hsAdminServer.clientRpcServer.getListenerAddress());
hsAdminClient = new HSAdmin(conf);
}
@Test
public void testGetGroups() throws Exception {
// Get the current user
String user = UserGroupInformation.getCurrentUser().getUserName();
String[] args = new String[2];
args[0] = "-getGroups";
args[1] = user;
// Run the getGroups command
int exitCode = hsAdminClient.run(args);
assertEquals("Exit code should be 0 but was: " + exitCode, 0, exitCode);
}
@Test
public void testRefreshUserToGroupsMappings() throws Exception {
String[] args = new String[] { "-refreshUserToGroupsMappings" };
Groups groups = Groups.getUserToGroupsMappingService(conf);
String user = UserGroupInformation.getCurrentUser().getUserName();
System.out.println("first attempt:");
List<String> g1 = groups.getGroups(user);
String[] str_groups = new String[g1.size()];
g1.toArray(str_groups);
System.out.println(Arrays.toString(str_groups));
    // Now the groups of this user have changed, but getGroups returns from the
    // cache, so we would see the same groups as before
System.out.println("second attempt, should be same:");
List<String> g2 = groups.getGroups(user);
g2.toArray(str_groups);
System.out.println(Arrays.toString(str_groups));
for (int i = 0; i < g2.size(); i++) {
assertEquals("Should be same group ", g1.get(i), g2.get(i));
}
    // run the command, which clears the cache
hsAdminClient.run(args);
System.out
.println("third attempt(after refresh command), should be different:");
// Now get groups should return new groups
List<String> g3 = groups.getGroups(user);
g3.toArray(str_groups);
System.out.println(Arrays.toString(str_groups));
for (int i = 0; i < g3.size(); i++) {
assertFalse(
"Should be different group: " + g1.get(i) + " and " + g3.get(i), g1
.get(i).equals(g3.get(i)));
}
}
@Test
public void testRefreshSuperUserGroups() throws Exception {
UserGroupInformation ugi = mock(UserGroupInformation.class);
UserGroupInformation superUser = mock(UserGroupInformation.class);
when(ugi.getRealUser()).thenReturn(superUser);
when(superUser.getShortUserName()).thenReturn("superuser");
when(superUser.getUserName()).thenReturn("superuser");
when(ugi.getGroupNames()).thenReturn(new String[] { "group3" });
when(ugi.getUserName()).thenReturn("regularUser");
// Set super user groups not to include groups of regularUser
conf.set("hadoop.proxyuser.superuser.groups", "group1,group2");
conf.set("hadoop.proxyuser.superuser.hosts", "127.0.0.1");
String[] args = new String[1];
args[0] = "-refreshSuperUserGroupsConfiguration";
hsAdminClient.run(args);
Throwable th = null;
try {
ProxyUsers.authorize(ugi, "127.0.0.1");
} catch (Exception e) {
th = e;
}
// Exception should be thrown
assertTrue(th instanceof AuthorizationException);
    // Now add regularUser's group to the superuser's proxy groups, but do not
    // run refreshSuperUserGroupsConfiguration
conf.set("hadoop.proxyuser.superuser.groups", "group1,group2,group3");
    // Again, let's run ProxyUsers.authorize and see if regularUser can be
    // impersonated
    // resetting th
th = null;
try {
ProxyUsers.authorize(ugi, "127.0.0.1");
} catch (Exception e) {
th = e;
}
// Exception should be thrown again since we didn't refresh the configs
assertTrue(th instanceof AuthorizationException);
    // Let's refresh the config by running refreshSuperUserGroupsConfiguration
hsAdminClient.run(args);
th = null;
try {
ProxyUsers.authorize(ugi, "127.0.0.1");
} catch (Exception e) {
th = e;
}
// No exception thrown since regularUser can be impersonated.
assertNull("Unexpected exception thrown: " + th, th);
}
@Test
public void testRefreshAdminAcls() throws Exception {
// Setting current user to admin acl
conf.set(JHAdminConfig.JHS_ADMIN_ACL, UserGroupInformation.getCurrentUser()
.getUserName());
String[] args = new String[1];
args[0] = "-refreshAdminAcls";
hsAdminClient.run(args);
// Now I should be able to run any hsadmin command without any exception
// being thrown
args[0] = "-refreshSuperUserGroupsConfiguration";
hsAdminClient.run(args);
// Lets remove current user from admin acl
conf.set(JHAdminConfig.JHS_ADMIN_ACL, "notCurrentUser");
args[0] = "-refreshAdminAcls";
hsAdminClient.run(args);
    // Now I should get an exception if I run any hsadmin command
Throwable th = null;
args[0] = "-refreshSuperUserGroupsConfiguration";
try {
hsAdminClient.run(args);
} catch (Exception e) {
th = e;
}
assertTrue(th instanceof RemoteException);
}
@Test
public void testRefreshLoadedJobCache() throws Exception {
String[] args = new String[1];
args[0] = "-refreshLoadedJobCache";
hsAdminClient.run(args);
verify(jobHistoryService).refreshLoadedJobCache();
}
@Test
public void testRefreshLogRetentionSettings() throws Exception {
String[] args = new String[1];
args[0] = "-refreshLogRetentionSettings";
hsAdminClient.run(args);
verify(alds).refreshLogRetentionSettings();
}
@Test
public void testRefreshJobRetentionSettings() throws Exception {
String[] args = new String[1];
args[0] = "-refreshJobRetentionSettings";
hsAdminClient.run(args);
verify(jobHistoryService).refreshJobRetentionSettings();
}
@SuppressWarnings("unchecked")
@Test
public void testUGIForLogAndJobRefresh() throws Exception {
UserGroupInformation ugi =
UserGroupInformation.createUserForTesting("test", new String[] {"grp"});
UserGroupInformation loginUGI = spy(hsAdminServer.getLoginUGI());
hsAdminServer.setLoginUGI(loginUGI);
// Run refresh log retention settings with test user
ugi.doAs(new PrivilegedAction<Void>() {
@Override
public Void run() {
String[] args = new String[1];
args[0] = "-refreshLogRetentionSettings";
try {
hsAdminClient.run(args);
} catch (Exception e) {
fail("refreshLogRetentionSettings should have been successful");
}
return null;
}
});
    // Verify that AggregatedLogDeletionService#refreshLogRetentionSettings was
    // called with the login UGI, instead of the UGI the command was run with.
verify(loginUGI).doAs(any(PrivilegedExceptionAction.class));
verify(alds).refreshLogRetentionSettings();
// Reset for refresh job retention settings
reset(loginUGI);
// Run refresh job retention settings with test user
ugi.doAs(new PrivilegedAction<Void>() {
@Override
public Void run() {
String[] args = new String[1];
args[0] = "-refreshJobRetentionSettings";
try {
hsAdminClient.run(args);
} catch (Exception e) {
fail("refreshJobRetentionSettings should have been successful");
}
return null;
}
});
    // Verify that JobHistory#refreshJobRetentionSettings was called with the
    // login UGI, instead of the UGI the command was run with.
verify(loginUGI).doAs(any(PrivilegedExceptionAction.class));
verify(jobHistoryService).refreshJobRetentionSettings();
}
@After
public void cleanUp() {
    if (hsAdminServer != null) {
      hsAdminServer.stop();
    }
}
}
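testRefreshSuperUserGroups above exercises the standard proxy-user flow: hadoop.proxyuser.<user>.groups and hadoop.proxyuser.<user>.hosts declare who a superuser may impersonate, and edits take effect only after a refresh. Below is a condensed sketch of that flow outside the HSAdmin plumbing, under the assumption that ProxyUsers.refreshSuperUserGroupsConfiguration(conf) is the applicable refresh entry point:
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.security.authorize.ProxyUsers;

public class ProxyUserDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("hadoop.proxyuser.superuser.groups", "group1,group2");
    conf.set("hadoop.proxyuser.superuser.hosts", "127.0.0.1");
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);

    UserGroupInformation superUser =
        UserGroupInformation.createRemoteUser("superuser");
    UserGroupInformation proxied =
        UserGroupInformation.createProxyUserForTesting(
            "regularUser", superUser, new String[] {"group3"});
    try {
      // group3 is not in the allowed list, so this should be rejected.
      ProxyUsers.authorize(proxied, "127.0.0.1");
      System.out.println("authorized");
    } catch (AuthorizationException expected) {
      System.out.println("rejected: " + expected.getMessage());
    }
  }
}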
| 12,024
| 33.259259
| 80
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs;
| 849
| 43.736842
| 75
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTaskAttempt.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo;
import org.apache.hadoop.mapreduce.v2.api.records.Phase;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.util.Records;
public class CompletedTaskAttempt implements TaskAttempt {
private final TaskAttemptInfo attemptInfo;
private final TaskAttemptId attemptId;
private final TaskAttemptState state;
private final List<String> diagnostics = new ArrayList<String>();
private TaskAttemptReport report;
private String localDiagMessage;
CompletedTaskAttempt(TaskId taskId, TaskAttemptInfo attemptInfo) {
this.attemptInfo = attemptInfo;
this.attemptId = TypeConverter.toYarn(attemptInfo.getAttemptId());
if (attemptInfo.getTaskStatus() != null) {
this.state = TaskAttemptState.valueOf(attemptInfo.getTaskStatus());
} else {
this.state = TaskAttemptState.KILLED;
localDiagMessage = "Attmpt state missing from History : marked as KILLED";
diagnostics.add(localDiagMessage);
}
if (attemptInfo.getError() != null) {
diagnostics.add(attemptInfo.getError());
}
}
@Override
  public NodeId getNodeId() throws UnsupportedOperationException {
throw new UnsupportedOperationException();
}
@Override
public ContainerId getAssignedContainerID() {
return attemptInfo.getContainerId();
}
@Override
public String getAssignedContainerMgrAddress() {
return attemptInfo.getHostname() + ":" + attemptInfo.getPort();
}
@Override
public String getNodeHttpAddress() {
return attemptInfo.getTrackerName() + ":" + attemptInfo.getHttpPort();
}
@Override
public String getNodeRackName() {
return attemptInfo.getRackname();
}
@Override
public Counters getCounters() {
return attemptInfo.getCounters();
}
@Override
public TaskAttemptId getID() {
return attemptId;
}
@Override
public float getProgress() {
return 1.0f;
}
@Override
public synchronized TaskAttemptReport getReport() {
if (report == null) {
constructTaskAttemptReport();
}
return report;
}
@Override
public Phase getPhase() {
return Phase.CLEANUP;
}
@Override
public TaskAttemptState getState() {
return state;
}
@Override
public boolean isFinished() {
return true;
}
@Override
public List<String> getDiagnostics() {
return diagnostics;
}
@Override
public long getLaunchTime() {
return attemptInfo.getStartTime();
}
@Override
public long getFinishTime() {
return attemptInfo.getFinishTime();
}
@Override
public long getShuffleFinishTime() {
return attemptInfo.getShuffleFinishTime();
}
@Override
public long getSortFinishTime() {
return attemptInfo.getSortFinishTime();
}
@Override
public int getShufflePort() {
return attemptInfo.getShufflePort();
}
private void constructTaskAttemptReport() {
report = Records.newRecord(TaskAttemptReport.class);
report.setTaskAttemptId(attemptId);
report.setTaskAttemptState(state);
report.setProgress(getProgress());
report.setStartTime(attemptInfo.getStartTime());
report.setFinishTime(attemptInfo.getFinishTime());
report.setShuffleFinishTime(attemptInfo.getShuffleFinishTime());
report.setSortFinishTime(attemptInfo.getSortFinishTime());
if (localDiagMessage != null) {
report
.setDiagnosticInfo(attemptInfo.getError() + ", " + localDiagMessage);
} else {
report.setDiagnosticInfo(attemptInfo.getError());
}
// report.setPhase(attemptInfo.get); //TODO
report.setStateString(attemptInfo.getState());
report.setRawCounters(getCounters());
report.setContainerId(attemptInfo.getContainerId());
if (attemptInfo.getHostname() == null) {
report.setNodeManagerHost("UNKNOWN");
} else {
report.setNodeManagerHost(attemptInfo.getHostname());
report.setNodeManagerPort(attemptInfo.getPort());
}
report.setNodeManagerHttpPort(attemptInfo.getHttpPort());
}
}
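// Illustrative sketch (not part of the Hadoop source above): the
// CompletedTaskAttempt constructor turns a possibly-missing status string
// into an enum value and falls back to KILLED with a diagnostic. The class
// below restates that null-safe valueOf pattern in isolation; the State enum
// and parseState helper are hypothetical names, not Hadoop APIs.
class StateParsingSketch {
  enum State { RUNNING, SUCCEEDED, FAILED, KILLED }

  static State parseState(String status, java.util.List<String> diagnostics) {
    if (status != null) {
      // valueOf still throws IllegalArgumentException for unknown strings,
      // exactly as the constructor above would.
      return State.valueOf(status);
    }
    // Mirror the history-server behavior: treat a missing status as KILLED
    // and record why in the diagnostics.
    diagnostics.add("Attempt state missing from History: marked as KILLED");
    return State.KILLED;
  }
}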
| 5,398
| 28.342391
| 80
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.ConnectException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.NavigableSet;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.mapred.JobACLsManager;
import org.apache.hadoop.mapreduce.jobhistory.JobSummary;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.jobhistory.FileNameIndexUtils;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.util.ShutdownThreadsHelper;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.hadoop.yarn.util.Clock;
import org.apache.hadoop.yarn.util.SystemClock;
/**
 * This class provides a way to interact with history files in a thread-safe
 * manner.
*/
@InterfaceAudience.Public
@InterfaceStability.Unstable
public class HistoryFileManager extends AbstractService {
private static final Log LOG = LogFactory.getLog(HistoryFileManager.class);
private static final Log SUMMARY_LOG = LogFactory.getLog(JobSummary.class);
private static enum HistoryInfoState {
IN_INTERMEDIATE, IN_DONE, DELETED, MOVE_FAILED
};
private static String DONE_BEFORE_SERIAL_TAIL = JobHistoryUtils
.doneSubdirsBeforeSerialTail();
/**
* Maps between a serial number (generated based on jobId) and the timestamp
   * component(s) to which it belongs. Facilitates jobId-based searches. If a
   * jobId's serial number is not in this cache, the job cannot be found.
*/
private static class SerialNumberIndex {
private SortedMap<String, Set<String>> cache;
private int maxSize;
public SerialNumberIndex(int maxSize) {
this.cache = new TreeMap<String, Set<String>>();
this.maxSize = maxSize;
}
public synchronized void add(String serialPart, String timestampPart) {
if (!cache.containsKey(serialPart)) {
cache.put(serialPart, new HashSet<String>());
if (cache.size() > maxSize) {
String key = cache.firstKey();
LOG.error("Dropping " + key
+ " from the SerialNumberIndex. We will no "
+ "longer be able to see jobs that are in that serial index for "
+ cache.get(key));
cache.remove(key);
}
}
Set<String> datePartSet = cache.get(serialPart);
datePartSet.add(timestampPart);
}
public synchronized void remove(String serialPart, String timeStampPart) {
if (cache.containsKey(serialPart)) {
Set<String> set = cache.get(serialPart);
set.remove(timeStampPart);
if (set.isEmpty()) {
cache.remove(serialPart);
}
}
}
public synchronized Set<String> get(String serialPart) {
Set<String> found = cache.get(serialPart);
if (found != null) {
return new HashSet<String>(found);
}
return null;
}
}
/**
* Wrapper around {@link ConcurrentSkipListMap} that maintains size along
* side for O(1) size() implementation for use in JobListCache.
*
   * Note: The size is not updated atomically with additions/removals. This
   * race can lead to size() returning an incorrect value at times.
*/
static class JobIdHistoryFileInfoMap {
private ConcurrentSkipListMap<JobId, HistoryFileInfo> cache;
private AtomicInteger mapSize;
JobIdHistoryFileInfoMap() {
cache = new ConcurrentSkipListMap<JobId, HistoryFileInfo>();
mapSize = new AtomicInteger();
}
public HistoryFileInfo putIfAbsent(JobId key, HistoryFileInfo value) {
HistoryFileInfo ret = cache.putIfAbsent(key, value);
if (ret == null) {
mapSize.incrementAndGet();
}
return ret;
}
public HistoryFileInfo remove(JobId key) {
HistoryFileInfo ret = cache.remove(key);
if (ret != null) {
mapSize.decrementAndGet();
}
return ret;
}
/**
     * Returns the recorded size of the internal map. Note that this could be
     * out of sync with the actual size of the map.
* @return "recorded" size
*/
public int size() {
return mapSize.get();
}
public HistoryFileInfo get(JobId key) {
return cache.get(key);
}
public NavigableSet<JobId> navigableKeySet() {
return cache.navigableKeySet();
}
public Collection<HistoryFileInfo> values() {
return cache.values();
}
}
static class JobListCache {
private JobIdHistoryFileInfoMap cache;
private int maxSize;
private long maxAge;
public JobListCache(int maxSize, long maxAge) {
this.maxSize = maxSize;
this.maxAge = maxAge;
this.cache = new JobIdHistoryFileInfoMap();
}
public HistoryFileInfo addIfAbsent(HistoryFileInfo fileInfo) {
JobId jobId = fileInfo.getJobId();
if (LOG.isDebugEnabled()) {
LOG.debug("Adding " + jobId + " to job list cache with "
+ fileInfo.getJobIndexInfo());
}
HistoryFileInfo old = cache.putIfAbsent(jobId, fileInfo);
if (cache.size() > maxSize) {
        // There is a race here, where more than one thread could be trying to
// remove entries. This could result in too many entries being removed
// from the cache. This is considered OK as the size of the cache
// should be rather large, and we would rather have performance over
// keeping the cache size exactly at the maximum.
Iterator<JobId> keys = cache.navigableKeySet().iterator();
long cutoff = System.currentTimeMillis() - maxAge;
while(cache.size() > maxSize && keys.hasNext()) {
JobId key = keys.next();
HistoryFileInfo firstValue = cache.get(key);
if(firstValue != null) {
synchronized(firstValue) {
if (firstValue.isMovePending()) {
if(firstValue.didMoveFail() &&
firstValue.jobIndexInfo.getFinishTime() <= cutoff) {
cache.remove(key);
//Now lets try to delete it
try {
firstValue.delete();
} catch (IOException e) {
LOG.error("Error while trying to delete history files" +
" that could not be moved to done.", e);
}
} else {
LOG.warn("Waiting to remove " + key
+ " from JobListCache because it is not in done yet.");
}
} else {
cache.remove(key);
}
}
}
}
}
return old;
}
public void delete(HistoryFileInfo fileInfo) {
if (LOG.isDebugEnabled()) {
LOG.debug("Removing from cache " + fileInfo);
}
cache.remove(fileInfo.getJobId());
}
public Collection<HistoryFileInfo> values() {
return new ArrayList<HistoryFileInfo>(cache.values());
}
public HistoryFileInfo get(JobId jobId) {
return cache.get(jobId);
}
public boolean isFull() {
return cache.size() >= maxSize;
}
}
/**
* This class represents a user dir in the intermediate done directory. This
* is mostly for locking purposes.
*/
private class UserLogDir {
long modTime = 0;
public synchronized void scanIfNeeded(FileStatus fs) {
long newModTime = fs.getModificationTime();
if (modTime != newModTime) {
Path p = fs.getPath();
try {
scanIntermediateDirectory(p);
//If scanning fails, we will scan again. We assume the failure is
// temporary.
modTime = newModTime;
} catch (IOException e) {
LOG.error("Error while trying to scan the directory " + p, e);
}
} else {
if (LOG.isDebugEnabled()) {
LOG.debug("Scan not needed of " + fs.getPath());
}
}
}
}
public class HistoryFileInfo {
private Path historyFile;
private Path confFile;
private Path summaryFile;
private JobIndexInfo jobIndexInfo;
private HistoryInfoState state;
@VisibleForTesting
protected HistoryFileInfo(Path historyFile, Path confFile,
Path summaryFile, JobIndexInfo jobIndexInfo, boolean isInDone) {
this.historyFile = historyFile;
this.confFile = confFile;
this.summaryFile = summaryFile;
this.jobIndexInfo = jobIndexInfo;
state = isInDone ? HistoryInfoState.IN_DONE
: HistoryInfoState.IN_INTERMEDIATE;
}
@VisibleForTesting
synchronized boolean isMovePending() {
return state == HistoryInfoState.IN_INTERMEDIATE
|| state == HistoryInfoState.MOVE_FAILED;
}
@VisibleForTesting
synchronized boolean didMoveFail() {
return state == HistoryInfoState.MOVE_FAILED;
}
/**
* @return true if the files backed by this were deleted.
*/
public synchronized boolean isDeleted() {
return state == HistoryInfoState.DELETED;
}
@Override
public String toString() {
return "HistoryFileInfo jobID " + getJobId()
+ " historyFile = " + historyFile;
}
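    // Note on moveToDone() below: it logs and deletes the summary file,
    // registers and creates the target done subdirectory, then moves the
    // history and conf files. Any failure flips the state to MOVE_FAILED so
    // a later intermediate-directory scan can retry the move or clean up.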
@VisibleForTesting
synchronized void moveToDone() throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("moveToDone: " + historyFile);
}
if (!isMovePending()) {
// It was either deleted or is already in done. Either way do nothing
if (LOG.isDebugEnabled()) {
LOG.debug("Move no longer pending");
}
return;
}
try {
long completeTime = jobIndexInfo.getFinishTime();
if (completeTime == 0) {
completeTime = System.currentTimeMillis();
}
JobId jobId = jobIndexInfo.getJobId();
List<Path> paths = new ArrayList<Path>(2);
if (historyFile == null) {
LOG.info("No file for job-history with " + jobId + " found in cache!");
} else {
paths.add(historyFile);
}
if (confFile == null) {
LOG.info("No file for jobConf with " + jobId + " found in cache!");
} else {
paths.add(confFile);
}
if (summaryFile == null || !intermediateDoneDirFc.util().exists(
summaryFile)) {
LOG.info("No summary file for job: " + jobId);
} else {
String jobSummaryString = getJobSummary(intermediateDoneDirFc,
summaryFile);
SUMMARY_LOG.info(jobSummaryString);
LOG.info("Deleting JobSummary file: [" + summaryFile + "]");
intermediateDoneDirFc.delete(summaryFile, false);
summaryFile = null;
}
Path targetDir = canonicalHistoryLogPath(jobId, completeTime);
addDirectoryToSerialNumberIndex(targetDir);
makeDoneSubdir(targetDir);
if (historyFile != null) {
Path toPath = doneDirFc.makeQualified(new Path(targetDir, historyFile
.getName()));
if (!toPath.equals(historyFile)) {
moveToDoneNow(historyFile, toPath);
historyFile = toPath;
}
}
if (confFile != null) {
Path toPath = doneDirFc.makeQualified(new Path(targetDir, confFile
.getName()));
if (!toPath.equals(confFile)) {
moveToDoneNow(confFile, toPath);
confFile = toPath;
}
}
state = HistoryInfoState.IN_DONE;
} catch (Throwable t) {
LOG.error("Error while trying to move a job to done", t);
this.state = HistoryInfoState.MOVE_FAILED;
}
}
/**
* Parse a job from the JobHistoryFile, if the underlying file is not going
* to be deleted.
*
* @return the Job or null if the underlying file was deleted.
* @throws IOException
* if there is an error trying to read the file.
*/
public synchronized Job loadJob() throws IOException {
return new CompletedJob(conf, jobIndexInfo.getJobId(), historyFile,
false, jobIndexInfo.getUser(), this, aclsMgr);
}
/**
* Return the history file. This should only be used for testing.
* @return the history file.
*/
synchronized Path getHistoryFile() {
return historyFile;
}
protected synchronized void delete() throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("deleting " + historyFile + " and " + confFile);
}
state = HistoryInfoState.DELETED;
doneDirFc.delete(doneDirFc.makeQualified(historyFile), false);
doneDirFc.delete(doneDirFc.makeQualified(confFile), false);
}
public JobIndexInfo getJobIndexInfo() {
return jobIndexInfo;
}
public JobId getJobId() {
return jobIndexInfo.getJobId();
}
public synchronized Path getConfFile() {
return confFile;
}
public synchronized Configuration loadConfFile() throws IOException {
FileContext fc = FileContext.getFileContext(confFile.toUri(), conf);
Configuration jobConf = new Configuration(false);
jobConf.addResource(fc.open(confFile), confFile.toString());
return jobConf;
}
}
private SerialNumberIndex serialNumberIndex = null;
protected JobListCache jobListCache = null;
// Maintains a list of known done subdirectories.
private final Set<Path> existingDoneSubdirs = Collections
.synchronizedSet(new HashSet<Path>());
/**
* Maintains a mapping between intermediate user directories and the last
* known modification time.
*/
private ConcurrentMap<String, UserLogDir> userDirModificationTimeMap =
new ConcurrentHashMap<String, UserLogDir>();
private JobACLsManager aclsMgr;
@VisibleForTesting
Configuration conf;
private String serialNumberFormat;
private Path doneDirPrefixPath = null; // folder for completed jobs
private FileContext doneDirFc; // done Dir FileContext
private Path intermediateDoneDirPath = null; // Intermediate Done Dir Path
private FileContext intermediateDoneDirFc; // Intermediate Done Dir
// FileContext
@VisibleForTesting
protected ThreadPoolExecutor moveToDoneExecutor = null;
private long maxHistoryAge = 0;
public HistoryFileManager() {
super(HistoryFileManager.class.getName());
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
this.conf = conf;
int serialNumberLowDigits = 3;
serialNumberFormat = ("%0"
+ (JobHistoryUtils.SERIAL_NUMBER_DIRECTORY_DIGITS + serialNumberLowDigits)
+ "d");
long maxFSWaitTime = conf.getLong(
JHAdminConfig.MR_HISTORY_MAX_START_WAIT_TIME,
JHAdminConfig.DEFAULT_MR_HISTORY_MAX_START_WAIT_TIME);
createHistoryDirs(new SystemClock(), 10 * 1000, maxFSWaitTime);
this.aclsMgr = new JobACLsManager(conf);
maxHistoryAge = conf.getLong(JHAdminConfig.MR_HISTORY_MAX_AGE_MS,
JHAdminConfig.DEFAULT_MR_HISTORY_MAX_AGE);
jobListCache = createJobListCache();
serialNumberIndex = new SerialNumberIndex(conf.getInt(
JHAdminConfig.MR_HISTORY_DATESTRING_CACHE_SIZE,
JHAdminConfig.DEFAULT_MR_HISTORY_DATESTRING_CACHE_SIZE));
int numMoveThreads = conf.getInt(
JHAdminConfig.MR_HISTORY_MOVE_THREAD_COUNT,
JHAdminConfig.DEFAULT_MR_HISTORY_MOVE_THREAD_COUNT);
ThreadFactory tf = new ThreadFactoryBuilder().setNameFormat(
"MoveIntermediateToDone Thread #%d").build();
moveToDoneExecutor = new ThreadPoolExecutor(numMoveThreads, numMoveThreads,
1, TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>(), tf);
super.serviceInit(conf);
}
@VisibleForTesting
void createHistoryDirs(Clock clock, long intervalCheckMillis,
long timeOutMillis) throws IOException {
long start = clock.getTime();
boolean done = false;
int counter = 0;
while (!done &&
((timeOutMillis == -1) || (clock.getTime() - start < timeOutMillis))) {
done = tryCreatingHistoryDirs(counter++ % 3 == 0); // log every 3 attempts, 30sec
try {
Thread.sleep(intervalCheckMillis);
} catch (InterruptedException ex) {
throw new YarnRuntimeException(ex);
}
}
if (!done) {
throw new YarnRuntimeException("Timed out '" + timeOutMillis+
"ms' waiting for FileSystem to become available");
}
}
/**
* DistributedFileSystem returns a RemoteException with a message stating
   * SafeModeException in it, so this is the only way to check whether the
   * failure is because the FileSystem is in safe mode.
*/
private boolean isBecauseSafeMode(Throwable ex) {
return ex.toString().contains("SafeModeException");
}
/**
* Returns TRUE if the history dirs were created, FALSE if they could not
* be created because the FileSystem is not reachable or in safe mode and
   * throws an exception otherwise.
*/
@VisibleForTesting
boolean tryCreatingHistoryDirs(boolean logWait) throws IOException {
boolean succeeded = true;
String doneDirPrefix = JobHistoryUtils.
getConfiguredHistoryServerDoneDirPrefix(conf);
try {
doneDirPrefixPath = FileContext.getFileContext(conf).makeQualified(
new Path(doneDirPrefix));
doneDirFc = FileContext.getFileContext(doneDirPrefixPath.toUri(), conf);
doneDirFc.setUMask(JobHistoryUtils.HISTORY_DONE_DIR_UMASK);
mkdir(doneDirFc, doneDirPrefixPath, new FsPermission(
JobHistoryUtils.HISTORY_DONE_DIR_PERMISSION));
} catch (ConnectException ex) {
if (logWait) {
LOG.info("Waiting for FileSystem at " +
            doneDirPrefixPath.toUri().getAuthority() + " to be available");
}
succeeded = false;
} catch (IOException e) {
if (isBecauseSafeMode(e)) {
succeeded = false;
if (logWait) {
LOG.info("Waiting for FileSystem at " +
doneDirPrefixPath.toUri().getAuthority() +
"to be out of safe mode");
}
} else {
throw new YarnRuntimeException("Error creating done directory: ["
+ doneDirPrefixPath + "]", e);
}
}
if (succeeded) {
String intermediateDoneDirPrefix = JobHistoryUtils.
getConfiguredHistoryIntermediateDoneDirPrefix(conf);
try {
intermediateDoneDirPath = FileContext.getFileContext(conf).makeQualified(
new Path(intermediateDoneDirPrefix));
intermediateDoneDirFc = FileContext.getFileContext(
intermediateDoneDirPath.toUri(), conf);
mkdir(intermediateDoneDirFc, intermediateDoneDirPath, new FsPermission(
JobHistoryUtils.HISTORY_INTERMEDIATE_DONE_DIR_PERMISSIONS.toShort()));
} catch (ConnectException ex) {
succeeded = false;
if (logWait) {
LOG.info("Waiting for FileSystem at " +
intermediateDoneDirPath.toUri().getAuthority() +
"to be available");
}
} catch (IOException e) {
if (isBecauseSafeMode(e)) {
succeeded = false;
if (logWait) {
LOG.info("Waiting for FileSystem at " +
intermediateDoneDirPath.toUri().getAuthority() +
"to be out of safe mode");
}
} else {
throw new YarnRuntimeException(
"Error creating intermediate done directory: ["
+ intermediateDoneDirPath + "]", e);
}
}
}
return succeeded;
}
@Override
public void serviceStop() throws Exception {
ShutdownThreadsHelper.shutdownExecutorService(moveToDoneExecutor);
super.serviceStop();
}
protected JobListCache createJobListCache() {
return new JobListCache(conf.getInt(
JHAdminConfig.MR_HISTORY_JOBLIST_CACHE_SIZE,
JHAdminConfig.DEFAULT_MR_HISTORY_JOBLIST_CACHE_SIZE), maxHistoryAge);
}
private void mkdir(FileContext fc, Path path, FsPermission fsp)
throws IOException {
if (!fc.util().exists(path)) {
try {
fc.mkdir(path, fsp, true);
FileStatus fsStatus = fc.getFileStatus(path);
LOG.info("Perms after creating " + fsStatus.getPermission().toShort()
+ ", Expected: " + fsp.toShort());
if (fsStatus.getPermission().toShort() != fsp.toShort()) {
LOG.info("Explicitly setting permissions to : " + fsp.toShort()
+ ", " + fsp);
fc.setPermission(path, fsp);
}
} catch (FileAlreadyExistsException e) {
LOG.info("Directory: [" + path + "] already exists.");
}
}
}
/**
   * Populates index data structures. Should only be called at initialization
   * time.
*/
@SuppressWarnings("unchecked")
void initExisting() throws IOException {
LOG.info("Initializing Existing Jobs...");
List<FileStatus> timestampedDirList = findTimestampedDirectories();
// Sort first just so insertion is in a consistent order
Collections.sort(timestampedDirList);
for (FileStatus fs : timestampedDirList) {
// TODO Could verify the correct format for these directories.
addDirectoryToSerialNumberIndex(fs.getPath());
}
for (int i= timestampedDirList.size() - 1;
i >= 0 && !jobListCache.isFull(); i--) {
FileStatus fs = timestampedDirList.get(i);
addDirectoryToJobListCache(fs.getPath());
}
}
private void removeDirectoryFromSerialNumberIndex(Path serialDirPath) {
String serialPart = serialDirPath.getName();
String timeStampPart = JobHistoryUtils
.getTimestampPartFromPath(serialDirPath.toString());
if (timeStampPart == null) {
LOG.warn("Could not find timestamp portion from path: "
+ serialDirPath.toString() + ". Continuing with next");
return;
}
if (serialPart == null) {
LOG.warn("Could not find serial portion from path: "
+ serialDirPath.toString() + ". Continuing with next");
return;
}
serialNumberIndex.remove(serialPart, timeStampPart);
}
private void addDirectoryToSerialNumberIndex(Path serialDirPath) {
if (LOG.isDebugEnabled()) {
LOG.debug("Adding " + serialDirPath + " to serial index");
}
String serialPart = serialDirPath.getName();
String timestampPart = JobHistoryUtils
.getTimestampPartFromPath(serialDirPath.toString());
if (timestampPart == null) {
LOG.warn("Could not find timestamp portion from path: " + serialDirPath
+ ". Continuing with next");
return;
}
if (serialPart == null) {
LOG.warn("Could not find serial portion from path: "
+ serialDirPath.toString() + ". Continuing with next");
} else {
serialNumberIndex.add(serialPart, timestampPart);
}
}
private void addDirectoryToJobListCache(Path path) throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Adding " + path + " to job list cache.");
}
List<FileStatus> historyFileList = scanDirectoryForHistoryFiles(path,
doneDirFc);
for (FileStatus fs : historyFileList) {
if (LOG.isDebugEnabled()) {
LOG.debug("Adding in history for " + fs.getPath());
}
JobIndexInfo jobIndexInfo = FileNameIndexUtils.getIndexInfo(fs.getPath()
.getName());
String confFileName = JobHistoryUtils
.getIntermediateConfFileName(jobIndexInfo.getJobId());
String summaryFileName = JobHistoryUtils
.getIntermediateSummaryFileName(jobIndexInfo.getJobId());
HistoryFileInfo fileInfo = new HistoryFileInfo(fs.getPath(), new Path(fs
.getPath().getParent(), confFileName), new Path(fs.getPath()
.getParent(), summaryFileName), jobIndexInfo, true);
jobListCache.addIfAbsent(fileInfo);
}
}
@VisibleForTesting
protected static List<FileStatus> scanDirectory(Path path, FileContext fc,
PathFilter pathFilter) throws IOException {
path = fc.makeQualified(path);
List<FileStatus> jhStatusList = new ArrayList<FileStatus>();
try {
RemoteIterator<FileStatus> fileStatusIter = fc.listStatus(path);
while (fileStatusIter.hasNext()) {
FileStatus fileStatus = fileStatusIter.next();
Path filePath = fileStatus.getPath();
if (fileStatus.isFile() && pathFilter.accept(filePath)) {
jhStatusList.add(fileStatus);
}
}
} catch (FileNotFoundException fe) {
LOG.error("Error while scanning directory " + path, fe);
}
return jhStatusList;
}
protected List<FileStatus> scanDirectoryForHistoryFiles(Path path,
FileContext fc) throws IOException {
return scanDirectory(path, fc, JobHistoryUtils.getHistoryFileFilter());
}
/**
* Finds all history directories with a timestamp component by scanning the
* filesystem. Used when the JobHistory server is started.
*
* @return list of history directories
*/
protected List<FileStatus> findTimestampedDirectories() throws IOException {
List<FileStatus> fsList = JobHistoryUtils.localGlobber(doneDirFc,
doneDirPrefixPath, DONE_BEFORE_SERIAL_TAIL);
return fsList;
}
/**
* Scans the intermediate directory to find user directories. Scans these for
* history files if the modification time for the directory has changed. Once
* it finds history files it starts the process of moving them to the done
* directory.
*
* @throws IOException
   *           if there was an error while scanning
*/
void scanIntermediateDirectory() throws IOException {
// TODO it would be great to limit how often this happens, except in the
// case where we are looking for a particular job.
List<FileStatus> userDirList = JobHistoryUtils.localGlobber(
intermediateDoneDirFc, intermediateDoneDirPath, "");
LOG.debug("Scanning intermediate dirs");
for (FileStatus userDir : userDirList) {
String name = userDir.getPath().getName();
UserLogDir dir = userDirModificationTimeMap.get(name);
if(dir == null) {
dir = new UserLogDir();
UserLogDir old = userDirModificationTimeMap.putIfAbsent(name, dir);
if(old != null) {
dir = old;
}
}
dir.scanIfNeeded(userDir);
}
}
/**
* Scans the specified path and populates the intermediate cache.
*
* @param absPath
* @throws IOException
*/
private void scanIntermediateDirectory(final Path absPath) throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Scanning intermediate dir " + absPath);
}
List<FileStatus> fileStatusList = scanDirectoryForHistoryFiles(absPath,
intermediateDoneDirFc);
if (LOG.isDebugEnabled()) {
LOG.debug("Found " + fileStatusList.size() + " files");
}
for (FileStatus fs : fileStatusList) {
if (LOG.isDebugEnabled()) {
LOG.debug("scanning file: "+ fs.getPath());
}
JobIndexInfo jobIndexInfo = FileNameIndexUtils.getIndexInfo(fs.getPath()
.getName());
String confFileName = JobHistoryUtils
.getIntermediateConfFileName(jobIndexInfo.getJobId());
String summaryFileName = JobHistoryUtils
.getIntermediateSummaryFileName(jobIndexInfo.getJobId());
HistoryFileInfo fileInfo = new HistoryFileInfo(fs.getPath(), new Path(fs
.getPath().getParent(), confFileName), new Path(fs.getPath()
.getParent(), summaryFileName), jobIndexInfo, false);
final HistoryFileInfo old = jobListCache.addIfAbsent(fileInfo);
if (old == null || old.didMoveFail()) {
final HistoryFileInfo found = (old == null) ? fileInfo : old;
long cutoff = System.currentTimeMillis() - maxHistoryAge;
if(found.getJobIndexInfo().getFinishTime() <= cutoff) {
try {
found.delete();
} catch (IOException e) {
LOG.warn("Error cleaning up a HistoryFile that is out of date.", e);
}
} else {
if (LOG.isDebugEnabled()) {
LOG.debug("Scheduling move to done of " +found);
}
moveToDoneExecutor.execute(new Runnable() {
@Override
public void run() {
try {
found.moveToDone();
} catch (IOException e) {
LOG.info("Failed to process fileInfo for job: " +
found.getJobId(), e);
}
}
});
}
} else if (!old.isMovePending()) {
//This is a duplicate so just delete it
if (LOG.isDebugEnabled()) {
LOG.debug("Duplicate: deleting");
}
fileInfo.delete();
}
}
}
/**
* Searches the job history file FileStatus list for the specified JobId.
*
* @param fileStatusList
* fileStatus list of Job History Files.
* @param jobId
* The JobId to find.
* @return A FileInfo object for the jobId, null if not found.
* @throws IOException
*/
private HistoryFileInfo getJobFileInfo(List<FileStatus> fileStatusList,
JobId jobId) throws IOException {
for (FileStatus fs : fileStatusList) {
JobIndexInfo jobIndexInfo = FileNameIndexUtils.getIndexInfo(fs.getPath()
.getName());
if (jobIndexInfo.getJobId().equals(jobId)) {
String confFileName = JobHistoryUtils
.getIntermediateConfFileName(jobIndexInfo.getJobId());
String summaryFileName = JobHistoryUtils
.getIntermediateSummaryFileName(jobIndexInfo.getJobId());
HistoryFileInfo fileInfo = new HistoryFileInfo(fs.getPath(), new Path(
fs.getPath().getParent(), confFileName), new Path(fs.getPath()
.getParent(), summaryFileName), jobIndexInfo, true);
return fileInfo;
}
}
return null;
}
/**
   * Scans old directories known by the serial number index for the specified
   * jobId. If the number of directories is higher than the supported size of
   * the serial number index cache, the jobId will not be found.
*
* @param jobId
* the jobId.
   * @return the HistoryFileInfo for the job, or null if it was not found.
* @throws IOException
*/
private HistoryFileInfo scanOldDirsForJob(JobId jobId) throws IOException {
String boxedSerialNumber = JobHistoryUtils.serialNumberDirectoryComponent(
jobId, serialNumberFormat);
Set<String> dateStringSet = serialNumberIndex.get(boxedSerialNumber);
if (dateStringSet == null) {
return null;
}
for (String timestampPart : dateStringSet) {
Path logDir = canonicalHistoryLogPath(jobId, timestampPart);
List<FileStatus> fileStatusList = scanDirectoryForHistoryFiles(logDir,
doneDirFc);
HistoryFileInfo fileInfo = getJobFileInfo(fileStatusList, jobId);
if (fileInfo != null) {
return fileInfo;
}
}
return null;
}
public Collection<HistoryFileInfo> getAllFileInfo() throws IOException {
scanIntermediateDirectory();
return jobListCache.values();
}
public HistoryFileInfo getFileInfo(JobId jobId) throws IOException {
// FileInfo available in cache.
HistoryFileInfo fileInfo = jobListCache.get(jobId);
if (fileInfo != null) {
return fileInfo;
}
// OK so scan the intermediate to be sure we did not lose it that way
scanIntermediateDirectory();
fileInfo = jobListCache.get(jobId);
if (fileInfo != null) {
return fileInfo;
}
// Intermediate directory does not contain job. Search through older ones.
fileInfo = scanOldDirsForJob(jobId);
if (fileInfo != null) {
return fileInfo;
}
return null;
}
private void moveToDoneNow(final Path src, final Path target)
throws IOException {
LOG.info("Moving " + src.toString() + " to " + target.toString());
intermediateDoneDirFc.rename(src, target, Options.Rename.NONE);
}
  private String getJobSummary(FileContext fc, Path path) throws IOException {
    Path qPath = fc.makeQualified(path);
    FSDataInputStream in = fc.open(qPath);
    try {
      // Ensure the stream is closed even if readUTF() throws, so the
      // file descriptor is not leaked.
      return in.readUTF();
    } finally {
      in.close();
    }
  }
private void makeDoneSubdir(Path path) throws IOException {
try {
doneDirFc.getFileStatus(path);
existingDoneSubdirs.add(path);
} catch (FileNotFoundException fnfE) {
try {
FsPermission fsp = new FsPermission(
JobHistoryUtils.HISTORY_DONE_DIR_PERMISSION);
doneDirFc.mkdir(path, fsp, true);
FileStatus fsStatus = doneDirFc.getFileStatus(path);
LOG.info("Perms after creating " + fsStatus.getPermission().toShort()
+ ", Expected: " + fsp.toShort());
if (fsStatus.getPermission().toShort() != fsp.toShort()) {
LOG.info("Explicitly setting permissions to : " + fsp.toShort()
+ ", " + fsp);
doneDirFc.setPermission(path, fsp);
}
existingDoneSubdirs.add(path);
} catch (FileAlreadyExistsException faeE) { // Nothing to do.
}
}
}
private Path canonicalHistoryLogPath(JobId id, String timestampComponent) {
return new Path(doneDirPrefixPath, JobHistoryUtils.historyLogSubdirectory(
id, timestampComponent, serialNumberFormat));
}
private Path canonicalHistoryLogPath(JobId id, long millisecondTime) {
String timestampComponent = JobHistoryUtils
.timestampDirectoryComponent(millisecondTime);
return new Path(doneDirPrefixPath, JobHistoryUtils.historyLogSubdirectory(
id, timestampComponent, serialNumberFormat));
}
private long getEffectiveTimestamp(long finishTime, FileStatus fileStatus) {
if (finishTime == 0) {
return fileStatus.getModificationTime();
}
return finishTime;
}
private void deleteJobFromDone(HistoryFileInfo fileInfo) throws IOException {
jobListCache.delete(fileInfo);
fileInfo.delete();
}
List<FileStatus> getHistoryDirsForCleaning(long cutoff) throws IOException {
return JobHistoryUtils.
getHistoryDirsForCleaning(doneDirFc, doneDirPrefixPath, cutoff);
}
/**
* Clean up older history files.
*
* @throws IOException
* on any error trying to remove the entries.
*/
@SuppressWarnings("unchecked")
void clean() throws IOException {
long cutoff = System.currentTimeMillis() - maxHistoryAge;
boolean halted = false;
List<FileStatus> serialDirList = getHistoryDirsForCleaning(cutoff);
// Sort in ascending order. Relies on YYYY/MM/DD/Serial
Collections.sort(serialDirList);
for (FileStatus serialDir : serialDirList) {
List<FileStatus> historyFileList = scanDirectoryForHistoryFiles(
serialDir.getPath(), doneDirFc);
for (FileStatus historyFile : historyFileList) {
JobIndexInfo jobIndexInfo = FileNameIndexUtils.getIndexInfo(historyFile
.getPath().getName());
long effectiveTimestamp = getEffectiveTimestamp(
jobIndexInfo.getFinishTime(), historyFile);
if (effectiveTimestamp <= cutoff) {
HistoryFileInfo fileInfo = this.jobListCache.get(jobIndexInfo
.getJobId());
if (fileInfo == null) {
String confFileName = JobHistoryUtils
.getIntermediateConfFileName(jobIndexInfo.getJobId());
fileInfo = new HistoryFileInfo(historyFile.getPath(), new Path(
historyFile.getPath().getParent(), confFileName), null,
jobIndexInfo, true);
}
deleteJobFromDone(fileInfo);
} else {
halted = true;
break;
}
}
if (!halted) {
deleteDir(serialDir);
removeDirectoryFromSerialNumberIndex(serialDir.getPath());
existingDoneSubdirs.remove(serialDir.getPath());
} else {
break; // Don't scan any more directories.
}
}
}
protected boolean deleteDir(FileStatus serialDir)
throws AccessControlException, FileNotFoundException,
UnsupportedFileSystemException, IOException {
return doneDirFc.delete(doneDirFc.makeQualified(serialDir.getPath()), true);
}
@VisibleForTesting
  protected void setMaxHistoryAge(long newValue) {
    maxHistoryAge = newValue;
}
}
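// Illustrative sketch (not part of the Hadoop source above): SerialNumberIndex
// is a bounded multimap that evicts the smallest serial key once maxSize is
// exceeded, which is why jobs can become unfindable when the done directory
// outgrows the configured cache size. BoundedMultimapSketch is a hypothetical,
// minimal restatement of that policy.
class BoundedMultimapSketch {
  private final java.util.TreeMap<String, java.util.Set<String>> cache =
      new java.util.TreeMap<String, java.util.Set<String>>();
  private final int maxSize;

  BoundedMultimapSketch(int maxSize) {
    this.maxSize = maxSize;
  }

  synchronized void add(String key, String value) {
    if (!cache.containsKey(key)) {
      cache.put(key, new java.util.HashSet<String>());
      if (cache.size() > maxSize) {
        // Evict the lowest key; lookups under it will now miss, mirroring
        // the "will no longer be able to see jobs" error logged above.
        cache.remove(cache.firstKey());
      }
    }
    java.util.Set<String> values = cache.get(key);
    if (values != null) { // null only if the new key itself was evicted
      values.add(value);
    }
  }

  synchronized java.util.Set<String> get(String key) {
    java.util.Set<String> found = cache.get(key);
    // Return a defensive copy, as SerialNumberIndex.get() does.
    return found == null ? null : new java.util.HashSet<String>(found);
  }
}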
| 38,492
| 34.444751
| 87
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobACLsManager;
import org.apache.hadoop.mapred.TaskCompletionEvent;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.counters.Limits;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo;
import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEventStatus;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo;
import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.util.Records;
/**
* Loads the basic job level data upfront.
* Data from job history file is loaded lazily.
*/
public class CompletedJob implements org.apache.hadoop.mapreduce.v2.app.job.Job {
static final Log LOG = LogFactory.getLog(CompletedJob.class);
private final Configuration conf;
private final JobId jobId; //Can be picked from JobInfo with a conversion.
private final String user; //Can be picked up from JobInfo
private final HistoryFileInfo info;
private JobInfo jobInfo;
private JobReport report;
AtomicBoolean tasksLoaded = new AtomicBoolean(false);
private Lock tasksLock = new ReentrantLock();
private Map<TaskId, Task> tasks = new HashMap<TaskId, Task>();
private Map<TaskId, Task> mapTasks = new HashMap<TaskId, Task>();
private Map<TaskId, Task> reduceTasks = new HashMap<TaskId, Task>();
private List<TaskAttemptCompletionEvent> completionEvents = null;
private List<TaskAttemptCompletionEvent> mapCompletionEvents = null;
private JobACLsManager aclsMgr;
public CompletedJob(Configuration conf, JobId jobId, Path historyFile,
boolean loadTasks, String userName, HistoryFileInfo info,
JobACLsManager aclsMgr)
throws IOException {
LOG.info("Loading job: " + jobId + " from file: " + historyFile);
this.conf = conf;
this.jobId = jobId;
this.user = userName;
this.info = info;
this.aclsMgr = aclsMgr;
loadFullHistoryData(loadTasks, historyFile);
}
@Override
public int getCompletedMaps() {
return (int) jobInfo.getFinishedMaps();
}
@Override
public int getCompletedReduces() {
return (int) jobInfo.getFinishedReduces();
}
@Override
public Counters getAllCounters() {
return jobInfo.getTotalCounters();
}
@Override
public JobId getID() {
return jobId;
}
@Override
public synchronized JobReport getReport() {
if (report == null) {
constructJobReport();
}
return report;
}
private void constructJobReport() {
report = Records.newRecord(JobReport.class);
report.setJobId(jobId);
report.setJobState(JobState.valueOf(jobInfo.getJobStatus()));
report.setSubmitTime(jobInfo.getSubmitTime());
report.setStartTime(jobInfo.getLaunchTime());
report.setFinishTime(jobInfo.getFinishTime());
report.setJobName(jobInfo.getJobname());
report.setUser(jobInfo.getUsername());
if ( getTotalMaps() == 0 ) {
report.setMapProgress(1.0f);
} else {
report.setMapProgress((float) getCompletedMaps() / getTotalMaps());
}
if ( getTotalReduces() == 0 ) {
report.setReduceProgress(1.0f);
} else {
report.setReduceProgress((float) getCompletedReduces() / getTotalReduces());
}
report.setJobFile(getConfFile().toString());
String historyUrl = "N/A";
try {
historyUrl =
MRWebAppUtil.getApplicationWebURLOnJHSWithScheme(conf,
jobId.getAppId());
} catch (UnknownHostException e) {
LOG.error("Problem determining local host: " + e.getMessage());
}
report.setTrackingUrl(historyUrl);
report.setAMInfos(getAMInfos());
report.setIsUber(isUber());
}
@Override
public float getProgress() {
return 1.0f;
}
@Override
public JobState getState() {
return JobState.valueOf(jobInfo.getJobStatus());
}
@Override
public Task getTask(TaskId taskId) {
if (tasksLoaded.get()) {
return tasks.get(taskId);
} else {
TaskID oldTaskId = TypeConverter.fromYarn(taskId);
CompletedTask completedTask =
new CompletedTask(taskId, jobInfo.getAllTasks().get(oldTaskId));
return completedTask;
}
}
@Override
public synchronized TaskAttemptCompletionEvent[] getTaskAttemptCompletionEvents(
int fromEventId, int maxEvents) {
if (completionEvents == null) {
constructTaskAttemptCompletionEvents();
}
return getAttemptCompletionEvents(completionEvents,
fromEventId, maxEvents);
}
@Override
public synchronized TaskCompletionEvent[] getMapAttemptCompletionEvents(
int startIndex, int maxEvents) {
if (mapCompletionEvents == null) {
constructTaskAttemptCompletionEvents();
}
return TypeConverter.fromYarn(getAttemptCompletionEvents(
mapCompletionEvents, startIndex, maxEvents));
}
private static TaskAttemptCompletionEvent[] getAttemptCompletionEvents(
List<TaskAttemptCompletionEvent> eventList,
int startIndex, int maxEvents) {
TaskAttemptCompletionEvent[] events = new TaskAttemptCompletionEvent[0];
if (eventList.size() > startIndex) {
int actualMax = Math.min(maxEvents,
(eventList.size() - startIndex));
events = eventList.subList(startIndex, actualMax + startIndex)
.toArray(events);
}
return events;
}
private void constructTaskAttemptCompletionEvents() {
loadAllTasks();
completionEvents = new LinkedList<TaskAttemptCompletionEvent>();
List<TaskAttempt> allTaskAttempts = new LinkedList<TaskAttempt>();
int numMapAttempts = 0;
for (Map.Entry<TaskId,Task> taskEntry : tasks.entrySet()) {
Task task = taskEntry.getValue();
for (Map.Entry<TaskAttemptId,TaskAttempt> taskAttemptEntry : task.getAttempts().entrySet()) {
TaskAttempt taskAttempt = taskAttemptEntry.getValue();
allTaskAttempts.add(taskAttempt);
if (task.getType() == TaskType.MAP) {
++numMapAttempts;
}
}
}
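    // Sort attempts into the order completion events would have been issued:
    // finished attempts ascend by finish time, attempts without a finish time
    // sort after finished ones, and launch time breaks the remaining ties
    // (never-launched attempts last).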
Collections.sort(allTaskAttempts, new Comparator<TaskAttempt>() {
@Override
public int compare(TaskAttempt o1, TaskAttempt o2) {
if (o1.getFinishTime() == 0 || o2.getFinishTime() == 0) {
if (o1.getFinishTime() == 0 && o2.getFinishTime() == 0) {
if (o1.getLaunchTime() == 0 || o2.getLaunchTime() == 0) {
if (o1.getLaunchTime() == 0 && o2.getLaunchTime() == 0) {
return 0;
} else {
long res = o1.getLaunchTime() - o2.getLaunchTime();
return res > 0 ? -1 : 1;
}
} else {
return (int) (o1.getLaunchTime() - o2.getLaunchTime());
}
} else {
long res = o1.getFinishTime() - o2.getFinishTime();
return res > 0 ? -1 : 1;
}
} else {
return (int) (o1.getFinishTime() - o2.getFinishTime());
}
}
});
mapCompletionEvents =
new ArrayList<TaskAttemptCompletionEvent>(numMapAttempts);
int eventId = 0;
for (TaskAttempt taskAttempt : allTaskAttempts) {
TaskAttemptCompletionEvent tace =
Records.newRecord(TaskAttemptCompletionEvent.class);
int attemptRunTime = -1;
if (taskAttempt.getLaunchTime() != 0 && taskAttempt.getFinishTime() != 0) {
attemptRunTime =
(int) (taskAttempt.getFinishTime() - taskAttempt.getLaunchTime());
}
// Default to KILLED
TaskAttemptCompletionEventStatus taceStatus =
TaskAttemptCompletionEventStatus.KILLED;
String taStateString = taskAttempt.getState().toString();
try {
taceStatus = TaskAttemptCompletionEventStatus.valueOf(taStateString);
} catch (Exception e) {
LOG.warn("Cannot constuct TACEStatus from TaskAtemptState: ["
+ taStateString + "] for taskAttemptId: [" + taskAttempt.getID()
+ "]. Defaulting to KILLED");
}
tace.setAttemptId(taskAttempt.getID());
tace.setAttemptRunTime(attemptRunTime);
tace.setEventId(eventId++);
tace.setMapOutputServerAddress(taskAttempt
.getAssignedContainerMgrAddress());
tace.setStatus(taceStatus);
completionEvents.add(tace);
if (taskAttempt.getID().getTaskId().getTaskType() == TaskType.MAP) {
mapCompletionEvents.add(tace);
}
}
}
@Override
public Map<TaskId, Task> getTasks() {
loadAllTasks();
return tasks;
}
private void loadAllTasks() {
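    // Double-checked locking: take the lock-free fast path when the tasks
    // have already been loaded, then re-check tasksLoaded under tasksLock so
    // the history file's tasks are parsed into the task maps exactly once.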
if (tasksLoaded.get()) {
return;
}
tasksLock.lock();
try {
if (tasksLoaded.get()) {
return;
}
for (Map.Entry<TaskID, TaskInfo> entry : jobInfo.getAllTasks().entrySet()) {
TaskId yarnTaskID = TypeConverter.toYarn(entry.getKey());
TaskInfo taskInfo = entry.getValue();
Task task = new CompletedTask(yarnTaskID, taskInfo);
tasks.put(yarnTaskID, task);
if (task.getType() == TaskType.MAP) {
mapTasks.put(task.getID(), task);
} else if (task.getType() == TaskType.REDUCE) {
reduceTasks.put(task.getID(), task);
}
}
tasksLoaded.set(true);
} finally {
tasksLock.unlock();
}
}
  // History data is lazily loaded when task-level data is requested
protected synchronized void loadFullHistoryData(boolean loadTasks,
Path historyFileAbsolute) throws IOException {
LOG.info("Loading history file: [" + historyFileAbsolute + "]");
if (this.jobInfo != null) {
return;
}
if (historyFileAbsolute != null) {
JobHistoryParser parser = null;
try {
final FileSystem fs = historyFileAbsolute.getFileSystem(conf);
parser = new JobHistoryParser(fs, historyFileAbsolute);
final Path jobConfPath = new Path(historyFileAbsolute.getParent(),
JobHistoryUtils.getIntermediateConfFileName(jobId));
final Configuration conf = new Configuration();
try {
conf.addResource(fs.open(jobConfPath), jobConfPath.toString());
Limits.reset(conf);
} catch (FileNotFoundException fnf) {
if (LOG.isWarnEnabled()) {
LOG.warn("Missing job conf in history", fnf);
}
}
this.jobInfo = parser.parse();
} catch (IOException e) {
throw new YarnRuntimeException("Could not load history file "
+ historyFileAbsolute, e);
}
IOException parseException = parser.getParseException();
if (parseException != null) {
throw new YarnRuntimeException(
"Could not parse history file " + historyFileAbsolute,
parseException);
}
} else {
throw new IOException("History file not found");
}
if (loadTasks) {
loadAllTasks();
LOG.info("TaskInfo loaded");
}
}
@Override
public List<String> getDiagnostics() {
return Collections.singletonList(jobInfo.getErrorInfo());
}
@Override
public String getName() {
return jobInfo.getJobname();
}
@Override
public String getQueueName() {
return jobInfo.getJobQueueName();
}
@Override
public int getTotalMaps() {
return (int) jobInfo.getTotalMaps();
}
@Override
public int getTotalReduces() {
return (int) jobInfo.getTotalReduces();
}
@Override
public boolean isUber() {
return jobInfo.getUberized();
}
@Override
public Map<TaskId, Task> getTasks(TaskType taskType) {
loadAllTasks();
if (TaskType.MAP.equals(taskType)) {
return mapTasks;
    } else { // we have only two types of tasks
return reduceTasks;
}
}
@Override
public
boolean checkAccess(UserGroupInformation callerUGI, JobACL jobOperation) {
Map<JobACL, AccessControlList> jobACLs = jobInfo.getJobACLs();
AccessControlList jobACL = jobACLs.get(jobOperation);
if (jobACL == null) {
return true;
}
return aclsMgr.checkAccess(callerUGI, jobOperation,
jobInfo.getUsername(), jobACL);
}
/*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.app.job.Job#getJobACLs()
*/
@Override
public Map<JobACL, AccessControlList> getJobACLs() {
return jobInfo.getJobACLs();
}
@Override
public String getUserName() {
return user;
}
/*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.app.job.Job#getConfFile()
*/
@Override
public Path getConfFile() {
return info.getConfFile();
}
/*
* (non-Javadoc)
* @see org.apache.hadoop.mapreduce.v2.app.job.Job#loadConfFile()
*/
@Override
public Configuration loadConfFile() throws IOException {
return info.loadConfFile();
}
@Override
public List<AMInfo> getAMInfos() {
List<AMInfo> amInfos = new LinkedList<AMInfo>();
for (org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.AMInfo jhAmInfo : jobInfo
.getAMInfos()) {
AMInfo amInfo =
MRBuilderUtils.newAMInfo(jhAmInfo.getAppAttemptId(),
jhAmInfo.getStartTime(), jhAmInfo.getContainerId(),
jhAmInfo.getNodeManagerHost(), jhAmInfo.getNodeManagerPort(),
jhAmInfo.getNodeManagerHttpPort());
amInfos.add(amInfo);
}
return amInfos;
}
@Override
public void setQueueName(String queueName) {
throw new UnsupportedOperationException("Can't set job's queue name in history");
}
}
| 15,997
| 31.985567
| 99
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryStorage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs;
import java.util.Map;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobsInfo;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Provides an API to query jobs that have finished.
*
 * For those implementing this API, be aware that there is no feedback when
* files are removed from HDFS. You may rely on HistoryFileManager to help
* you know when that has happened if you have not made a complete backup of
* the data stored on HDFS.
*/
@InterfaceAudience.Public
@InterfaceStability.Unstable
public interface HistoryStorage {
/**
* Give the Storage a reference to a class that can be used to interact with
* history files.
* @param hsManager the class that is used to interact with history files.
*/
void setHistoryFileManager(HistoryFileManager hsManager);
/**
* Look for a set of partial jobs.
* @param offset the offset into the list of jobs.
* @param count the maximum number of jobs to return.
* @param user only return jobs for the given user.
   * @param queue only return jobs in the given queue.
* @param sBegin only return Jobs that started on or after the given time.
* @param sEnd only return Jobs that started on or before the given time.
* @param fBegin only return Jobs that ended on or after the given time.
* @param fEnd only return Jobs that ended on or before the given time.
* @param jobState only return Jobs that are in the given job state.
* @return The list of filtered jobs.
*/
JobsInfo getPartialJobs(Long offset, Long count, String user,
String queue, Long sBegin, Long sEnd, Long fBegin, Long fEnd,
JobState jobState);
/**
* Get all of the cached jobs. This only returns partial jobs and is here for
* legacy reasons.
* @return all of the cached jobs
*/
Map<JobId, Job> getAllPartialJobs();
/**
* Get a fully parsed job.
* @param jobId the id of the job
* @return the job, or null if it is not found.
*/
Job getFullJob(JobId jobId);
}
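// Illustrative sketch (not part of the Hadoop source above): getPartialJobs
// is a paged, filtered view over cached jobs. The hypothetical matches()
// helper below restates the documented parameter semantics against plain
// fields, assuming a null parameter means "no constraint".
class PartialJobFilterSketch {
  static boolean matches(String jobUser, String jobQueue, long startTime,
      long finishTime, String user, String queue, Long sBegin, Long sEnd,
      Long fBegin, Long fEnd) {
    if (user != null && !user.equals(jobUser)) return false;
    if (queue != null && !queue.equals(jobQueue)) return false;
    if (sBegin != null && startTime < sBegin) return false;  // started on/after
    if (sEnd != null && startTime > sEnd) return false;      // started on/before
    if (fBegin != null && finishTime < fBegin) return false; // ended on/after
    if (fEnd != null && finishTime > fEnd) return false;     // ended on/before
    return true;
  }
}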
| 3,072
| 37.4125
| 80
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HSAuditLogger.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs;
import java.net.InetAddress;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.ipc.Server;
@Private
public class HSAuditLogger {
private static final Log LOG = LogFactory.getLog(HSAuditLogger.class);
static enum Keys {
USER, OPERATION, TARGET, RESULT, IP, PERMISSIONS, DESCRIPTION
}
public static class AuditConstants {
static final String SUCCESS = "SUCCESS";
static final String FAILURE = "FAILURE";
static final String KEY_VAL_SEPARATOR = "=";
static final char PAIR_SEPARATOR = '\t';
// Some commonly used descriptions
public static final String UNAUTHORIZED_USER = "Unauthorized user";
}
/**
* Create a readable and parseable audit log string for a successful event.
*
* @param user
* User who made the service request.
* @param operation
* Operation requested by the user.
* @param target
* The target on which the operation is being performed.
*
* <br>
* <br>
* Note that the {@link HSAuditLogger} uses tabs ('\t') as a key-val
   *           delimiter and hence the value fields should not contain tabs
* ('\t').
*/
public static void logSuccess(String user, String operation, String target) {
if (LOG.isInfoEnabled()) {
LOG.info(createSuccessLog(user, operation, target));
}
}
/**
* A helper api for creating an audit log for a successful event.
*/
static String createSuccessLog(String user, String operation, String target) {
StringBuilder b = new StringBuilder();
start(Keys.USER, user, b);
addRemoteIP(b);
add(Keys.OPERATION, operation, b);
add(Keys.TARGET, target, b);
add(Keys.RESULT, AuditConstants.SUCCESS, b);
return b.toString();
}
/**
* A helper api to add remote IP address
*/
static void addRemoteIP(StringBuilder b) {
InetAddress ip = Server.getRemoteIp();
// ip address can be null for testcases
if (ip != null) {
add(Keys.IP, ip.getHostAddress(), b);
}
}
/**
* Appends the key-val pair to the passed builder in the following format
* <pair-delim>key=value
*/
static void add(Keys key, String value, StringBuilder b) {
b.append(AuditConstants.PAIR_SEPARATOR).append(key.name())
.append(AuditConstants.KEY_VAL_SEPARATOR).append(value);
}
/**
* Adds the first key-val pair to the passed builder in the following format
* key=value
*/
static void start(Keys key, String value, StringBuilder b) {
b.append(key.name()).append(AuditConstants.KEY_VAL_SEPARATOR).append(value);
}
/**
* Create a readable and parseable audit log string for a failed event.
*
* @param user
* User who made the service request.
* @param operation
* Operation requested by the user.
* @param perm
* Target permissions.
* @param target
* The target on which the operation is being performed.
* @param description
* Some additional information as to why the operation failed.
*
* <br>
* <br>
* Note that the {@link HSAuditLogger} uses tabs ('\t') as a key-val
   *           delimiter and hence the value fields should not contain tabs
* ('\t').
*/
public static void logFailure(String user, String operation, String perm,
String target, String description) {
if (LOG.isWarnEnabled()) {
LOG.warn(createFailureLog(user, operation, perm, target, description));
}
}
/**
* A helper api for creating an audit log for a failure event.
*/
static String createFailureLog(String user, String operation, String perm,
String target, String description) {
StringBuilder b = new StringBuilder();
start(Keys.USER, user, b);
addRemoteIP(b);
add(Keys.OPERATION, operation, b);
add(Keys.TARGET, target, b);
add(Keys.RESULT, AuditConstants.FAILURE, b);
add(Keys.DESCRIPTION, description, b);
add(Keys.PERMISSIONS, perm, b);
return b.toString();
}
}
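// Illustrative sketch (not part of the Hadoop source above): the logger
// emits one line of tab-separated KEY=value pairs, starting with USER and
// ending with RESULT (plus DESCRIPTION and PERMISSIONS on failure). The
// hypothetical demo below rebuilds the success-log layout by hand.
class AuditFormatSketch {
  public static void main(String[] args) {
    StringBuilder b = new StringBuilder();
    b.append("USER").append('=').append("alice");                    // start(...)
    b.append('\t').append("OPERATION").append('=').append("getJob"); // add(...)
    b.append('\t').append("TARGET").append('=').append("job_1_0001");
    b.append('\t').append("RESULT").append('=').append("SUCCESS");
    // Prints the four pairs on one line, separated by tab characters.
    System.out.println(b);
  }
}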
| 4,982
| 31.568627
| 80
|
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HSProxies.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.mapreduce.v2.api.HSAdminRefreshProtocol;
import org.apache.hadoop.mapreduce.v2.api.HSAdminRefreshProtocolPB;
import org.apache.hadoop.mapreduce.v2.hs.protocolPB.HSAdminRefreshProtocolClientSideTranslatorPB;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.RefreshUserMappingsProtocol;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.protocolPB.RefreshUserMappingsProtocolClientSideTranslatorPB;
import org.apache.hadoop.security.protocolPB.RefreshUserMappingsProtocolPB;
import org.apache.hadoop.tools.GetUserMappingsProtocol;
import org.apache.hadoop.tools.protocolPB.GetUserMappingsProtocolClientSideTranslatorPB;
import org.apache.hadoop.tools.protocolPB.GetUserMappingsProtocolPB;
@Private
public class HSProxies {
private static final Log LOG = LogFactory.getLog(HSProxies.class);
@SuppressWarnings("unchecked")
public static <T> T createProxy(Configuration conf, InetSocketAddress hsaddr,
Class<T> xface, UserGroupInformation ugi) throws IOException {
T proxy;
if (xface == RefreshUserMappingsProtocol.class) {
proxy = (T) createHSProxyWithRefreshUserMappingsProtocol(hsaddr, conf,
ugi);
} else if (xface == GetUserMappingsProtocol.class) {
proxy = (T) createHSProxyWithGetUserMappingsProtocol(hsaddr, conf, ugi);
} else if (xface == HSAdminRefreshProtocol.class) {
proxy = (T) createHSProxyWithHSAdminRefreshProtocol(hsaddr, conf, ugi);
} else {
String message = "Unsupported protocol found when creating the proxy "
+ "connection to History server: "
          + ((xface != null) ? xface.getName() : "null");
LOG.error(message);
throw new IllegalStateException(message);
}
return proxy;
}
private static RefreshUserMappingsProtocol createHSProxyWithRefreshUserMappingsProtocol(
InetSocketAddress address, Configuration conf, UserGroupInformation ugi)
throws IOException {
RefreshUserMappingsProtocolPB proxy = (RefreshUserMappingsProtocolPB) createHSProxy(
address, conf, ugi, RefreshUserMappingsProtocolPB.class, 0);
return new RefreshUserMappingsProtocolClientSideTranslatorPB(proxy);
}
private static GetUserMappingsProtocol createHSProxyWithGetUserMappingsProtocol(
InetSocketAddress address, Configuration conf, UserGroupInformation ugi)
throws IOException {
GetUserMappingsProtocolPB proxy = (GetUserMappingsProtocolPB) createHSProxy(
address, conf, ugi, GetUserMappingsProtocolPB.class, 0);
return new GetUserMappingsProtocolClientSideTranslatorPB(proxy);
}
private static HSAdminRefreshProtocol createHSProxyWithHSAdminRefreshProtocol(
InetSocketAddress hsaddr, Configuration conf, UserGroupInformation ugi)
throws IOException {
HSAdminRefreshProtocolPB proxy = (HSAdminRefreshProtocolPB) createHSProxy(
hsaddr, conf, ugi, HSAdminRefreshProtocolPB.class, 0);
return new HSAdminRefreshProtocolClientSideTranslatorPB(proxy);
}
private static Object createHSProxy(InetSocketAddress address,
Configuration conf, UserGroupInformation ugi, Class<?> xface,
int rpcTimeout) throws IOException {
RPC.setProtocolEngine(conf, xface, ProtobufRpcEngine.class);
Object proxy = RPC.getProxy(xface, RPC.getProtocolVersion(xface), address,
ugi, conf, NetUtils.getDefaultSocketFactory(conf), rpcTimeout);
return proxy;
}
}
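// Editor's sketch (not part of the original file): how an admin-side client
// might obtain and use one of the supported proxies. The host name and port
// below are placeholders, not values taken from this code.
class HSProxiesUsageSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    InetSocketAddress hsAddr =
        new InetSocketAddress("historyserver.example.com", 10033);
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    // Ask for one of the three interfaces handled by createProxy(); any
    // other class triggers the IllegalStateException branch above.
    RefreshUserMappingsProtocol refresh = HSProxies.createProxy(conf, hsAddr,
        RefreshUserMappingsProtocol.class, ugi);
    refresh.refreshUserToGroupsMappings();
  }
}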
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.app.ClusterInfo;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.TaskAttemptFinishingMonitor;
import org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo;
import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobsInfo;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.service.Service;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.security.client.ClientToAMTokenSecretManager;
import org.apache.hadoop.yarn.util.Clock;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
/**
* Loads and manages the Job history cache.
*/
public class JobHistory extends AbstractService implements HistoryContext {
private static final Log LOG = LogFactory.getLog(JobHistory.class);
public static final Pattern CONF_FILENAME_REGEX = Pattern.compile("("
+ JobID.JOBID_REGEX + ")_conf.xml(?:\\.[0-9]+\\.old)?");
public static final String OLD_SUFFIX = ".old";
// Time interval for the move thread.
private long moveThreadInterval;
private Configuration conf;
private ScheduledThreadPoolExecutor scheduledExecutor = null;
private HistoryStorage storage = null;
private HistoryFileManager hsManager = null;
ScheduledFuture<?> futureHistoryCleaner = null;
//History job cleaner interval
private long cleanerInterval;
@Override
protected void serviceInit(Configuration conf) throws Exception {
LOG.info("JobHistory Init");
this.conf = conf;
this.appID = ApplicationId.newInstance(0, 0);
this.appAttemptID = RecordFactoryProvider.getRecordFactory(conf)
.newRecordInstance(ApplicationAttemptId.class);
moveThreadInterval = conf.getLong(
JHAdminConfig.MR_HISTORY_MOVE_INTERVAL_MS,
JHAdminConfig.DEFAULT_MR_HISTORY_MOVE_INTERVAL_MS);
hsManager = createHistoryFileManager();
hsManager.init(conf);
try {
hsManager.initExisting();
} catch (IOException e) {
throw new YarnRuntimeException("Failed to intialize existing directories", e);
}
storage = createHistoryStorage();
if (storage instanceof Service) {
((Service) storage).init(conf);
}
storage.setHistoryFileManager(hsManager);
super.serviceInit(conf);
}
protected HistoryStorage createHistoryStorage() {
return ReflectionUtils.newInstance(conf.getClass(
JHAdminConfig.MR_HISTORY_STORAGE, CachedHistoryStorage.class,
HistoryStorage.class), conf);
}
protected HistoryFileManager createHistoryFileManager() {
return new HistoryFileManager();
}
@Override
protected void serviceStart() throws Exception {
hsManager.start();
if (storage instanceof Service) {
((Service) storage).start();
}
scheduledExecutor = new ScheduledThreadPoolExecutor(2,
new ThreadFactoryBuilder().setNameFormat("Log Scanner/Cleaner #%d")
.build());
scheduledExecutor.scheduleAtFixedRate(new MoveIntermediateToDoneRunnable(),
moveThreadInterval, moveThreadInterval, TimeUnit.MILLISECONDS);
// Start historyCleaner
scheduleHistoryCleaner();
super.serviceStart();
}
protected int getInitDelaySecs() {
return 30;
}
@Override
protected void serviceStop() throws Exception {
LOG.info("Stopping JobHistory");
if (scheduledExecutor != null) {
LOG.info("Stopping History Cleaner/Move To Done");
scheduledExecutor.shutdown();
boolean interrupted = false;
long currentTime = System.currentTimeMillis();
      // wait up to one second for the executor to shut down gracefully
      while (!scheduledExecutor.isShutdown()
          && System.currentTimeMillis() < currentTime + 1000L && !interrupted) {
try {
Thread.sleep(20);
} catch (InterruptedException e) {
interrupted = true;
}
}
if (!scheduledExecutor.isShutdown()) {
LOG.warn("HistoryCleanerService/move to done shutdown may not have " +
"succeeded, Forcing a shutdown");
scheduledExecutor.shutdownNow();
}
}
if (storage != null && storage instanceof Service) {
((Service) storage).stop();
}
if (hsManager != null) {
hsManager.stop();
}
super.serviceStop();
}
public JobHistory() {
super(JobHistory.class.getName());
}
@Override
public String getApplicationName() {
return "Job History Server";
}
private class MoveIntermediateToDoneRunnable implements Runnable {
@Override
public void run() {
try {
LOG.info("Starting scan to move intermediate done files");
hsManager.scanIntermediateDirectory();
} catch (IOException e) {
LOG.error("Error while scanning intermediate done dir ", e);
}
}
}
private class HistoryCleaner implements Runnable {
public void run() {
LOG.info("History Cleaner started");
try {
hsManager.clean();
} catch (IOException e) {
LOG.warn("Error trying to clean up ", e);
}
LOG.info("History Cleaner complete");
}
}
/**
* Helper method for test cases.
*/
HistoryFileInfo getJobFileInfo(JobId jobId) throws IOException {
return hsManager.getFileInfo(jobId);
}
@Override
public Job getJob(JobId jobId) {
return storage.getFullJob(jobId);
}
@Override
public Map<JobId, Job> getAllJobs(ApplicationId appID) {
if (LOG.isDebugEnabled()) {
LOG.debug("Called getAllJobs(AppId): " + appID);
}
// currently there is 1 to 1 mapping between app and job id
org.apache.hadoop.mapreduce.JobID oldJobID = TypeConverter.fromYarn(appID);
Map<JobId, Job> jobs = new HashMap<JobId, Job>();
JobId jobID = TypeConverter.toYarn(oldJobID);
jobs.put(jobID, getJob(jobID));
return jobs;
}
@Override
public Map<JobId, Job> getAllJobs() {
return storage.getAllPartialJobs();
}
public void refreshLoadedJobCache() {
if (getServiceState() == STATE.STARTED) {
if (storage instanceof CachedHistoryStorage) {
((CachedHistoryStorage) storage).refreshLoadedJobCache();
} else {
throw new UnsupportedOperationException(storage.getClass().getName()
+ " is expected to be an instance of "
+ CachedHistoryStorage.class.getName());
}
} else {
LOG.warn("Failed to execute refreshLoadedJobCache: JobHistory service is not started");
}
}
@VisibleForTesting
HistoryStorage getHistoryStorage() {
return storage;
}
/**
* Look for a set of partial jobs.
*
* @param offset
* the offset into the list of jobs.
* @param count
* the maximum number of jobs to return.
* @param user
* only return jobs for the given user.
* @param queue
   *          only return jobs in the given queue.
* @param sBegin
* only return Jobs that started on or after the given time.
* @param sEnd
* only return Jobs that started on or before the given time.
* @param fBegin
* only return Jobs that ended on or after the given time.
* @param fEnd
* only return Jobs that ended on or before the given time.
* @param jobState
   *          only return jobs that are in the given job state.
* @return The list of filtered jobs.
*/
@Override
public JobsInfo getPartialJobs(Long offset, Long count, String user,
String queue, Long sBegin, Long sEnd, Long fBegin, Long fEnd,
JobState jobState) {
return storage.getPartialJobs(offset, count, user, queue, sBegin, sEnd,
fBegin, fEnd, jobState);
}
public void refreshJobRetentionSettings() {
if (getServiceState() == STATE.STARTED) {
conf = createConf();
long maxHistoryAge = conf.getLong(JHAdminConfig.MR_HISTORY_MAX_AGE_MS,
JHAdminConfig.DEFAULT_MR_HISTORY_MAX_AGE);
hsManager.setMaxHistoryAge(maxHistoryAge);
if (futureHistoryCleaner != null) {
futureHistoryCleaner.cancel(false);
}
futureHistoryCleaner = null;
scheduleHistoryCleaner();
} else {
LOG.warn("Failed to execute refreshJobRetentionSettings : Job History service is not started");
}
}
private void scheduleHistoryCleaner() {
boolean startCleanerService = conf.getBoolean(
JHAdminConfig.MR_HISTORY_CLEANER_ENABLE, true);
if (startCleanerService) {
cleanerInterval = conf.getLong(
JHAdminConfig.MR_HISTORY_CLEANER_INTERVAL_MS,
JHAdminConfig.DEFAULT_MR_HISTORY_CLEANER_INTERVAL_MS);
futureHistoryCleaner = scheduledExecutor.scheduleAtFixedRate(
new HistoryCleaner(), getInitDelaySecs() * 1000l, cleanerInterval,
TimeUnit.MILLISECONDS);
}
}
protected Configuration createConf() {
return new Configuration();
}
public long getCleanerInterval() {
return cleanerInterval;
}
// TODO AppContext - Not Required
private ApplicationAttemptId appAttemptID;
@Override
public ApplicationAttemptId getApplicationAttemptId() {
// TODO fixme - bogus appAttemptID for now
return appAttemptID;
}
// TODO AppContext - Not Required
private ApplicationId appID;
@Override
public ApplicationId getApplicationID() {
// TODO fixme - bogus appID for now
return appID;
}
// TODO AppContext - Not Required
@Override
public EventHandler getEventHandler() {
// TODO Auto-generated method stub
return null;
}
// TODO AppContext - Not Required
private String userName;
@Override
public CharSequence getUser() {
    // lazily resolve the user name on first access
    if (userName == null) {
userName = conf.get(MRJobConfig.USER_NAME, "history-user");
}
return userName;
}
// TODO AppContext - Not Required
@Override
public Clock getClock() {
return null;
}
// TODO AppContext - Not Required
@Override
public ClusterInfo getClusterInfo() {
return null;
}
// TODO AppContext - Not Required
@Override
public Set<String> getBlacklistedNodes() {
// Not Implemented
return null;
}
@Override
public ClientToAMTokenSecretManager getClientToAMTokenSecretManager() {
// Not implemented.
return null;
}
@Override
public boolean isLastAMRetry() {
// bogus - Not Required
return false;
}
@Override
public boolean hasSuccessfullyUnregistered() {
// bogus - Not Required
return true;
}
@Override
public String getNMHostname() {
// bogus - Not Required
return null;
}
@Override
public TaskAttemptFinishingMonitor getTaskAttemptFinishingMonitor() {
return null;
}
}
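// Editor's sketch (not part of the original file): a query against the
// partial-jobs view documented on getPartialJobs(). It assumes that null
// filter arguments mean "no constraint", as the storage implementations
// suggest; the user name is hypothetical.
class JobHistoryQuerySketch {
  static JobsInfo firstTenSucceededJobs(JobHistory history) {
    // offset 0, at most 10 jobs, filtered by user and SUCCEEDED state only
    return history.getPartialJobs(0L, 10L, "alice", null, null, null, null,
        null, JobState.SUCCEEDED);
  }
}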
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerFileSystemStateStoreService.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.HashSet;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.mapreduce.v2.api.MRDelegationTokenIdentifier;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.apache.hadoop.security.token.delegation.DelegationKey;
import org.apache.hadoop.util.Shell;
@Private
@Unstable
/**
* A history server state storage implementation that supports any persistent
* storage that adheres to the FileSystem interface.
*/
public class HistoryServerFileSystemStateStoreService
extends HistoryServerStateStoreService {
public static final Log LOG =
LogFactory.getLog(HistoryServerFileSystemStateStoreService.class);
private static final String ROOT_STATE_DIR_NAME = "HistoryServerState";
private static final String TOKEN_STATE_DIR_NAME = "tokens";
private static final String TOKEN_KEYS_DIR_NAME = "keys";
private static final String TOKEN_BUCKET_DIR_PREFIX = "tb_";
private static final String TOKEN_BUCKET_NAME_FORMAT =
TOKEN_BUCKET_DIR_PREFIX + "%03d";
private static final String TOKEN_MASTER_KEY_FILE_PREFIX = "key_";
private static final String TOKEN_FILE_PREFIX = "token_";
private static final String TMP_FILE_PREFIX = "tmp-";
private static final String UPDATE_TMP_FILE_PREFIX = "update-";
private static final FsPermission DIR_PERMISSIONS =
new FsPermission((short)0700);
private static final FsPermission FILE_PERMISSIONS = Shell.WINDOWS
? new FsPermission((short) 0700) : new FsPermission((short) 0400);
private static final int NUM_TOKEN_BUCKETS = 1000;
private FileSystem fs;
private Path rootStatePath;
private Path tokenStatePath;
private Path tokenKeysStatePath;
@Override
protected void initStorage(Configuration conf)
throws IOException {
final String storeUri = conf.get(JHAdminConfig.MR_HS_FS_STATE_STORE_URI);
if (storeUri == null) {
throw new IOException("No store location URI configured in " +
JHAdminConfig.MR_HS_FS_STATE_STORE_URI);
}
LOG.info("Using " + storeUri + " for history server state storage");
rootStatePath = new Path(storeUri, ROOT_STATE_DIR_NAME);
}
@Override
protected void startStorage() throws IOException {
fs = createFileSystem();
createDir(rootStatePath);
tokenStatePath = new Path(rootStatePath, TOKEN_STATE_DIR_NAME);
createDir(tokenStatePath);
tokenKeysStatePath = new Path(tokenStatePath, TOKEN_KEYS_DIR_NAME);
createDir(tokenKeysStatePath);
for (int i=0; i < NUM_TOKEN_BUCKETS; ++i) {
createDir(getTokenBucketPath(i));
}
}
FileSystem createFileSystem() throws IOException {
return rootStatePath.getFileSystem(getConfig());
}
@Override
protected void closeStorage() throws IOException {
// don't close the filesystem as it's part of the filesystem cache
// and other clients may still be using it
}
@Override
public HistoryServerState loadState() throws IOException {
LOG.info("Loading history server state from " + rootStatePath);
HistoryServerState state = new HistoryServerState();
loadTokenState(state);
return state;
}
@Override
public void storeToken(MRDelegationTokenIdentifier tokenId,
Long renewDate) throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Storing token " + tokenId.getSequenceNumber());
}
Path tokenPath = getTokenPath(tokenId);
if (fs.exists(tokenPath)) {
throw new IOException(tokenPath + " already exists");
}
createNewFile(tokenPath, buildTokenData(tokenId, renewDate));
}
@Override
public void updateToken(MRDelegationTokenIdentifier tokenId,
Long renewDate) throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Updating token " + tokenId.getSequenceNumber());
}
// Files cannot be atomically replaced, therefore we write a temporary
// update file, remove the original token file, then rename the update
// file to the token file. During recovery either the token file will be
// used or if that is missing and an update file is present then the
// update file is used.
Path tokenPath = getTokenPath(tokenId);
Path tmp = new Path(tokenPath.getParent(),
UPDATE_TMP_FILE_PREFIX + tokenPath.getName());
writeFile(tmp, buildTokenData(tokenId, renewDate));
try {
deleteFile(tokenPath);
} catch (IOException e) {
fs.delete(tmp, false);
throw e;
}
if (!fs.rename(tmp, tokenPath)) {
throw new IOException("Could not rename " + tmp + " to " + tokenPath);
}
}
@Override
public void removeToken(MRDelegationTokenIdentifier tokenId)
throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Removing token " + tokenId.getSequenceNumber());
}
deleteFile(getTokenPath(tokenId));
}
@Override
public void storeTokenMasterKey(DelegationKey key) throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Storing master key " + key.getKeyId());
}
Path keyPath = new Path(tokenKeysStatePath,
TOKEN_MASTER_KEY_FILE_PREFIX + key.getKeyId());
if (fs.exists(keyPath)) {
throw new IOException(keyPath + " already exists");
}
ByteArrayOutputStream memStream = new ByteArrayOutputStream();
DataOutputStream dataStream = new DataOutputStream(memStream);
try {
key.write(dataStream);
dataStream.close();
dataStream = null;
} finally {
IOUtils.cleanup(LOG, dataStream);
}
createNewFile(keyPath, memStream.toByteArray());
}
@Override
public void removeTokenMasterKey(DelegationKey key)
throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Removing master key " + key.getKeyId());
}
Path keyPath = new Path(tokenKeysStatePath,
TOKEN_MASTER_KEY_FILE_PREFIX + key.getKeyId());
deleteFile(keyPath);
}
private static int getBucketId(MRDelegationTokenIdentifier tokenId) {
return tokenId.getSequenceNumber() % NUM_TOKEN_BUCKETS;
}
private Path getTokenBucketPath(int bucketId) {
return new Path(tokenStatePath,
String.format(TOKEN_BUCKET_NAME_FORMAT, bucketId));
}
private Path getTokenPath(MRDelegationTokenIdentifier tokenId) {
Path bucketPath = getTokenBucketPath(getBucketId(tokenId));
return new Path(bucketPath,
TOKEN_FILE_PREFIX + tokenId.getSequenceNumber());
}
private void createDir(Path dir) throws IOException {
try {
FileStatus status = fs.getFileStatus(dir);
if (!status.isDirectory()) {
throw new FileAlreadyExistsException("Unexpected file in store: "
+ dir);
}
if (!status.getPermission().equals(DIR_PERMISSIONS)) {
fs.setPermission(dir, DIR_PERMISSIONS);
}
} catch (FileNotFoundException e) {
fs.mkdirs(dir, DIR_PERMISSIONS);
}
}
private void createNewFile(Path file, byte[] data)
throws IOException {
Path tmp = new Path(file.getParent(), TMP_FILE_PREFIX + file.getName());
writeFile(tmp, data);
try {
if (!fs.rename(tmp, file)) {
throw new IOException("Could not rename " + tmp + " to " + file);
}
} catch (IOException e) {
fs.delete(tmp, false);
throw e;
}
}
private void writeFile(Path file, byte[] data) throws IOException {
final int WRITE_BUFFER_SIZE = 4096;
FSDataOutputStream out = fs.create(file, FILE_PERMISSIONS, true,
WRITE_BUFFER_SIZE, fs.getDefaultReplication(file),
fs.getDefaultBlockSize(file), null);
try {
try {
out.write(data);
out.close();
out = null;
} finally {
IOUtils.cleanup(LOG, out);
}
} catch (IOException e) {
fs.delete(file, false);
throw e;
}
}
private byte[] readFile(Path file, long numBytes) throws IOException {
byte[] data = new byte[(int)numBytes];
FSDataInputStream in = fs.open(file);
try {
in.readFully(data);
} finally {
IOUtils.cleanup(LOG, in);
}
return data;
}
private void deleteFile(Path file) throws IOException {
boolean deleted;
try {
deleted = fs.delete(file, false);
} catch (FileNotFoundException e) {
deleted = true;
}
if (!deleted) {
throw new IOException("Unable to delete " + file);
}
}
private byte[] buildTokenData(MRDelegationTokenIdentifier tokenId,
Long renewDate) throws IOException {
ByteArrayOutputStream memStream = new ByteArrayOutputStream();
DataOutputStream dataStream = new DataOutputStream(memStream);
try {
tokenId.write(dataStream);
dataStream.writeLong(renewDate);
dataStream.close();
dataStream = null;
} finally {
IOUtils.cleanup(LOG, dataStream);
}
return memStream.toByteArray();
}
private void loadTokenMasterKey(HistoryServerState state, Path keyFile,
long numKeyFileBytes) throws IOException {
DelegationKey key = new DelegationKey();
byte[] keyData = readFile(keyFile, numKeyFileBytes);
DataInputStream in =
new DataInputStream(new ByteArrayInputStream(keyData));
try {
key.readFields(in);
} finally {
IOUtils.cleanup(LOG, in);
}
state.tokenMasterKeyState.add(key);
}
private void loadTokenFromBucket(int bucketId,
HistoryServerState state, Path tokenFile, long numTokenFileBytes)
throws IOException {
MRDelegationTokenIdentifier token =
loadToken(state, tokenFile, numTokenFileBytes);
int tokenBucketId = getBucketId(token);
if (tokenBucketId != bucketId) {
throw new IOException("Token " + tokenFile
+ " should be in bucket " + tokenBucketId + ", found in bucket "
+ bucketId);
}
}
private MRDelegationTokenIdentifier loadToken(HistoryServerState state,
Path tokenFile, long numTokenFileBytes) throws IOException {
MRDelegationTokenIdentifier tokenId = new MRDelegationTokenIdentifier();
long renewDate;
byte[] tokenData = readFile(tokenFile, numTokenFileBytes);
DataInputStream in =
new DataInputStream(new ByteArrayInputStream(tokenData));
try {
tokenId.readFields(in);
renewDate = in.readLong();
} finally {
IOUtils.cleanup(LOG, in);
}
state.tokenState.put(tokenId, renewDate);
return tokenId;
}
private int loadTokensFromBucket(HistoryServerState state, Path bucket)
throws IOException {
String numStr =
bucket.getName().substring(TOKEN_BUCKET_DIR_PREFIX.length());
final int bucketId = Integer.parseInt(numStr);
int numTokens = 0;
FileStatus[] tokenStats = fs.listStatus(bucket);
Set<String> loadedTokens = new HashSet<String>(tokenStats.length);
for (FileStatus stat : tokenStats) {
String name = stat.getPath().getName();
if (name.startsWith(TOKEN_FILE_PREFIX)) {
loadTokenFromBucket(bucketId, state, stat.getPath(), stat.getLen());
loadedTokens.add(name);
++numTokens;
} else if (name.startsWith(UPDATE_TMP_FILE_PREFIX)) {
String tokenName = name.substring(UPDATE_TMP_FILE_PREFIX.length());
if (loadedTokens.contains(tokenName)) {
// already have the token, update may be partial so ignore it
fs.delete(stat.getPath(), false);
} else {
// token is missing, so try to parse the update temp file
loadTokenFromBucket(bucketId, state, stat.getPath(), stat.getLen());
fs.rename(stat.getPath(),
new Path(stat.getPath().getParent(), tokenName));
loadedTokens.add(tokenName);
++numTokens;
}
} else if (name.startsWith(TMP_FILE_PREFIX)) {
// cleanup incomplete temp files
fs.delete(stat.getPath(), false);
} else {
LOG.warn("Skipping unexpected file in history server token bucket: "
+ stat.getPath());
}
}
return numTokens;
}
private int loadKeys(HistoryServerState state) throws IOException {
FileStatus[] stats = fs.listStatus(tokenKeysStatePath);
int numKeys = 0;
for (FileStatus stat : stats) {
String name = stat.getPath().getName();
if (name.startsWith(TOKEN_MASTER_KEY_FILE_PREFIX)) {
loadTokenMasterKey(state, stat.getPath(), stat.getLen());
++numKeys;
} else {
LOG.warn("Skipping unexpected file in history server token state: "
+ stat.getPath());
}
}
return numKeys;
}
private int loadTokens(HistoryServerState state) throws IOException {
FileStatus[] stats = fs.listStatus(tokenStatePath);
int numTokens = 0;
for (FileStatus stat : stats) {
String name = stat.getPath().getName();
if (name.startsWith(TOKEN_BUCKET_DIR_PREFIX)) {
numTokens += loadTokensFromBucket(state, stat.getPath());
} else if (name.equals(TOKEN_KEYS_DIR_NAME)) {
// key loading is done elsewhere
continue;
} else {
LOG.warn("Skipping unexpected file in history server token state: "
+ stat.getPath());
}
}
return numTokens;
}
private void loadTokenState(HistoryServerState state) throws IOException {
int numKeys = loadKeys(state);
int numTokens = loadTokens(state);
LOG.info("Loaded " + numKeys + " master keys and " + numTokens
+ " tokens from " + tokenStatePath);
}
}
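// Editor's sketch (not part of the original file): the on-disk token layout
// used above, reduced to plain arithmetic. The name fragments mirror the
// TOKEN_BUCKET_NAME_FORMAT and TOKEN_FILE_PREFIX constants; the sequence
// number is hypothetical.
class TokenBucketLayoutSketch {
  public static void main(String[] args) {
    final int numBuckets = 1000;                  // NUM_TOKEN_BUCKETS
    int sequenceNumber = 12345;                   // a token's sequence number
    int bucketId = sequenceNumber % numBuckets;   // -> 345
    String relative = String.format("tokens/tb_%03d/token_%d",
        bucketId, sequenceNumber);                // tokens/tb_345/token_12345
    System.out.println(relative);
  }
}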
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistoryServer.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.v2.hs.HistoryServerStateStoreService.HistoryServerState;
import org.apache.hadoop.mapreduce.v2.hs.server.HSAdminServer;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.source.JvmMetrics;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.service.CompositeService;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.JvmPauseMonitor;
import org.apache.hadoop.util.ShutdownHookManager;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.logaggregation.AggregatedLogDeletionService;
import com.google.common.annotations.VisibleForTesting;
/******************************************************************
* {@link JobHistoryServer} is responsible for servicing all job history
 * related requests from clients.
*
*****************************************************************/
public class JobHistoryServer extends CompositeService {
/**
* Priority of the JobHistoryServer shutdown hook.
*/
public static final int SHUTDOWN_HOOK_PRIORITY = 30;
public static final long historyServerTimeStamp = System.currentTimeMillis();
private static final Log LOG = LogFactory.getLog(JobHistoryServer.class);
protected HistoryContext historyContext;
private HistoryClientService clientService;
private JobHistory jobHistoryService;
protected JHSDelegationTokenSecretManager jhsDTSecretManager;
private AggregatedLogDeletionService aggLogDelService;
private HSAdminServer hsAdminServer;
private HistoryServerStateStoreService stateStore;
private JvmPauseMonitor pauseMonitor;
// utility class to start and stop secret manager as part of service
// framework and implement state recovery for secret manager on startup
private class HistoryServerSecretManagerService
extends AbstractService {
public HistoryServerSecretManagerService() {
super(HistoryServerSecretManagerService.class.getName());
}
@Override
protected void serviceStart() throws Exception {
boolean recoveryEnabled = getConfig().getBoolean(
JHAdminConfig.MR_HS_RECOVERY_ENABLE,
JHAdminConfig.DEFAULT_MR_HS_RECOVERY_ENABLE);
if (recoveryEnabled) {
assert stateStore.isInState(STATE.STARTED);
HistoryServerState state = stateStore.loadState();
jhsDTSecretManager.recover(state);
}
try {
jhsDTSecretManager.startThreads();
} catch(IOException io) {
LOG.error("Error while starting the Secret Manager threads", io);
throw io;
}
super.serviceStart();
}
@Override
protected void serviceStop() throws Exception {
if (jhsDTSecretManager != null) {
jhsDTSecretManager.stopThreads();
}
super.serviceStop();
}
}
public JobHistoryServer() {
super(JobHistoryServer.class.getName());
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
Configuration config = new YarnConfiguration(conf);
config.setBoolean(Dispatcher.DISPATCHER_EXIT_ON_ERROR_KEY, true);
// This is required for WebApps to use https if enabled.
MRWebAppUtil.initialize(getConfig());
try {
doSecureLogin(conf);
} catch(IOException ie) {
throw new YarnRuntimeException("History Server Failed to login", ie);
}
jobHistoryService = new JobHistory();
historyContext = (HistoryContext)jobHistoryService;
stateStore = createStateStore(conf);
this.jhsDTSecretManager = createJHSSecretManager(conf, stateStore);
clientService = createHistoryClientService();
aggLogDelService = new AggregatedLogDeletionService();
hsAdminServer = new HSAdminServer(aggLogDelService, jobHistoryService);
addService(stateStore);
addService(new HistoryServerSecretManagerService());
addService(jobHistoryService);
addService(clientService);
addService(aggLogDelService);
addService(hsAdminServer);
DefaultMetricsSystem.initialize("JobHistoryServer");
JvmMetrics jm = JvmMetrics.initSingleton("JobHistoryServer", null);
pauseMonitor = new JvmPauseMonitor(getConfig());
jm.setPauseMonitor(pauseMonitor);
super.serviceInit(config);
}
@VisibleForTesting
protected HistoryClientService createHistoryClientService() {
return new HistoryClientService(historyContext,
this.jhsDTSecretManager);
}
protected JHSDelegationTokenSecretManager createJHSSecretManager(
Configuration conf, HistoryServerStateStoreService store) {
long secretKeyInterval =
conf.getLong(MRConfig.DELEGATION_KEY_UPDATE_INTERVAL_KEY,
MRConfig.DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT);
long tokenMaxLifetime =
conf.getLong(MRConfig.DELEGATION_TOKEN_MAX_LIFETIME_KEY,
MRConfig.DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT);
long tokenRenewInterval =
conf.getLong(MRConfig.DELEGATION_TOKEN_RENEW_INTERVAL_KEY,
MRConfig.DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT);
return new JHSDelegationTokenSecretManager(secretKeyInterval,
tokenMaxLifetime, tokenRenewInterval, 3600000, store);
}
protected HistoryServerStateStoreService createStateStore(
Configuration conf) {
return HistoryServerStateStoreServiceFactory.getStore(conf);
}
protected void doSecureLogin(Configuration conf) throws IOException {
InetSocketAddress socAddr = getBindAddress(conf);
SecurityUtil.login(conf, JHAdminConfig.MR_HISTORY_KEYTAB,
JHAdminConfig.MR_HISTORY_PRINCIPAL, socAddr.getHostName());
}
/**
   * Retrieves the JHS bind address from the configuration.
   *
   * @param conf the configuration to read the address from
   * @return the InetSocketAddress the history server binds to
*/
public static InetSocketAddress getBindAddress(Configuration conf) {
return conf.getSocketAddr(JHAdminConfig.MR_HISTORY_ADDRESS,
JHAdminConfig.DEFAULT_MR_HISTORY_ADDRESS,
JHAdminConfig.DEFAULT_MR_HISTORY_PORT);
}
@Override
protected void serviceStart() throws Exception {
pauseMonitor.start();
super.serviceStart();
}
@Override
protected void serviceStop() throws Exception {
DefaultMetricsSystem.shutdown();
if (pauseMonitor != null) {
pauseMonitor.stop();
}
super.serviceStop();
}
@Private
public HistoryClientService getClientService() {
return this.clientService;
}
static JobHistoryServer launchJobHistoryServer(String[] args) {
    Thread.setDefaultUncaughtExceptionHandler(
        new YarnUncaughtExceptionHandler());
StringUtils.startupShutdownMessage(JobHistoryServer.class, args, LOG);
JobHistoryServer jobHistoryServer = null;
try {
jobHistoryServer = new JobHistoryServer();
ShutdownHookManager.get().addShutdownHook(
new CompositeServiceShutdownHook(jobHistoryServer),
SHUTDOWN_HOOK_PRIORITY);
YarnConfiguration conf = new YarnConfiguration(new JobConf());
new GenericOptionsParser(conf, args);
jobHistoryServer.init(conf);
jobHistoryServer.start();
} catch (Throwable t) {
LOG.fatal("Error starting JobHistoryServer", t);
ExitUtil.terminate(-1, "Error starting JobHistoryServer");
}
return jobHistoryServer;
}
public static void main(String[] args) {
launchJobHistoryServer(args);
}
}
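// Editor's sketch (not part of the original file): starting an embedded
// history server, e.g. from a test, relying on the CompositeService
// lifecycle wired up in serviceInit() above. Configuration is left at its
// defaults.
class EmbeddedHistoryServerSketch {
  public static void main(String[] args) {
    JobHistoryServer jhs = new JobHistoryServer();
    jhs.init(new YarnConfiguration());
    jhs.start();
    try {
      System.out.println("JHS bound to "
          + JobHistoryServer.getBindAddress(jhs.getConfig()));
    } finally {
      jhs.stop();   // stops the child services in reverse order
    }
  }
}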
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerStateStoreServiceFactory.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.apache.hadoop.util.ReflectionUtils;
public class HistoryServerStateStoreServiceFactory {
/**
* Constructs an instance of the configured storage class
*
* @param conf the configuration
* @return the state storage instance
*/
public static HistoryServerStateStoreService getStore(Configuration conf) {
Class<? extends HistoryServerStateStoreService> storeClass =
HistoryServerNullStateStoreService.class;
boolean recoveryEnabled = conf.getBoolean(
JHAdminConfig.MR_HS_RECOVERY_ENABLE,
JHAdminConfig.DEFAULT_MR_HS_RECOVERY_ENABLE);
if (recoveryEnabled) {
storeClass = conf.getClass(JHAdminConfig.MR_HS_STATE_STORE, null,
HistoryServerStateStoreService.class);
if (storeClass == null) {
throw new RuntimeException("Unable to locate storage class, check "
+ JHAdminConfig.MR_HS_STATE_STORE);
}
}
return ReflectionUtils.newInstance(storeClass, conf);
}
}
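// Editor's sketch (not part of the original file): the configuration wiring
// the factory above reacts to. The leveldb store named here is one of the
// concrete implementations in this package.
class StateStoreFactorySketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setBoolean(JHAdminConfig.MR_HS_RECOVERY_ENABLE, true);
    conf.setClass(JHAdminConfig.MR_HS_STATE_STORE,
        HistoryServerLeveldbStateStoreService.class,
        HistoryServerStateStoreService.class);
    HistoryServerStateStoreService store =
        HistoryServerStateStoreServiceFactory.getStore(conf);
    // With recovery disabled instead, HistoryServerNullStateStoreService
    // would be returned regardless of MR_HS_STATE_STORE.
    System.out.println(store.getClass().getSimpleName());
  }
}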
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerLeveldbStateStoreService.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs;
import static org.fusesource.leveldbjni.JniDBFactory.asString;
import static org.fusesource.leveldbjni.JniDBFactory.bytes;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.IOException;
import java.util.Map.Entry;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.mapreduce.v2.api.MRDelegationTokenIdentifier;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.apache.hadoop.security.token.delegation.DelegationKey;
import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.VersionProto;
import org.apache.hadoop.yarn.server.records.Version;
import org.apache.hadoop.yarn.server.records.impl.pb.VersionPBImpl;
import org.apache.hadoop.yarn.server.utils.LeveldbIterator;
import org.fusesource.leveldbjni.JniDBFactory;
import org.fusesource.leveldbjni.internal.NativeDB;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.DBException;
import org.iq80.leveldb.Logger;
import org.iq80.leveldb.Options;
public class HistoryServerLeveldbStateStoreService extends
HistoryServerStateStoreService {
private static final String DB_NAME = "mr-jhs-state";
private static final String DB_SCHEMA_VERSION_KEY = "jhs-schema-version";
private static final String TOKEN_MASTER_KEY_KEY_PREFIX = "tokens/key_";
private static final String TOKEN_STATE_KEY_PREFIX = "tokens/token_";
private static final Version CURRENT_VERSION_INFO =
Version.newInstance(1, 0);
private DB db;
public static final Log LOG =
LogFactory.getLog(HistoryServerLeveldbStateStoreService.class);
@Override
protected void initStorage(Configuration conf) throws IOException {
}
@Override
protected void startStorage() throws IOException {
Path storeRoot = createStorageDir(getConfig());
Options options = new Options();
options.createIfMissing(false);
options.logger(new LeveldbLogger());
LOG.info("Using state database at " + storeRoot + " for recovery");
File dbfile = new File(storeRoot.toString());
try {
db = JniDBFactory.factory.open(dbfile, options);
} catch (NativeDB.DBException e) {
if (e.isNotFound() || e.getMessage().contains(" does not exist ")) {
LOG.info("Creating state database at " + dbfile);
options.createIfMissing(true);
try {
db = JniDBFactory.factory.open(dbfile, options);
// store version
storeVersion();
} catch (DBException dbErr) {
throw new IOException(dbErr.getMessage(), dbErr);
}
} else {
throw e;
}
}
checkVersion();
}
@Override
protected void closeStorage() throws IOException {
if (db != null) {
db.close();
db = null;
}
}
@Override
public HistoryServerState loadState() throws IOException {
HistoryServerState state = new HistoryServerState();
int numKeys = loadTokenMasterKeys(state);
LOG.info("Recovered " + numKeys + " token master keys");
int numTokens = loadTokens(state);
LOG.info("Recovered " + numTokens + " tokens");
return state;
}
private int loadTokenMasterKeys(HistoryServerState state)
throws IOException {
int numKeys = 0;
LeveldbIterator iter = null;
try {
iter = new LeveldbIterator(db);
iter.seek(bytes(TOKEN_MASTER_KEY_KEY_PREFIX));
while (iter.hasNext()) {
Entry<byte[],byte[]> entry = iter.next();
String key = asString(entry.getKey());
if (!key.startsWith(TOKEN_MASTER_KEY_KEY_PREFIX)) {
break;
}
if (LOG.isDebugEnabled()) {
LOG.debug("Loading master key from " + key);
}
try {
loadTokenMasterKey(state, entry.getValue());
} catch (IOException e) {
throw new IOException("Error loading token master key from " + key,
e);
}
++numKeys;
}
} catch (DBException e) {
throw new IOException(e);
} finally {
if (iter != null) {
iter.close();
}
}
return numKeys;
}
private void loadTokenMasterKey(HistoryServerState state, byte[] data)
throws IOException {
DelegationKey key = new DelegationKey();
DataInputStream in =
new DataInputStream(new ByteArrayInputStream(data));
try {
key.readFields(in);
} finally {
IOUtils.cleanup(LOG, in);
}
state.tokenMasterKeyState.add(key);
}
private int loadTokens(HistoryServerState state) throws IOException {
int numTokens = 0;
LeveldbIterator iter = null;
try {
iter = new LeveldbIterator(db);
iter.seek(bytes(TOKEN_STATE_KEY_PREFIX));
while (iter.hasNext()) {
Entry<byte[],byte[]> entry = iter.next();
String key = asString(entry.getKey());
if (!key.startsWith(TOKEN_STATE_KEY_PREFIX)) {
break;
}
if (LOG.isDebugEnabled()) {
LOG.debug("Loading token from " + key);
}
try {
loadToken(state, entry.getValue());
} catch (IOException e) {
throw new IOException("Error loading token state from " + key, e);
}
++numTokens;
}
} catch (DBException e) {
throw new IOException(e);
} finally {
if (iter != null) {
iter.close();
}
}
return numTokens;
}
private void loadToken(HistoryServerState state, byte[] data)
throws IOException {
MRDelegationTokenIdentifier tokenId = new MRDelegationTokenIdentifier();
long renewDate;
DataInputStream in = new DataInputStream(new ByteArrayInputStream(data));
try {
tokenId.readFields(in);
renewDate = in.readLong();
} finally {
IOUtils.cleanup(LOG, in);
}
state.tokenState.put(tokenId, renewDate);
}
@Override
public void storeToken(MRDelegationTokenIdentifier tokenId, Long renewDate)
throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Storing token " + tokenId.getSequenceNumber());
}
ByteArrayOutputStream memStream = new ByteArrayOutputStream();
DataOutputStream dataStream = new DataOutputStream(memStream);
try {
tokenId.write(dataStream);
dataStream.writeLong(renewDate);
dataStream.close();
dataStream = null;
} finally {
IOUtils.cleanup(LOG, dataStream);
}
String dbKey = getTokenDatabaseKey(tokenId);
try {
db.put(bytes(dbKey), memStream.toByteArray());
} catch (DBException e) {
throw new IOException(e);
}
}
@Override
public void updateToken(MRDelegationTokenIdentifier tokenId, Long renewDate)
throws IOException {
storeToken(tokenId, renewDate);
}
@Override
public void removeToken(MRDelegationTokenIdentifier tokenId)
throws IOException {
String dbKey = getTokenDatabaseKey(tokenId);
try {
db.delete(bytes(dbKey));
} catch (DBException e) {
throw new IOException(e);
}
}
private String getTokenDatabaseKey(MRDelegationTokenIdentifier tokenId) {
return TOKEN_STATE_KEY_PREFIX + tokenId.getSequenceNumber();
}
@Override
public void storeTokenMasterKey(DelegationKey masterKey)
throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Storing master key " + masterKey.getKeyId());
}
ByteArrayOutputStream memStream = new ByteArrayOutputStream();
DataOutputStream dataStream = new DataOutputStream(memStream);
try {
masterKey.write(dataStream);
dataStream.close();
dataStream = null;
} finally {
IOUtils.cleanup(LOG, dataStream);
}
String dbKey = getTokenMasterKeyDatabaseKey(masterKey);
try {
db.put(bytes(dbKey), memStream.toByteArray());
} catch (DBException e) {
throw new IOException(e);
}
}
@Override
public void removeTokenMasterKey(DelegationKey masterKey)
throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Removing master key " + masterKey.getKeyId());
}
String dbKey = getTokenMasterKeyDatabaseKey(masterKey);
try {
db.delete(bytes(dbKey));
} catch (DBException e) {
throw new IOException(e);
}
}
private String getTokenMasterKeyDatabaseKey(DelegationKey masterKey) {
return TOKEN_MASTER_KEY_KEY_PREFIX + masterKey.getKeyId();
}
private Path createStorageDir(Configuration conf) throws IOException {
String confPath = conf.get(JHAdminConfig.MR_HS_LEVELDB_STATE_STORE_PATH);
if (confPath == null) {
throw new IOException("No store location directory configured in " +
JHAdminConfig.MR_HS_LEVELDB_STATE_STORE_PATH);
}
Path root = new Path(confPath, DB_NAME);
FileSystem fs = FileSystem.getLocal(conf);
fs.mkdirs(root, new FsPermission((short)0700));
return root;
}
Version loadVersion() throws IOException {
byte[] data = db.get(bytes(DB_SCHEMA_VERSION_KEY));
// if version is not stored previously, treat it as 1.0.
if (data == null || data.length == 0) {
return Version.newInstance(1, 0);
}
Version version =
new VersionPBImpl(VersionProto.parseFrom(data));
return version;
}
private void storeVersion() throws IOException {
dbStoreVersion(CURRENT_VERSION_INFO);
}
void dbStoreVersion(Version state) throws IOException {
String key = DB_SCHEMA_VERSION_KEY;
byte[] data =
((VersionPBImpl) state).getProto().toByteArray();
try {
db.put(bytes(key), data);
} catch (DBException e) {
throw new IOException(e);
}
}
Version getCurrentVersion() {
return CURRENT_VERSION_INFO;
}
/**
   * 1) Versioning scheme: major.minor, e.g. 1.0, 1.1, 1.2 ... 1.25, 2.0, etc.
* 2) Any incompatible change of state-store is a major upgrade, and any
* compatible change of state-store is a minor upgrade.
* 3) Within a minor upgrade, say 1.1 to 1.2:
* overwrite the version info and proceed as normal.
* 4) Within a major upgrade, say 1.2 to 2.0:
   *    throw an exception and instruct the user to use a separate upgrade tool to
* upgrade state or remove incompatible old state.
*/
private void checkVersion() throws IOException {
Version loadedVersion = loadVersion();
LOG.info("Loaded state version info " + loadedVersion);
if (loadedVersion.equals(getCurrentVersion())) {
return;
}
if (loadedVersion.isCompatibleTo(getCurrentVersion())) {
LOG.info("Storing state version info " + getCurrentVersion());
storeVersion();
} else {
throw new IOException(
"Incompatible version for state: expecting state version "
+ getCurrentVersion() + ", but loading version " + loadedVersion);
}
}
private static class LeveldbLogger implements Logger {
private static final Log LOG = LogFactory.getLog(LeveldbLogger.class);
@Override
public void log(String message) {
LOG.info(message);
}
}
}
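// Editor's sketch (not part of the original file): the compatibility rule
// spelled out above checkVersion(), applied to two hypothetical versions.
class SchemaVersionSketch {
  public static void main(String[] args) {
    Version stored = Version.newInstance(1, 1);
    Version current = Version.newInstance(1, 2);
    // Same major version: compatible, so the store overwrites the stored
    // version info and proceeds. A major bump (e.g. 2.0) would instead
    // surface as an IOException from checkVersion().
    System.out.println(stored.isCompatibleTo(current));   // true
  }
}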
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTask.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.yarn.util.Records;
public class CompletedTask implements Task {
private static final Counters EMPTY_COUNTERS = new Counters();
private final TaskId taskId;
private final TaskInfo taskInfo;
private TaskReport report;
private TaskAttemptId successfulAttempt;
private List<String> reportDiagnostics = new LinkedList<String>();
private Lock taskAttemptsLock = new ReentrantLock();
private AtomicBoolean taskAttemptsLoaded = new AtomicBoolean(false);
private final Map<TaskAttemptId, TaskAttempt> attempts =
new LinkedHashMap<TaskAttemptId, TaskAttempt>();
CompletedTask(TaskId taskId, TaskInfo taskInfo) {
//TODO JobHistoryParser.handleTaskFailedAttempt should use state from the event.
this.taskInfo = taskInfo;
this.taskId = taskId;
}
@Override
public boolean canCommit(TaskAttemptId taskAttemptID) {
return false;
}
@Override
public TaskAttempt getAttempt(TaskAttemptId attemptID) {
loadAllTaskAttempts();
return attempts.get(attemptID);
}
@Override
public Map<TaskAttemptId, TaskAttempt> getAttempts() {
loadAllTaskAttempts();
return attempts;
}
@Override
public Counters getCounters() {
return taskInfo.getCounters();
}
@Override
public TaskId getID() {
return taskId;
}
@Override
public float getProgress() {
return 1.0f;
}
@Override
public synchronized TaskReport getReport() {
if (report == null) {
constructTaskReport();
}
return report;
}
@Override
public TaskType getType() {
return TypeConverter.toYarn(taskInfo.getTaskType());
}
@Override
public boolean isFinished() {
return true;
}
@Override
public TaskState getState() {
    return taskInfo.getTaskStatus() == null ? TaskState.KILLED
        : TaskState.valueOf(taskInfo.getTaskStatus());
}
private void constructTaskReport() {
loadAllTaskAttempts();
this.report = Records.newRecord(TaskReport.class);
report.setTaskId(taskId);
long minLaunchTime = Long.MAX_VALUE;
for(TaskAttempt attempt: attempts.values()) {
minLaunchTime = Math.min(minLaunchTime, attempt.getLaunchTime());
}
minLaunchTime = minLaunchTime == Long.MAX_VALUE ? -1 : minLaunchTime;
report.setStartTime(minLaunchTime);
report.setFinishTime(taskInfo.getFinishTime());
report.setTaskState(getState());
report.setProgress(getProgress());
Counters counters = getCounters();
if (counters == null) {
counters = EMPTY_COUNTERS;
}
report.setRawCounters(counters);
if (successfulAttempt != null) {
report.setSuccessfulAttempt(successfulAttempt);
}
report.addAllDiagnostics(reportDiagnostics);
report
.addAllRunningAttempts(new ArrayList<TaskAttemptId>(attempts.keySet()));
}
private void loadAllTaskAttempts() {
if (taskAttemptsLoaded.get()) {
return;
}
taskAttemptsLock.lock();
try {
if (taskAttemptsLoaded.get()) {
return;
}
for (TaskAttemptInfo attemptHistory : taskInfo.getAllTaskAttempts()
.values()) {
CompletedTaskAttempt attempt =
new CompletedTaskAttempt(taskId, attemptHistory);
reportDiagnostics.addAll(attempt.getDiagnostics());
attempts.put(attempt.getID(), attempt);
if (successfulAttempt == null
&& attemptHistory.getTaskStatus() != null
&& attemptHistory.getTaskStatus().equals(
TaskState.SUCCEEDED.toString())) {
successfulAttempt =
TypeConverter.toYarn(attemptHistory.getAttemptId());
}
}
taskAttemptsLoaded.set(true);
} finally {
taskAttemptsLock.unlock();
}
}
}
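// Editor's sketch (not part of the original file): the lazy, lock-guarded
// single-load pattern used by loadAllTaskAttempts() above, distilled into a
// reusable shape. Names are hypothetical.
class LazySingleLoadSketch {
  private final AtomicBoolean loaded = new AtomicBoolean(false);
  private final Lock lock = new ReentrantLock();
  void ensureLoaded(Runnable loader) {
    if (loaded.get()) {
      return;             // fast path: already populated, no locking
    }
    lock.lock();
    try {
      if (loaded.get()) {
        return;           // another thread finished the load while we waited
      }
      loader.run();       // populate shared state exactly once
      loaded.set(true);   // publish only after the load has completed
    } finally {
      lock.unlock();
    }
  }
}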
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryServerNullStateStoreService.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.hs;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.v2.api.MRDelegationTokenIdentifier;
import org.apache.hadoop.security.token.delegation.DelegationKey;
@Private
@Unstable
public class HistoryServerNullStateStoreService
extends HistoryServerStateStoreService {
@Override
protected void initStorage(Configuration conf) throws IOException {
// Do nothing
}
@Override
protected void startStorage() throws IOException {
// Do nothing
}
@Override
protected void closeStorage() throws IOException {
// Do nothing
}
@Override
public HistoryServerState loadState() throws IOException {
throw new UnsupportedOperationException(
"Cannot load state from null store");
}
@Override
public void storeToken(MRDelegationTokenIdentifier tokenId, Long renewDate)
throws IOException {
// Do nothing
}
@Override
public void updateToken(MRDelegationTokenIdentifier tokenId, Long renewDate)
throws IOException {
// Do nothing
}
@Override
public void removeToken(MRDelegationTokenIdentifier tokenId)
throws IOException {
// Do nothing
}
@Override
public void storeTokenMasterKey(DelegationKey key) throws IOException {
// Do nothing
}
@Override
public void removeTokenMasterKey(DelegationKey key) throws IOException {
// Do nothing
}
}
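// Editor's sketch (not part of the original file): the null-object behavior
// of this store, assuming HistoryServerStateStoreService follows the usual
// AbstractService init/start/stop lifecycle.
class NullStoreBehaviorSketch {
  public static void main(String[] args) throws IOException {
    HistoryServerNullStateStoreService store =
        new HistoryServerNullStateStoreService();
    store.init(new Configuration());
    store.start();
    store.storeTokenMasterKey(new DelegationKey());         // silently discarded
    store.removeToken(new MRDelegationTokenIdentifier());   // also a no-op
    store.stop();
    // Only loadState() is unsafe here; it throws, and the factory ensures it
    // is never reached when recovery is disabled.
  }
}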