Columns (name, type, observed range):
  repo              string    lengths 1 to 191
  file              string    lengths 23 to 351
  code              string    lengths 0 to 5.32M
  file_length       int64     0 to 5.32M
  avg_line_length   float64   0 to 2.9k
  max_line_length   int64     0 to 288k
  extension_type    string    1 distinct value
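One row of this dump corresponds to one source file. The sketch below is a minimal, hypothetical model of such a row and of how the numeric columns could be derived from the code text; the class name CodeFileRow and the exact definitions of file_length, avg_line_length, and max_line_length are assumptions inferred from the column names, not something the dump specifies.

import java.util.Arrays;

// Hypothetical model of one row; field meanings are inferred from the
// column names above, not specified by the dataset itself.
public class CodeFileRow {
  String repo;            // e.g. "hadoop"
  String file;            // path of the source file within the repository
  String code;            // full source text of the file
  long fileLength;        // assumed: number of characters in `code`
  double avgLineLength;   // assumed: mean line length in characters
  long maxLineLength;     // assumed: length of the longest line in characters
  String extensionType;   // e.g. "java"

  static CodeFileRow of(String repo, String file, String code, String ext) {
    CodeFileRow row = new CodeFileRow();
    row.repo = repo;
    row.file = file;
    row.code = code;
    row.extensionType = ext;
    row.fileLength = code.length();
    String[] lines = code.split("\n", -1);
    row.maxLineLength =
        Arrays.stream(lines).mapToInt(String::length).max().orElse(0);
    row.avgLineLength =
        Arrays.stream(lines).mapToInt(String::length).average().orElse(0.0);
    return row;
  }
}

If those definitions hold, the first row below is consistent with them: 8,313 characters over roughly 224 lines gives the listed avg_line_length of about 37.1.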
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestLocalContainerLauncher.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapred; import static org.apache.hadoop.fs.CreateFlag.CREATE; import static org.mockito.Matchers.isA; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import java.io.File; import java.io.IOException; import java.util.EnumSet; import java.util.HashMap; import java.util.Map; import java.util.concurrent.CountDownLatch; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.mapreduce.MRConfig; import org.apache.hadoop.mapreduce.TypeConverter; import org.apache.hadoop.mapreduce.v2.api.records.JobId; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; import org.apache.hadoop.mapreduce.v2.api.records.TaskId; import org.apache.hadoop.mapreduce.v2.api.records.TaskType; import org.apache.hadoop.mapreduce.v2.app.AppContext; import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType; import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncher; import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherEvent; import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerRemoteLaunchEvent; import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.event.Event; import org.apache.hadoop.yarn.event.EventHandler; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Test; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; public class TestLocalContainerLauncher { private static final Log LOG = LogFactory.getLog(TestLocalContainerLauncher.class); private static File testWorkDir; private static final String[] localDirs = new String[2]; private static void delete(File dir) throws IOException { Configuration conf = new Configuration(); FileSystem fs = FileSystem.getLocal(conf); Path p = fs.makeQualified(new Path(dir.getAbsolutePath())); fs.delete(p, true); } @BeforeClass public static void setupTestDirs() throws IOException { testWorkDir = new File("target", TestLocalContainerLauncher.class.getCanonicalName()); testWorkDir.delete(); testWorkDir.mkdirs(); testWorkDir = testWorkDir.getAbsoluteFile(); for (int i = 0; i < localDirs.length; i++) { final File dir = new File(testWorkDir, "local-" + i); dir.mkdirs(); localDirs[i] = dir.toString(); } } @AfterClass public 
static void cleanupTestDirs() throws IOException { if (testWorkDir != null) { delete(testWorkDir); } } @SuppressWarnings("rawtypes") @Test(timeout=10000) public void testKillJob() throws Exception { JobConf conf = new JobConf(); AppContext context = mock(AppContext.class); // a simple event handler solely to detect the container cleaned event final CountDownLatch isDone = new CountDownLatch(1); EventHandler handler = new EventHandler() { @Override public void handle(Event event) { LOG.info("handling event " + event.getClass() + " with type " + event.getType()); if (event instanceof TaskAttemptEvent) { if (event.getType() == TaskAttemptEventType.TA_CONTAINER_CLEANED) { isDone.countDown(); } } } }; when(context.getEventHandler()).thenReturn(handler); // create and start the launcher LocalContainerLauncher launcher = new LocalContainerLauncher(context, mock(TaskUmbilicalProtocol.class)); launcher.init(conf); launcher.start(); // create mocked job, task, and task attempt // a single-mapper job JobId jobId = MRBuilderUtils.newJobId(System.currentTimeMillis(), 1, 1); TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP); TaskAttemptId taId = MRBuilderUtils.newTaskAttemptId(taskId, 0); Job job = mock(Job.class); when(job.getTotalMaps()).thenReturn(1); when(job.getTotalReduces()).thenReturn(0); Map<JobId,Job> jobs = new HashMap<JobId,Job>(); jobs.put(jobId, job); // app context returns the one and only job when(context.getAllJobs()).thenReturn(jobs); org.apache.hadoop.mapreduce.v2.app.job.Task ytask = mock(org.apache.hadoop.mapreduce.v2.app.job.Task.class); when(ytask.getType()).thenReturn(TaskType.MAP); when(job.getTask(taskId)).thenReturn(ytask); // create a sleeping mapper that runs beyond the test timeout MapTask mapTask = mock(MapTask.class); when(mapTask.isMapOrReduce()).thenReturn(true); when(mapTask.isMapTask()).thenReturn(true); TaskAttemptID taskID = TypeConverter.fromYarn(taId); when(mapTask.getTaskID()).thenReturn(taskID); when(mapTask.getJobID()).thenReturn(taskID.getJobID()); doAnswer(new Answer<Void>() { @Override public Void answer(InvocationOnMock invocation) throws Throwable { // sleep for a long time LOG.info("sleeping for 5 minutes..."); Thread.sleep(5*60*1000); return null; } }).when(mapTask).run(isA(JobConf.class), isA(TaskUmbilicalProtocol.class)); // pump in a task attempt launch event ContainerLauncherEvent launchEvent = new ContainerRemoteLaunchEvent(taId, null, createMockContainer(), mapTask); launcher.handle(launchEvent); Thread.sleep(200); // now pump in a container clean-up event ContainerLauncherEvent cleanupEvent = new ContainerLauncherEvent(taId, null, null, null, ContainerLauncher.EventType.CONTAINER_REMOTE_CLEANUP); launcher.handle(cleanupEvent); // wait for the event to fire: this should be received promptly isDone.await(); launcher.close(); } private static Container createMockContainer() { Container container = mock(Container.class); NodeId nodeId = NodeId.newInstance("foo.bar.org", 1234); when(container.getNodeId()).thenReturn(nodeId); return container; } @Test public void testRenameMapOutputForReduce() throws Exception { final JobConf conf = new JobConf(); final MROutputFiles mrOutputFiles = new MROutputFiles(); mrOutputFiles.setConf(conf); // make sure both dirs are distinct // conf.set(MRConfig.LOCAL_DIR, localDirs[0].toString()); final Path mapOut = mrOutputFiles.getOutputFileForWrite(1); conf.set(MRConfig.LOCAL_DIR, localDirs[1].toString()); final Path mapOutIdx = mrOutputFiles.getOutputIndexFileForWrite(1); Assert.assertNotEquals("Paths must 
be different!", mapOut.getParent(), mapOutIdx.getParent()); // make both dirs part of LOCAL_DIR conf.setStrings(MRConfig.LOCAL_DIR, localDirs); final FileContext lfc = FileContext.getLocalFSFileContext(conf); lfc.create(mapOut, EnumSet.of(CREATE)).close(); lfc.create(mapOutIdx, EnumSet.of(CREATE)).close(); final JobId jobId = MRBuilderUtils.newJobId(12345L, 1, 2); final TaskId tid = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP); final TaskAttemptId taid = MRBuilderUtils.newTaskAttemptId(tid, 0); LocalContainerLauncher.renameMapOutputForReduce(conf, taid, mrOutputFiles); } }
8,313
37.137615
83
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptFinishingMonitor.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.mapred;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.TaskAttemptFinishingMonitor;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
import org.apache.hadoop.mapreduce.v2.app.rm.RMHeartbeatHandler;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.yarn.event.Event;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.util.SystemClock;
import org.junit.Test;

import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class TestTaskAttemptFinishingMonitor {

  @Test
  public void testFinshingAttemptTimeout()
      throws IOException, InterruptedException {
    SystemClock clock = new SystemClock();
    Configuration conf = new Configuration();
    conf.setInt(MRJobConfig.TASK_EXIT_TIMEOUT, 100);
    conf.setInt(MRJobConfig.TASK_EXIT_TIMEOUT_CHECK_INTERVAL_MS, 10);
    AppContext appCtx = mock(AppContext.class);
    JobTokenSecretManager secret = mock(JobTokenSecretManager.class);
    RMHeartbeatHandler rmHeartbeatHandler = mock(RMHeartbeatHandler.class);
    MockEventHandler eventHandler = new MockEventHandler();
    TaskAttemptFinishingMonitor taskAttemptFinishingMonitor =
        new TaskAttemptFinishingMonitor(eventHandler);
    taskAttemptFinishingMonitor.init(conf);
    taskAttemptFinishingMonitor.start();
    when(appCtx.getEventHandler()).thenReturn(eventHandler);
    when(appCtx.getNMHostname()).thenReturn("0.0.0.0");
    when(appCtx.getTaskAttemptFinishingMonitor()).thenReturn(
        taskAttemptFinishingMonitor);
    when(appCtx.getClock()).thenReturn(clock);

    TaskAttemptListenerImpl listener =
        new TaskAttemptListenerImpl(appCtx, secret, rmHeartbeatHandler, null);
    listener.init(conf);
    listener.start();

    JobId jid = MRBuilderUtils.newJobId(12345, 1, 1);
    TaskId tid = MRBuilderUtils.newTaskId(jid, 0,
        org.apache.hadoop.mapreduce.v2.api.records.TaskType.MAP);
    TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(tid, 0);
    appCtx.getTaskAttemptFinishingMonitor().register(attemptId);

    int check = 0;
    while (!eventHandler.timedOut && check++ < 10) {
      Thread.sleep(100);
    }
    taskAttemptFinishingMonitor.stop();

    assertTrue("Finishing attempt didn't time out.", eventHandler.timedOut);
  }

  public static class MockEventHandler implements EventHandler {
    public boolean timedOut = false;

    @Override
    public void handle(Event event) {
      if (event instanceof TaskAttemptEvent) {
        TaskAttemptEvent attemptEvent = ((TaskAttemptEvent) event);
        if (TaskAttemptEventType.TA_TIMED_OUT == attemptEvent.getType()) {
          timedOut = true;
        }
      }
    }
  };
}
4,082
37.518868
78
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptListenerImpl.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapred; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import static org.mockito.Matchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import java.io.IOException; import java.util.Arrays; import junit.framework.Assert; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.TaskType; import org.apache.hadoop.mapreduce.TypeConverter; import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager; import org.apache.hadoop.mapreduce.v2.api.records.JobId; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEventStatus; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; import org.apache.hadoop.mapreduce.v2.api.records.TaskId; import org.apache.hadoop.mapreduce.v2.app.AppContext; import org.apache.hadoop.mapreduce.v2.app.TaskHeartbeatHandler; import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.mapreduce.v2.app.rm.RMHeartbeatHandler; import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.util.SystemClock; import org.junit.Test; public class TestTaskAttemptListenerImpl { public static class MockTaskAttemptListenerImpl extends TaskAttemptListenerImpl { public MockTaskAttemptListenerImpl(AppContext context, JobTokenSecretManager jobTokenSecretManager, RMHeartbeatHandler rmHeartbeatHandler) { super(context, jobTokenSecretManager, rmHeartbeatHandler, null); } public MockTaskAttemptListenerImpl(AppContext context, JobTokenSecretManager jobTokenSecretManager, RMHeartbeatHandler rmHeartbeatHandler, TaskHeartbeatHandler hbHandler) { super(context, jobTokenSecretManager, rmHeartbeatHandler, null); this.taskHeartbeatHandler = hbHandler; } @Override protected void registerHeartbeatHandler(Configuration conf) { //Empty } @Override protected void startRpcServer() { //Empty } @Override protected void stopRpcServer() { //Empty } } @Test (timeout=5000) public void testGetTask() throws IOException { AppContext appCtx = mock(AppContext.class); JobTokenSecretManager secret = mock(JobTokenSecretManager.class); RMHeartbeatHandler rmHeartbeatHandler = mock(RMHeartbeatHandler.class); TaskHeartbeatHandler hbHandler = mock(TaskHeartbeatHandler.class); 
MockTaskAttemptListenerImpl listener = new MockTaskAttemptListenerImpl(appCtx, secret, rmHeartbeatHandler, hbHandler); Configuration conf = new Configuration(); listener.init(conf); listener.start(); JVMId id = new JVMId("foo",1, true, 1); WrappedJvmID wid = new WrappedJvmID(id.getJobId(), id.isMap, id.getId()); // Verify ask before registration. //The JVM ID has not been registered yet so we should kill it. JvmContext context = new JvmContext(); context.jvmId = id; JvmTask result = listener.getTask(context); assertNotNull(result); assertTrue(result.shouldDie); // Verify ask after registration but before launch. // Don't kill, should be null. TaskAttemptId attemptID = mock(TaskAttemptId.class); Task task = mock(Task.class); //Now put a task with the ID listener.registerPendingTask(task, wid); result = listener.getTask(context); assertNull(result); // Unregister for more testing. listener.unregister(attemptID, wid); // Verify ask after registration and launch //Now put a task with the ID listener.registerPendingTask(task, wid); listener.registerLaunchedTask(attemptID, wid); verify(hbHandler).register(attemptID); result = listener.getTask(context); assertNotNull(result); assertFalse(result.shouldDie); // Don't unregister yet for more testing. //Verify that if we call it again a second time we are told to die. result = listener.getTask(context); assertNotNull(result); assertTrue(result.shouldDie); listener.unregister(attemptID, wid); // Verify after unregistration. result = listener.getTask(context); assertNotNull(result); assertTrue(result.shouldDie); listener.stop(); // test JVMID JVMId jvmid = JVMId.forName("jvm_001_002_m_004"); assertNotNull(jvmid); try { JVMId.forName("jvm_001_002_m_004_006"); Assert.fail(); } catch (IllegalArgumentException e) { assertEquals(e.getMessage(), "TaskId string : jvm_001_002_m_004_006 is not properly formed"); } } @Test (timeout=5000) public void testJVMId() { JVMId jvmid = new JVMId("test", 1, true, 2); JVMId jvmid1 = JVMId.forName("jvm_test_0001_m_000002"); // test compare methot should be the same assertEquals(0, jvmid.compareTo(jvmid1)); } @Test (timeout=10000) public void testGetMapCompletionEvents() throws IOException { TaskAttemptCompletionEvent[] empty = {}; TaskAttemptCompletionEvent[] taskEvents = { createTce(0, true, TaskAttemptCompletionEventStatus.OBSOLETE), createTce(1, false, TaskAttemptCompletionEventStatus.FAILED), createTce(2, true, TaskAttemptCompletionEventStatus.SUCCEEDED), createTce(3, false, TaskAttemptCompletionEventStatus.FAILED) }; TaskAttemptCompletionEvent[] mapEvents = { taskEvents[0], taskEvents[2] }; Job mockJob = mock(Job.class); when(mockJob.getTaskAttemptCompletionEvents(0, 100)) .thenReturn(taskEvents); when(mockJob.getTaskAttemptCompletionEvents(0, 2)) .thenReturn(Arrays.copyOfRange(taskEvents, 0, 2)); when(mockJob.getTaskAttemptCompletionEvents(2, 100)) .thenReturn(Arrays.copyOfRange(taskEvents, 2, 4)); when(mockJob.getMapAttemptCompletionEvents(0, 100)).thenReturn( TypeConverter.fromYarn(mapEvents)); when(mockJob.getMapAttemptCompletionEvents(0, 2)).thenReturn( TypeConverter.fromYarn(mapEvents)); when(mockJob.getMapAttemptCompletionEvents(2, 100)).thenReturn( TypeConverter.fromYarn(empty)); AppContext appCtx = mock(AppContext.class); when(appCtx.getJob(any(JobId.class))).thenReturn(mockJob); JobTokenSecretManager secret = mock(JobTokenSecretManager.class); RMHeartbeatHandler rmHeartbeatHandler = mock(RMHeartbeatHandler.class); final TaskHeartbeatHandler hbHandler = mock(TaskHeartbeatHandler.class); TaskAttemptListenerImpl 
listener = new MockTaskAttemptListenerImpl(appCtx, secret, rmHeartbeatHandler) { @Override protected void registerHeartbeatHandler(Configuration conf) { taskHeartbeatHandler = hbHandler; } }; Configuration conf = new Configuration(); listener.init(conf); listener.start(); JobID jid = new JobID("12345", 1); TaskAttemptID tid = new TaskAttemptID("12345", 1, TaskType.REDUCE, 1, 0); MapTaskCompletionEventsUpdate update = listener.getMapCompletionEvents(jid, 0, 100, tid); assertEquals(2, update.events.length); update = listener.getMapCompletionEvents(jid, 0, 2, tid); assertEquals(2, update.events.length); update = listener.getMapCompletionEvents(jid, 2, 100, tid); assertEquals(0, update.events.length); } private static TaskAttemptCompletionEvent createTce(int eventId, boolean isMap, TaskAttemptCompletionEventStatus status) { JobId jid = MRBuilderUtils.newJobId(12345, 1, 1); TaskId tid = MRBuilderUtils.newTaskId(jid, 0, isMap ? org.apache.hadoop.mapreduce.v2.api.records.TaskType.MAP : org.apache.hadoop.mapreduce.v2.api.records.TaskType.REDUCE); TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(tid, 0); RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null); TaskAttemptCompletionEvent tce = recordFactory .newRecordInstance(TaskAttemptCompletionEvent.class); tce.setEventId(eventId); tce.setAttemptId(attemptId); tce.setStatus(status); return tce; } @Test (timeout=10000) public void testCommitWindow() throws IOException { SystemClock clock = new SystemClock(); org.apache.hadoop.mapreduce.v2.app.job.Task mockTask = mock(org.apache.hadoop.mapreduce.v2.app.job.Task.class); when(mockTask.canCommit(any(TaskAttemptId.class))).thenReturn(true); Job mockJob = mock(Job.class); when(mockJob.getTask(any(TaskId.class))).thenReturn(mockTask); AppContext appCtx = mock(AppContext.class); when(appCtx.getJob(any(JobId.class))).thenReturn(mockJob); when(appCtx.getClock()).thenReturn(clock); JobTokenSecretManager secret = mock(JobTokenSecretManager.class); RMHeartbeatHandler rmHeartbeatHandler = mock(RMHeartbeatHandler.class); final TaskHeartbeatHandler hbHandler = mock(TaskHeartbeatHandler.class); TaskAttemptListenerImpl listener = new MockTaskAttemptListenerImpl(appCtx, secret, rmHeartbeatHandler) { @Override protected void registerHeartbeatHandler(Configuration conf) { taskHeartbeatHandler = hbHandler; } }; Configuration conf = new Configuration(); listener.init(conf); listener.start(); // verify commit not allowed when RM heartbeat has not occurred recently TaskAttemptID tid = new TaskAttemptID("12345", 1, TaskType.REDUCE, 1, 0); boolean canCommit = listener.canCommit(tid); assertFalse(canCommit); verify(mockTask, never()).canCommit(any(TaskAttemptId.class)); // verify commit allowed when RM heartbeat is recent when(rmHeartbeatHandler.getLastHeartbeatTime()).thenReturn(clock.getTime()); canCommit = listener.canCommit(tid); assertTrue(canCommit); verify(mockTask, times(1)).canCommit(any(TaskAttemptId.class)); listener.stop(); } }
11,031
38.4
83
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/yarn/webapp/view/BlockForTest.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.webapp.view;

import java.io.PrintWriter;

/**
 * BlockForTest publishes constructor for test
 */
public class BlockForTest extends HtmlBlock.Block {

  public BlockForTest(HtmlBlock htmlBlock, PrintWriter out, int level,
      boolean wasInline) {
    htmlBlock.super(out, level, wasInline);
  }
}
1,147
30.888889
75
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/CustomOutputCommitter.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop;

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobContext;
import org.apache.hadoop.mapred.OutputCommitter;
import org.apache.hadoop.mapred.TaskAttemptContext;

public class CustomOutputCommitter extends OutputCommitter {

  public static final String JOB_SETUP_FILE_NAME = "_job_setup";
  public static final String JOB_COMMIT_FILE_NAME = "_job_commit";
  public static final String JOB_ABORT_FILE_NAME = "_job_abort";
  public static final String TASK_SETUP_FILE_NAME = "_task_setup";
  public static final String TASK_ABORT_FILE_NAME = "_task_abort";
  public static final String TASK_COMMIT_FILE_NAME = "_task_commit";

  @Override
  public void setupJob(JobContext jobContext) throws IOException {
    writeFile(jobContext.getJobConf(), JOB_SETUP_FILE_NAME);
  }

  @Override
  public void commitJob(JobContext jobContext) throws IOException {
    super.commitJob(jobContext);
    writeFile(jobContext.getJobConf(), JOB_COMMIT_FILE_NAME);
  }

  @Override
  public void abortJob(JobContext jobContext, int status) throws IOException {
    super.abortJob(jobContext, status);
    writeFile(jobContext.getJobConf(), JOB_ABORT_FILE_NAME);
  }

  @Override
  public void setupTask(TaskAttemptContext taskContext) throws IOException {
    writeFile(taskContext.getJobConf(), TASK_SETUP_FILE_NAME);
  }

  @Override
  public boolean needsTaskCommit(TaskAttemptContext taskContext)
      throws IOException {
    return true;
  }

  @Override
  public void commitTask(TaskAttemptContext taskContext) throws IOException {
    writeFile(taskContext.getJobConf(), TASK_COMMIT_FILE_NAME);
  }

  @Override
  public void abortTask(TaskAttemptContext taskContext) throws IOException {
    writeFile(taskContext.getJobConf(), TASK_ABORT_FILE_NAME);
  }

  private void writeFile(JobConf conf, String filename) throws IOException {
    System.out.println("writing file ----" + filename);
    Path outputPath = FileOutputFormat.getOutputPath(conf);
    FileSystem fs = outputPath.getFileSystem(conf);
    fs.create(new Path(outputPath, filename)).close();
  }
}
3,072
34.732558
77
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobSummary.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.jobhistory; import org.apache.hadoop.mapreduce.v2.api.records.JobId; import org.apache.hadoop.util.StringUtils; public class JobSummary { private JobId jobId; private long jobSubmitTime; private long jobLaunchTime; private long firstMapTaskLaunchTime; // MapAttempteStarted | // TaskAttemptStartEvent private long firstReduceTaskLaunchTime; // ReduceAttemptStarted | // TaskAttemptStartEvent private long jobFinishTime; private int numFinishedMaps; private int numFailedMaps; private int numFinishedReduces; private int numFailedReduces; private int resourcesPerMap; // resources used per map/min resource private int resourcesPerReduce; // resources used per reduce/min resource // resource models // private int numSlotsPerReduce; | Doesn't make sense with potentially // different resource models private String user; private String queue; private String jobStatus; private long mapSlotSeconds; // TODO Not generated yet in MRV2 private long reduceSlotSeconds; // TODO Not generated yet MRV2 // private int clusterSlotCapacity; private String jobName; JobSummary() { } public JobId getJobId() { return jobId; } public void setJobId(JobId jobId) { this.jobId = jobId; } public long getJobSubmitTime() { return jobSubmitTime; } public void setJobSubmitTime(long jobSubmitTime) { this.jobSubmitTime = jobSubmitTime; } public long getJobLaunchTime() { return jobLaunchTime; } public void setJobLaunchTime(long jobLaunchTime) { this.jobLaunchTime = jobLaunchTime; } public long getFirstMapTaskLaunchTime() { return firstMapTaskLaunchTime; } public void setFirstMapTaskLaunchTime(long firstMapTaskLaunchTime) { this.firstMapTaskLaunchTime = firstMapTaskLaunchTime; } public long getFirstReduceTaskLaunchTime() { return firstReduceTaskLaunchTime; } public void setFirstReduceTaskLaunchTime(long firstReduceTaskLaunchTime) { this.firstReduceTaskLaunchTime = firstReduceTaskLaunchTime; } public long getJobFinishTime() { return jobFinishTime; } public void setJobFinishTime(long jobFinishTime) { this.jobFinishTime = jobFinishTime; } public int getNumFinishedMaps() { return numFinishedMaps; } public void setNumFinishedMaps(int numFinishedMaps) { this.numFinishedMaps = numFinishedMaps; } public int getNumFailedMaps() { return numFailedMaps; } public void setNumFailedMaps(int numFailedMaps) { this.numFailedMaps = numFailedMaps; } public int getResourcesPerMap() { return resourcesPerMap; } public void setResourcesPerMap(int resourcesPerMap) { this.resourcesPerMap = resourcesPerMap; } public int getNumFinishedReduces() { return numFinishedReduces; } public void setNumFinishedReduces(int numFinishedReduces) { this.numFinishedReduces = numFinishedReduces; } public int getNumFailedReduces() { return numFailedReduces; } public void 
setNumFailedReduces(int numFailedReduces) { this.numFailedReduces = numFailedReduces; } public int getResourcesPerReduce() { return this.resourcesPerReduce; } public void setResourcesPerReduce(int resourcesPerReduce) { this.resourcesPerReduce = resourcesPerReduce; } public String getUser() { return user; } public void setUser(String user) { this.user = user; } public String getQueue() { return queue; } public void setQueue(String queue) { this.queue = queue; } public String getJobStatus() { return jobStatus; } public void setJobStatus(String jobStatus) { this.jobStatus = jobStatus; } public long getMapSlotSeconds() { return mapSlotSeconds; } public void setMapSlotSeconds(long mapSlotSeconds) { this.mapSlotSeconds = mapSlotSeconds; } public long getReduceSlotSeconds() { return reduceSlotSeconds; } public void setReduceSlotSeconds(long reduceSlotSeconds) { this.reduceSlotSeconds = reduceSlotSeconds; } public String getJobName() { return jobName; } public void setJobName(String jobName) { this.jobName = jobName; } public String getJobSummaryString() { SummaryBuilder summary = new SummaryBuilder() .add("jobId", jobId) .add("submitTime", jobSubmitTime) .add("launchTime", jobLaunchTime) .add("firstMapTaskLaunchTime", firstMapTaskLaunchTime) .add("firstReduceTaskLaunchTime", firstReduceTaskLaunchTime) .add("finishTime", jobFinishTime) .add("resourcesPerMap", resourcesPerMap) .add("resourcesPerReduce", resourcesPerReduce) .add("numMaps", numFinishedMaps + numFailedMaps) .add("numReduces", numFinishedReduces + numFailedReduces) .add("user", user) .add("queue", queue) .add("status", jobStatus) .add("mapSlotSeconds", mapSlotSeconds) .add("reduceSlotSeconds", reduceSlotSeconds) .add("jobName", jobName); return summary.toString(); } static final char EQUALS = '='; static final char[] charsToEscape = { StringUtils.COMMA, EQUALS, StringUtils.ESCAPE_CHAR }; static class SummaryBuilder { final StringBuilder buffer = new StringBuilder(); // A little optimization for a very common case SummaryBuilder add(String key, long value) { return _add(key, Long.toString(value)); } <T> SummaryBuilder add(String key, T value) { String escapedString = StringUtils.escapeString(String.valueOf(value), StringUtils.ESCAPE_CHAR, charsToEscape).replaceAll("\n", "\\\\n") .replaceAll("\r", "\\\\r"); return _add(key, escapedString); } SummaryBuilder add(SummaryBuilder summary) { if (buffer.length() > 0) buffer.append(StringUtils.COMMA); buffer.append(summary.buffer); return this; } SummaryBuilder _add(String key, String value) { if (buffer.length() > 0) buffer.append(StringUtils.COMMA); buffer.append(key).append(EQUALS).append(value); return this; } @Override public String toString() { return buffer.toString(); } } }
7,084
26.675781
77
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.jobhistory; import java.io.IOException; import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; import java.util.Iterator; import java.util.Map; import java.util.Timer; import java.util.TimerTask; import java.util.concurrent.BlockingQueue; import java.util.concurrent.LinkedBlockingQueue; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.TaskStatus; import org.apache.hadoop.mapreduce.Counter; import org.apache.hadoop.mapreduce.CounterGroup; import org.apache.hadoop.mapreduce.Counters; import org.apache.hadoop.mapreduce.JobCounter; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.TaskType; import org.apache.hadoop.mapreduce.TypeConverter; import org.apache.hadoop.mapreduce.v2.api.records.JobId; import org.apache.hadoop.mapreduce.v2.api.records.JobState; import org.apache.hadoop.mapreduce.v2.app.AppContext; import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.mapreduce.v2.app.job.JobStateInternal; import org.apache.hadoop.mapreduce.v2.jobhistory.FileNameIndexUtils; import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig; import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils; import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity; import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent; import org.apache.hadoop.yarn.client.api.TimelineClient; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.codehaus.jackson.JsonNode; import org.codehaus.jackson.map.ObjectMapper; import org.codehaus.jackson.node.ArrayNode; import org.codehaus.jackson.node.ObjectNode; import com.google.common.annotations.VisibleForTesting; /** * The job history events get routed to this class. 
This class writes the Job * history events to the DFS directly into a staging dir and then moved to a * done-dir. JobHistory implementation is in this package to access package * private classes. */ public class JobHistoryEventHandler extends AbstractService implements EventHandler<JobHistoryEvent> { private final AppContext context; private final int startCount; private int eventCounter; // Those file systems may differ from the job configuration // See org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils // #ensurePathInDefaultFileSystem private FileSystem stagingDirFS; // log Dir FileSystem private FileSystem doneDirFS; // done Dir FileSystem private Path stagingDirPath = null; private Path doneDirPrefixPath = null; // folder for completed jobs private int maxUnflushedCompletionEvents; private int postJobCompletionMultiplier; private long flushTimeout; private int minQueueSizeForBatchingFlushes; // TODO: Rename private int numUnflushedCompletionEvents = 0; private boolean isTimerActive; private EventWriter.WriteMode jhistMode = EventWriter.WriteMode.JSON; protected BlockingQueue<JobHistoryEvent> eventQueue = new LinkedBlockingQueue<JobHistoryEvent>(); protected Thread eventHandlingThread; private volatile boolean stopped; private final Object lock = new Object(); private static final Log LOG = LogFactory.getLog( JobHistoryEventHandler.class); protected static final Map<JobId, MetaInfo> fileMap = Collections.<JobId,MetaInfo>synchronizedMap(new HashMap<JobId,MetaInfo>()); // should job completion be force when the AM shuts down? protected volatile boolean forceJobCompletion = false; protected TimelineClient timelineClient; private static String MAPREDUCE_JOB_ENTITY_TYPE = "MAPREDUCE_JOB"; private static String MAPREDUCE_TASK_ENTITY_TYPE = "MAPREDUCE_TASK"; public JobHistoryEventHandler(AppContext context, int startCount) { super("JobHistoryEventHandler"); this.context = context; this.startCount = startCount; } /* (non-Javadoc) * @see org.apache.hadoop.yarn.service.AbstractService#init(org. * apache.hadoop.conf.Configuration) * Initializes the FileSystem and Path objects for the log and done directories. * Creates these directories if they do not already exist. */ @Override protected void serviceInit(Configuration conf) throws Exception { String jobId = TypeConverter.fromYarn(context.getApplicationID()).toString(); String stagingDirStr = null; String doneDirStr = null; String userDoneDirStr = null; try { stagingDirStr = JobHistoryUtils.getConfiguredHistoryStagingDirPrefix(conf, jobId); doneDirStr = JobHistoryUtils.getConfiguredHistoryIntermediateDoneDirPrefix(conf); userDoneDirStr = JobHistoryUtils.getHistoryIntermediateDoneDirForUser(conf); } catch (IOException e) { LOG.error("Failed while getting the configured log directories", e); throw new YarnRuntimeException(e); } //Check for the existence of the history staging dir. Maybe create it. try { stagingDirPath = FileContext.getFileContext(conf).makeQualified(new Path(stagingDirStr)); stagingDirFS = FileSystem.get(stagingDirPath.toUri(), conf); mkdir(stagingDirFS, stagingDirPath, new FsPermission( JobHistoryUtils.HISTORY_STAGING_DIR_PERMISSIONS)); } catch (IOException e) { LOG.error("Failed while checking for/creating history staging path: [" + stagingDirPath + "]", e); throw new YarnRuntimeException(e); } //Check for the existence of intermediate done dir. 
Path doneDirPath = null; try { doneDirPath = FileContext.getFileContext(conf).makeQualified(new Path(doneDirStr)); doneDirFS = FileSystem.get(doneDirPath.toUri(), conf); // This directory will be in a common location, or this may be a cluster // meant for a single user. Creating based on the conf. Should ideally be // created by the JobHistoryServer or as part of deployment. if (!doneDirFS.exists(doneDirPath)) { if (JobHistoryUtils.shouldCreateNonUserDirectory(conf)) { LOG.info("Creating intermediate history logDir: [" + doneDirPath + "] + based on conf. Should ideally be created by the JobHistoryServer: " + MRJobConfig.MR_AM_CREATE_JH_INTERMEDIATE_BASE_DIR); mkdir( doneDirFS, doneDirPath, new FsPermission( JobHistoryUtils.HISTORY_INTERMEDIATE_DONE_DIR_PERMISSIONS .toShort())); // TODO Temporary toShort till new FsPermission(FsPermissions) // respects // sticky } else { String message = "Not creating intermediate history logDir: [" + doneDirPath + "] based on conf: " + MRJobConfig.MR_AM_CREATE_JH_INTERMEDIATE_BASE_DIR + ". Either set to true or pre-create this directory with" + " appropriate permissions"; LOG.error(message); throw new YarnRuntimeException(message); } } } catch (IOException e) { LOG.error("Failed checking for the existance of history intermediate " + "done directory: [" + doneDirPath + "]"); throw new YarnRuntimeException(e); } //Check/create user directory under intermediate done dir. try { doneDirPrefixPath = FileContext.getFileContext(conf).makeQualified(new Path(userDoneDirStr)); mkdir(doneDirFS, doneDirPrefixPath, new FsPermission( JobHistoryUtils.HISTORY_INTERMEDIATE_USER_DIR_PERMISSIONS)); } catch (IOException e) { LOG.error("Error creating user intermediate history done directory: [ " + doneDirPrefixPath + "]", e); throw new YarnRuntimeException(e); } // Maximum number of unflushed completion-events that can stay in the queue // before flush kicks in. maxUnflushedCompletionEvents = conf.getInt(MRJobConfig.MR_AM_HISTORY_MAX_UNFLUSHED_COMPLETE_EVENTS, MRJobConfig.DEFAULT_MR_AM_HISTORY_MAX_UNFLUSHED_COMPLETE_EVENTS); // We want to cut down flushes after job completes so as to write quicker, // so we increase maxUnflushedEvents post Job completion by using the // following multiplier. postJobCompletionMultiplier = conf.getInt( MRJobConfig.MR_AM_HISTORY_JOB_COMPLETE_UNFLUSHED_MULTIPLIER, MRJobConfig.DEFAULT_MR_AM_HISTORY_JOB_COMPLETE_UNFLUSHED_MULTIPLIER); // Max time until which flush doesn't take place. 
flushTimeout = conf.getLong(MRJobConfig.MR_AM_HISTORY_COMPLETE_EVENT_FLUSH_TIMEOUT_MS, MRJobConfig.DEFAULT_MR_AM_HISTORY_COMPLETE_EVENT_FLUSH_TIMEOUT_MS); minQueueSizeForBatchingFlushes = conf.getInt( MRJobConfig.MR_AM_HISTORY_USE_BATCHED_FLUSH_QUEUE_SIZE_THRESHOLD, MRJobConfig.DEFAULT_MR_AM_HISTORY_USE_BATCHED_FLUSH_QUEUE_SIZE_THRESHOLD); if (conf.getBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, MRJobConfig.DEFAULT_MAPREDUCE_JOB_EMIT_TIMELINE_DATA)) { if (conf.getBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ENABLED)) { timelineClient = TimelineClient.createTimelineClient(); timelineClient.init(conf); LOG.info("Timeline service is enabled"); LOG.info("Emitting job history data to the timeline server is enabled"); } else { LOG.info("Timeline service is not enabled"); } } else { LOG.info("Emitting job history data to the timeline server is not enabled"); } // Flag for setting String jhistFormat = conf.get(JHAdminConfig.MR_HS_JHIST_FORMAT, JHAdminConfig.DEFAULT_MR_HS_JHIST_FORMAT); if (jhistFormat.equals("json")) { jhistMode = EventWriter.WriteMode.JSON; } else if (jhistFormat.equals("binary")) { jhistMode = EventWriter.WriteMode.BINARY; } else { LOG.warn("Unrecognized value '" + jhistFormat + "' for property " + JHAdminConfig.MR_HS_JHIST_FORMAT + ". Valid values are " + "'json' or 'binary'. Falling back to default value '" + JHAdminConfig.DEFAULT_MR_HS_JHIST_FORMAT + "'."); } super.serviceInit(conf); } private void mkdir(FileSystem fs, Path path, FsPermission fsp) throws IOException { if (!fs.exists(path)) { try { fs.mkdirs(path, fsp); FileStatus fsStatus = fs.getFileStatus(path); LOG.info("Perms after creating " + fsStatus.getPermission().toShort() + ", Expected: " + fsp.toShort()); if (fsStatus.getPermission().toShort() != fsp.toShort()) { LOG.info("Explicitly setting permissions to : " + fsp.toShort() + ", " + fsp); fs.setPermission(path, fsp); } } catch (FileAlreadyExistsException e) { LOG.info("Directory: [" + path + "] already exists."); } } } @Override protected void serviceStart() throws Exception { if (timelineClient != null) { timelineClient.start(); } eventHandlingThread = new Thread(new Runnable() { @Override public void run() { JobHistoryEvent event = null; while (!stopped && !Thread.currentThread().isInterrupted()) { // Log the size of the history-event-queue every so often. if (eventCounter != 0 && eventCounter % 1000 == 0) { eventCounter = 0; LOG.info("Size of the JobHistory event queue is " + eventQueue.size()); } else { eventCounter++; } try { event = eventQueue.take(); } catch (InterruptedException e) { LOG.info("EventQueue take interrupted. Returning"); return; } // If an event has been removed from the queue. Handle it. // The rest of the queue is handled via stop() // Clear the interrupt status if it's set before calling handleEvent // and set it if it was set before calling handleEvent. // Interrupts received from other threads during handleEvent cannot be // dealth with - Shell.runCommand() ignores them. synchronized (lock) { boolean isInterrupted = Thread.interrupted(); handleEvent(event); if (isInterrupted) { LOG.debug("Event handling interrupted"); Thread.currentThread().interrupt(); } } } } }, "eventHandlingThread"); eventHandlingThread.start(); super.serviceStart(); } @Override protected void serviceStop() throws Exception { LOG.info("Stopping JobHistoryEventHandler. 
" + "Size of the outstanding queue size is " + eventQueue.size()); stopped = true; //do not interrupt while event handling is in progress synchronized(lock) { if (eventHandlingThread != null) { LOG.debug("Interrupting Event Handling thread"); eventHandlingThread.interrupt(); } else { LOG.debug("Null event handling thread"); } } try { if (eventHandlingThread != null) { LOG.debug("Waiting for Event Handling thread to complete"); eventHandlingThread.join(); } } catch (InterruptedException ie) { LOG.info("Interrupted Exception while stopping", ie); } // Cancel all timers - so that they aren't invoked during or after // the metaInfo object is wrapped up. for (MetaInfo mi : fileMap.values()) { try { if (LOG.isDebugEnabled()) { LOG.debug("Shutting down timer for " + mi); } mi.shutDownTimer(); } catch (IOException e) { LOG.info("Exception while cancelling delayed flush timer. " + "Likely caused by a failed flush " + e.getMessage()); } } //write all the events remaining in queue Iterator<JobHistoryEvent> it = eventQueue.iterator(); while(it.hasNext()) { JobHistoryEvent ev = it.next(); LOG.info("In stop, writing event " + ev.getType()); handleEvent(ev); } // Process JobUnsuccessfulCompletionEvent for jobIds which still haven't // closed their event writers if(forceJobCompletion) { for (Map.Entry<JobId,MetaInfo> jobIt : fileMap.entrySet()) { JobId toClose = jobIt.getKey(); MetaInfo mi = jobIt.getValue(); if(mi != null && mi.isWriterActive()) { LOG.warn("Found jobId " + toClose + " to have not been closed. Will close"); //Create a JobFinishEvent so that it is written to the job history final Job job = context.getJob(toClose); JobUnsuccessfulCompletionEvent jucEvent = new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(toClose), System.currentTimeMillis(), job.getCompletedMaps(), job.getCompletedReduces(), createJobStateForJobUnsuccessfulCompletionEvent( mi.getForcedJobStateOnShutDown()), job.getDiagnostics()); JobHistoryEvent jfEvent = new JobHistoryEvent(toClose, jucEvent); //Bypass the queue mechanism which might wait. Call the method directly handleEvent(jfEvent); } } } //close all file handles for (MetaInfo mi : fileMap.values()) { try { mi.closeWriter(); } catch (IOException e) { LOG.info("Exception while closing file " + e.getMessage()); } } if (timelineClient != null) { timelineClient.stop(); } LOG.info("Stopped JobHistoryEventHandler. super.stop()"); super.serviceStop(); } protected EventWriter createEventWriter(Path historyFilePath) throws IOException { FSDataOutputStream out = stagingDirFS.create(historyFilePath, true); return new EventWriter(out, this.jhistMode); } /** * Create an event writer for the Job represented by the jobID. * Writes out the job configuration to the log directory. * This should be the first call to history for a job * * @param jobId the jobId. 
* @param amStartedEvent * @throws IOException */ protected void setupEventWriter(JobId jobId, AMStartedEvent amStartedEvent) throws IOException { if (stagingDirPath == null) { LOG.error("Log Directory is null, returning"); throw new IOException("Missing Log Directory for History"); } MetaInfo oldFi = fileMap.get(jobId); Configuration conf = getConfig(); // TODO Ideally this should be written out to the job dir // (.staging/jobid/files - RecoveryService will need to be patched) Path historyFile = JobHistoryUtils.getStagingJobHistoryFile( stagingDirPath, jobId, startCount); String user = UserGroupInformation.getCurrentUser().getShortUserName(); if (user == null) { throw new IOException( "User is null while setting up jobhistory eventwriter"); } String jobName = context.getJob(jobId).getName(); EventWriter writer = (oldFi == null) ? null : oldFi.writer; Path logDirConfPath = JobHistoryUtils.getStagingConfFile(stagingDirPath, jobId, startCount); if (writer == null) { try { writer = createEventWriter(historyFile); LOG.info("Event Writer setup for JobId: " + jobId + ", File: " + historyFile); } catch (IOException ioe) { LOG.info("Could not create log file: [" + historyFile + "] + for job " + "[" + jobName + "]"); throw ioe; } //Write out conf only if the writer isn't already setup. if (conf != null) { // TODO Ideally this should be written out to the job dir // (.staging/jobid/files - RecoveryService will need to be patched) FSDataOutputStream jobFileOut = null; try { if (logDirConfPath != null) { jobFileOut = stagingDirFS.create(logDirConfPath, true); conf.writeXml(jobFileOut); jobFileOut.close(); } } catch (IOException e) { LOG.info("Failed to write the job configuration file", e); throw e; } } } String queueName = JobConf.DEFAULT_QUEUE_NAME; if (conf != null) { queueName = conf.get(MRJobConfig.QUEUE_NAME, JobConf.DEFAULT_QUEUE_NAME); } MetaInfo fi = new MetaInfo(historyFile, logDirConfPath, writer, user, jobName, jobId, amStartedEvent.getForcedJobStateOnShutDown(), queueName); fi.getJobSummary().setJobId(jobId); fi.getJobSummary().setJobLaunchTime(amStartedEvent.getStartTime()); fi.getJobSummary().setJobSubmitTime(amStartedEvent.getSubmitTime()); fi.getJobIndexInfo().setJobStartTime(amStartedEvent.getStartTime()); fi.getJobIndexInfo().setSubmitTime(amStartedEvent.getSubmitTime()); fileMap.put(jobId, fi); } /** Close the event writer for this id * @throws IOException */ public void closeWriter(JobId id) throws IOException { try { final MetaInfo mi = fileMap.get(id); if (mi != null) { mi.closeWriter(); } } catch (IOException e) { LOG.error("Error closing writer for JobID: " + id); throw e; } } @Override public void handle(JobHistoryEvent event) { try { if (isJobCompletionEvent(event.getHistoryEvent())) { // When the job is complete, flush slower but write faster. 
maxUnflushedCompletionEvents = maxUnflushedCompletionEvents * postJobCompletionMultiplier; } eventQueue.put(event); } catch (InterruptedException e) { throw new YarnRuntimeException(e); } } private boolean isJobCompletionEvent(HistoryEvent historyEvent) { if (EnumSet.of(EventType.JOB_FINISHED, EventType.JOB_FAILED, EventType.JOB_KILLED).contains(historyEvent.getEventType())) { return true; } return false; } @Private public void handleEvent(JobHistoryEvent event) { synchronized (lock) { // If this is JobSubmitted Event, setup the writer if (event.getHistoryEvent().getEventType() == EventType.AM_STARTED) { try { AMStartedEvent amStartedEvent = (AMStartedEvent) event.getHistoryEvent(); setupEventWriter(event.getJobID(), amStartedEvent); } catch (IOException ioe) { LOG.error("Error JobHistoryEventHandler in handleEvent: " + event, ioe); throw new YarnRuntimeException(ioe); } } // For all events // (1) Write it out // (2) Process it for JobSummary // (3) Process it for ATS (if enabled) MetaInfo mi = fileMap.get(event.getJobID()); try { HistoryEvent historyEvent = event.getHistoryEvent(); if (! (historyEvent instanceof NormalizedResourceEvent)) { mi.writeEvent(historyEvent); } processEventForJobSummary(event.getHistoryEvent(), mi.getJobSummary(), event.getJobID()); if (timelineClient != null) { processEventForTimelineServer(historyEvent, event.getJobID(), event.getTimestamp()); } if (LOG.isDebugEnabled()) { LOG.debug("In HistoryEventHandler " + event.getHistoryEvent().getEventType()); } } catch (IOException e) { LOG.error("Error writing History Event: " + event.getHistoryEvent(), e); throw new YarnRuntimeException(e); } if (event.getHistoryEvent().getEventType() == EventType.JOB_SUBMITTED) { JobSubmittedEvent jobSubmittedEvent = (JobSubmittedEvent) event.getHistoryEvent(); mi.getJobIndexInfo().setSubmitTime(jobSubmittedEvent.getSubmitTime()); mi.getJobIndexInfo().setQueueName(jobSubmittedEvent.getJobQueueName()); } //initialize the launchTime in the JobIndexInfo of MetaInfo if(event.getHistoryEvent().getEventType() == EventType.JOB_INITED ){ JobInitedEvent jie = (JobInitedEvent) event.getHistoryEvent(); mi.getJobIndexInfo().setJobStartTime(jie.getLaunchTime()); } if (event.getHistoryEvent().getEventType() == EventType.JOB_QUEUE_CHANGED) { JobQueueChangeEvent jQueueEvent = (JobQueueChangeEvent) event.getHistoryEvent(); mi.getJobIndexInfo().setQueueName(jQueueEvent.getJobQueueName()); } // If this is JobFinishedEvent, close the writer and setup the job-index if (event.getHistoryEvent().getEventType() == EventType.JOB_FINISHED) { try { JobFinishedEvent jFinishedEvent = (JobFinishedEvent) event.getHistoryEvent(); mi.getJobIndexInfo().setFinishTime(jFinishedEvent.getFinishTime()); mi.getJobIndexInfo().setNumMaps(jFinishedEvent.getFinishedMaps()); mi.getJobIndexInfo().setNumReduces( jFinishedEvent.getFinishedReduces()); mi.getJobIndexInfo().setJobStatus(JobState.SUCCEEDED.toString()); closeEventWriter(event.getJobID()); processDoneFiles(event.getJobID()); } catch (IOException e) { throw new YarnRuntimeException(e); } } // In case of JOB_ERROR, only process all the Done files(e.g. job // summary, job history file etc.) if it is last AM retry. 
if (event.getHistoryEvent().getEventType() == EventType.JOB_ERROR) { try { JobUnsuccessfulCompletionEvent jucEvent = (JobUnsuccessfulCompletionEvent) event.getHistoryEvent(); mi.getJobIndexInfo().setFinishTime(jucEvent.getFinishTime()); mi.getJobIndexInfo().setNumMaps(jucEvent.getFinishedMaps()); mi.getJobIndexInfo().setNumReduces(jucEvent.getFinishedReduces()); mi.getJobIndexInfo().setJobStatus(jucEvent.getStatus()); closeEventWriter(event.getJobID()); if(context.isLastAMRetry()) processDoneFiles(event.getJobID()); } catch (IOException e) { throw new YarnRuntimeException(e); } } if (event.getHistoryEvent().getEventType() == EventType.JOB_FAILED || event.getHistoryEvent().getEventType() == EventType.JOB_KILLED) { try { JobUnsuccessfulCompletionEvent jucEvent = (JobUnsuccessfulCompletionEvent) event .getHistoryEvent(); mi.getJobIndexInfo().setFinishTime(jucEvent.getFinishTime()); mi.getJobIndexInfo().setNumMaps(jucEvent.getFinishedMaps()); mi.getJobIndexInfo().setNumReduces(jucEvent.getFinishedReduces()); mi.getJobIndexInfo().setJobStatus(jucEvent.getStatus()); closeEventWriter(event.getJobID()); processDoneFiles(event.getJobID()); } catch (IOException e) { throw new YarnRuntimeException(e); } } } } public void processEventForJobSummary(HistoryEvent event, JobSummary summary, JobId jobId) { // context.getJob could be used for some of this info as well. switch (event.getEventType()) { case JOB_SUBMITTED: JobSubmittedEvent jse = (JobSubmittedEvent) event; summary.setUser(jse.getUserName()); summary.setQueue(jse.getJobQueueName()); summary.setJobSubmitTime(jse.getSubmitTime()); summary.setJobName(jse.getJobName()); break; case NORMALIZED_RESOURCE: NormalizedResourceEvent normalizedResourceEvent = (NormalizedResourceEvent) event; if (normalizedResourceEvent.getTaskType() == TaskType.MAP) { summary.setResourcesPerMap(normalizedResourceEvent.getMemory()); } else if (normalizedResourceEvent.getTaskType() == TaskType.REDUCE) { summary.setResourcesPerReduce(normalizedResourceEvent.getMemory()); } break; case JOB_INITED: JobInitedEvent jie = (JobInitedEvent) event; summary.setJobLaunchTime(jie.getLaunchTime()); break; case MAP_ATTEMPT_STARTED: TaskAttemptStartedEvent mtase = (TaskAttemptStartedEvent) event; if (summary.getFirstMapTaskLaunchTime() == 0) summary.setFirstMapTaskLaunchTime(mtase.getStartTime()); break; case REDUCE_ATTEMPT_STARTED: TaskAttemptStartedEvent rtase = (TaskAttemptStartedEvent) event; if (summary.getFirstReduceTaskLaunchTime() == 0) summary.setFirstReduceTaskLaunchTime(rtase.getStartTime()); break; case JOB_FINISHED: JobFinishedEvent jfe = (JobFinishedEvent) event; summary.setJobFinishTime(jfe.getFinishTime()); summary.setNumFinishedMaps(jfe.getFinishedMaps()); summary.setNumFailedMaps(jfe.getFailedMaps()); summary.setNumFinishedReduces(jfe.getFinishedReduces()); summary.setNumFailedReduces(jfe.getFailedReduces()); if (summary.getJobStatus() == null) summary .setJobStatus(org.apache.hadoop.mapreduce.JobStatus.State.SUCCEEDED .toString()); // TODO JOB_FINISHED does not have state. Effectively job history does not // have state about the finished job. 
setSummarySlotSeconds(summary, jfe.getTotalCounters()); break; case JOB_FAILED: case JOB_KILLED: JobUnsuccessfulCompletionEvent juce = (JobUnsuccessfulCompletionEvent) event; summary.setJobStatus(juce.getStatus()); summary.setNumFinishedMaps(context.getJob(jobId).getTotalMaps()); summary.setNumFinishedReduces(context.getJob(jobId).getTotalReduces()); summary.setJobFinishTime(juce.getFinishTime()); setSummarySlotSeconds(summary, context.getJob(jobId).getAllCounters()); break; default: break; } } private void processEventForTimelineServer(HistoryEvent event, JobId jobId, long timestamp) { TimelineEvent tEvent = new TimelineEvent(); tEvent.setEventType(StringUtils.toUpperCase(event.getEventType().name())); tEvent.setTimestamp(timestamp); TimelineEntity tEntity = new TimelineEntity(); switch (event.getEventType()) { case JOB_SUBMITTED: JobSubmittedEvent jse = (JobSubmittedEvent) event; tEvent.addEventInfo("SUBMIT_TIME", jse.getSubmitTime()); tEvent.addEventInfo("QUEUE_NAME", jse.getJobQueueName()); tEvent.addEventInfo("JOB_NAME", jse.getJobName()); tEvent.addEventInfo("USER_NAME", jse.getUserName()); tEvent.addEventInfo("JOB_CONF_PATH", jse.getJobConfPath()); tEvent.addEventInfo("ACLS", jse.getJobAcls()); tEvent.addEventInfo("JOB_QUEUE_NAME", jse.getJobQueueName()); tEvent.addEventInfo("WORKFLOW_ID", jse.getWorkflowId()); tEvent.addEventInfo("WORKLFOW_ID", jse.getWorkflowId()); tEvent.addEventInfo("WORKFLOW_NAME", jse.getWorkflowName()); tEvent.addEventInfo("WORKFLOW_NAME_NAME", jse.getWorkflowNodeName()); tEvent.addEventInfo("WORKFLOW_ADJACENCIES", jse.getWorkflowAdjacencies()); tEvent.addEventInfo("WORKFLOW_TAGS", jse.getWorkflowTags()); tEntity.addEvent(tEvent); tEntity.setEntityId(jobId.toString()); tEntity.setEntityType(MAPREDUCE_JOB_ENTITY_TYPE); break; case JOB_STATUS_CHANGED: JobStatusChangedEvent jsce = (JobStatusChangedEvent) event; tEvent.addEventInfo("STATUS", jsce.getStatus()); tEntity.addEvent(tEvent); tEntity.setEntityId(jobId.toString()); tEntity.setEntityType(MAPREDUCE_JOB_ENTITY_TYPE); break; case JOB_INFO_CHANGED: JobInfoChangeEvent jice = (JobInfoChangeEvent) event; tEvent.addEventInfo("SUBMIT_TIME", jice.getSubmitTime()); tEvent.addEventInfo("LAUNCH_TIME", jice.getLaunchTime()); tEntity.addEvent(tEvent); tEntity.setEntityId(jobId.toString()); tEntity.setEntityType(MAPREDUCE_JOB_ENTITY_TYPE); break; case JOB_INITED: JobInitedEvent jie = (JobInitedEvent) event; tEvent.addEventInfo("START_TIME", jie.getLaunchTime()); tEvent.addEventInfo("STATUS", jie.getStatus()); tEvent.addEventInfo("TOTAL_MAPS", jie.getTotalMaps()); tEvent.addEventInfo("TOTAL_REDUCES", jie.getTotalReduces()); tEvent.addEventInfo("UBERIZED", jie.getUberized()); tEntity.setStartTime(jie.getLaunchTime()); tEntity.addEvent(tEvent); tEntity.setEntityId(jobId.toString()); tEntity.setEntityType(MAPREDUCE_JOB_ENTITY_TYPE); break; case JOB_PRIORITY_CHANGED: JobPriorityChangeEvent jpce = (JobPriorityChangeEvent) event; tEvent.addEventInfo("PRIORITY", jpce.getPriority().toString()); tEntity.addEvent(tEvent); tEntity.setEntityId(jobId.toString()); tEntity.setEntityType(MAPREDUCE_JOB_ENTITY_TYPE); break; case JOB_QUEUE_CHANGED: JobQueueChangeEvent jqe = (JobQueueChangeEvent) event; tEvent.addEventInfo("QUEUE_NAMES", jqe.getJobQueueName()); tEntity.addEvent(tEvent); tEntity.setEntityId(jobId.toString()); tEntity.setEntityType(MAPREDUCE_JOB_ENTITY_TYPE); break; case JOB_FAILED: case JOB_KILLED: case JOB_ERROR: JobUnsuccessfulCompletionEvent juce = (JobUnsuccessfulCompletionEvent) event; tEvent.addEventInfo("FINISH_TIME", 
juce.getFinishTime()); tEvent.addEventInfo("NUM_MAPS", juce.getFinishedMaps()); tEvent.addEventInfo("NUM_REDUCES", juce.getFinishedReduces()); tEvent.addEventInfo("JOB_STATUS", juce.getStatus()); tEvent.addEventInfo("DIAGNOSTICS", juce.getDiagnostics()); tEvent.addEventInfo("FINISHED_MAPS", juce.getFinishedMaps()); tEvent.addEventInfo("FINISHED_REDUCES", juce.getFinishedReduces()); tEntity.addEvent(tEvent); tEntity.setEntityId(jobId.toString()); tEntity.setEntityType(MAPREDUCE_JOB_ENTITY_TYPE); break; case JOB_FINISHED: JobFinishedEvent jfe = (JobFinishedEvent) event; tEvent.addEventInfo("FINISH_TIME", jfe.getFinishTime()); tEvent.addEventInfo("NUM_MAPS", jfe.getFinishedMaps()); tEvent.addEventInfo("NUM_REDUCES", jfe.getFinishedReduces()); tEvent.addEventInfo("FAILED_MAPS", jfe.getFailedMaps()); tEvent.addEventInfo("FAILED_REDUCES", jfe.getFailedReduces()); tEvent.addEventInfo("FINISHED_MAPS", jfe.getFinishedMaps()); tEvent.addEventInfo("FINISHED_REDUCES", jfe.getFinishedReduces()); tEvent.addEventInfo("MAP_COUNTERS_GROUPS", countersToJSON(jfe.getTotalCounters())); tEvent.addEventInfo("REDUCE_COUNTERS_GROUPS", countersToJSON(jfe.getReduceCounters())); tEvent.addEventInfo("TOTAL_COUNTERS_GROUPS", countersToJSON(jfe.getTotalCounters())); tEvent.addEventInfo("JOB_STATUS", JobState.SUCCEEDED.toString()); tEntity.addEvent(tEvent); tEntity.setEntityId(jobId.toString()); tEntity.setEntityType(MAPREDUCE_JOB_ENTITY_TYPE); break; case TASK_STARTED: TaskStartedEvent tse = (TaskStartedEvent) event; tEvent.addEventInfo("TASK_TYPE", tse.getTaskType().toString()); tEvent.addEventInfo("START_TIME", tse.getStartTime()); tEvent.addEventInfo("SPLIT_LOCATIONS", tse.getSplitLocations()); tEntity.addEvent(tEvent); tEntity.setEntityId(tse.getTaskId().toString()); tEntity.setEntityType(MAPREDUCE_TASK_ENTITY_TYPE); tEntity.addRelatedEntity(MAPREDUCE_JOB_ENTITY_TYPE, jobId.toString()); break; case TASK_FAILED: TaskFailedEvent tfe = (TaskFailedEvent) event; tEvent.addEventInfo("TASK_TYPE", tfe.getTaskType().toString()); tEvent.addEventInfo("STATUS", TaskStatus.State.FAILED.toString()); tEvent.addEventInfo("FINISH_TIME", tfe.getFinishTime()); tEvent.addEventInfo("ERROR", tfe.getError()); tEvent.addEventInfo("FAILED_ATTEMPT_ID", tfe.getFailedAttemptID() == null ? "" : tfe.getFailedAttemptID().toString()); tEvent.addEventInfo("COUNTERS_GROUPS", countersToJSON(tfe.getCounters())); tEntity.addEvent(tEvent); tEntity.setEntityId(tfe.getTaskId().toString()); tEntity.setEntityType(MAPREDUCE_TASK_ENTITY_TYPE); tEntity.addRelatedEntity(MAPREDUCE_JOB_ENTITY_TYPE, jobId.toString()); break; case TASK_UPDATED: TaskUpdatedEvent tue = (TaskUpdatedEvent) event; tEvent.addEventInfo("FINISH_TIME", tue.getFinishTime()); tEntity.addEvent(tEvent); tEntity.setEntityId(tue.getTaskId().toString()); tEntity.setEntityType(MAPREDUCE_TASK_ENTITY_TYPE); tEntity.addRelatedEntity(MAPREDUCE_JOB_ENTITY_TYPE, jobId.toString()); break; case TASK_FINISHED: TaskFinishedEvent tfe2 = (TaskFinishedEvent) event; tEvent.addEventInfo("TASK_TYPE", tfe2.getTaskType().toString()); tEvent.addEventInfo("COUNTERS_GROUPS", countersToJSON(tfe2.getCounters())); tEvent.addEventInfo("FINISH_TIME", tfe2.getFinishTime()); tEvent.addEventInfo("STATUS", TaskStatus.State.SUCCEEDED.toString()); tEvent.addEventInfo("SUCCESSFUL_TASK_ATTEMPT_ID", tfe2.getSuccessfulTaskAttemptId() == null ? 
"" : tfe2.getSuccessfulTaskAttemptId().toString()); tEntity.addEvent(tEvent); tEntity.setEntityId(tfe2.getTaskId().toString()); tEntity.setEntityType(MAPREDUCE_TASK_ENTITY_TYPE); tEntity.addRelatedEntity(MAPREDUCE_JOB_ENTITY_TYPE, jobId.toString()); break; case MAP_ATTEMPT_STARTED: case CLEANUP_ATTEMPT_STARTED: case REDUCE_ATTEMPT_STARTED: case SETUP_ATTEMPT_STARTED: TaskAttemptStartedEvent tase = (TaskAttemptStartedEvent) event; tEvent.addEventInfo("TASK_TYPE", tase.getTaskType().toString()); tEvent.addEventInfo("TASK_ATTEMPT_ID", tase.getTaskAttemptId().toString()); tEvent.addEventInfo("START_TIME", tase.getStartTime()); tEvent.addEventInfo("HTTP_PORT", tase.getHttpPort()); tEvent.addEventInfo("TRACKER_NAME", tase.getTrackerName()); tEvent.addEventInfo("TASK_TYPE", tase.getTaskType().toString()); tEvent.addEventInfo("SHUFFLE_PORT", tase.getShufflePort()); tEvent.addEventInfo("CONTAINER_ID", tase.getContainerId() == null ? "" : tase.getContainerId().toString()); tEntity.addEvent(tEvent); tEntity.setEntityId(tase.getTaskId().toString()); tEntity.setEntityType(MAPREDUCE_TASK_ENTITY_TYPE); tEntity.addRelatedEntity(MAPREDUCE_JOB_ENTITY_TYPE, jobId.toString()); break; case MAP_ATTEMPT_FAILED: case CLEANUP_ATTEMPT_FAILED: case REDUCE_ATTEMPT_FAILED: case SETUP_ATTEMPT_FAILED: case MAP_ATTEMPT_KILLED: case CLEANUP_ATTEMPT_KILLED: case REDUCE_ATTEMPT_KILLED: case SETUP_ATTEMPT_KILLED: TaskAttemptUnsuccessfulCompletionEvent tauce = (TaskAttemptUnsuccessfulCompletionEvent) event; tEvent.addEventInfo("TASK_TYPE", tauce.getTaskType().toString()); tEvent.addEventInfo("TASK_ATTEMPT_ID", tauce.getTaskAttemptId() == null ? "" : tauce.getTaskAttemptId().toString()); tEvent.addEventInfo("FINISH_TIME", tauce.getFinishTime()); tEvent.addEventInfo("ERROR", tauce.getError()); tEvent.addEventInfo("STATUS", tauce.getTaskStatus()); tEvent.addEventInfo("HOSTNAME", tauce.getHostname()); tEvent.addEventInfo("PORT", tauce.getPort()); tEvent.addEventInfo("RACK_NAME", tauce.getRackName()); tEvent.addEventInfo("SHUFFLE_FINISH_TIME", tauce.getFinishTime()); tEvent.addEventInfo("SORT_FINISH_TIME", tauce.getFinishTime()); tEvent.addEventInfo("MAP_FINISH_TIME", tauce.getFinishTime()); tEvent.addEventInfo("COUNTERS_GROUPS", countersToJSON(tauce.getCounters())); tEntity.addEvent(tEvent); tEntity.setEntityId(tauce.getTaskId().toString()); tEntity.setEntityType(MAPREDUCE_TASK_ENTITY_TYPE); tEntity.addRelatedEntity(MAPREDUCE_JOB_ENTITY_TYPE, jobId.toString()); break; case MAP_ATTEMPT_FINISHED: MapAttemptFinishedEvent mafe = (MapAttemptFinishedEvent) event; tEvent.addEventInfo("TASK_TYPE", mafe.getTaskType().toString()); tEvent.addEventInfo("FINISH_TIME", mafe.getFinishTime()); tEvent.addEventInfo("STATUS", mafe.getTaskStatus()); tEvent.addEventInfo("STATE", mafe.getState()); tEvent.addEventInfo("MAP_FINISH_TIME", mafe.getMapFinishTime()); tEvent.addEventInfo("COUNTERS_GROUPS", countersToJSON(mafe.getCounters())); tEvent.addEventInfo("HOSTNAME", mafe.getHostname()); tEvent.addEventInfo("PORT", mafe.getPort()); tEvent.addEventInfo("RACK_NAME", mafe.getRackName()); tEvent.addEventInfo("ATTEMPT_ID", mafe.getAttemptId() == null ? 
"" : mafe.getAttemptId().toString()); tEntity.addEvent(tEvent); tEntity.setEntityId(mafe.getTaskId().toString()); tEntity.setEntityType(MAPREDUCE_TASK_ENTITY_TYPE); tEntity.addRelatedEntity(MAPREDUCE_JOB_ENTITY_TYPE, jobId.toString()); break; case REDUCE_ATTEMPT_FINISHED: ReduceAttemptFinishedEvent rafe = (ReduceAttemptFinishedEvent) event; tEvent.addEventInfo("TASK_TYPE", rafe.getTaskType().toString()); tEvent.addEventInfo("ATTEMPT_ID", rafe.getAttemptId() == null ? "" : rafe.getAttemptId().toString()); tEvent.addEventInfo("FINISH_TIME", rafe.getFinishTime()); tEvent.addEventInfo("STATUS", rafe.getTaskStatus()); tEvent.addEventInfo("STATE", rafe.getState()); tEvent.addEventInfo("SHUFFLE_FINISH_TIME", rafe.getShuffleFinishTime()); tEvent.addEventInfo("SORT_FINISH_TIME", rafe.getSortFinishTime()); tEvent.addEventInfo("COUNTERS_GROUPS", countersToJSON(rafe.getCounters())); tEvent.addEventInfo("HOSTNAME", rafe.getHostname()); tEvent.addEventInfo("PORT", rafe.getPort()); tEvent.addEventInfo("RACK_NAME", rafe.getRackName()); tEntity.addEvent(tEvent); tEntity.setEntityId(rafe.getTaskId().toString()); tEntity.setEntityType(MAPREDUCE_TASK_ENTITY_TYPE); tEntity.addRelatedEntity(MAPREDUCE_JOB_ENTITY_TYPE, jobId.toString()); break; case SETUP_ATTEMPT_FINISHED: case CLEANUP_ATTEMPT_FINISHED: TaskAttemptFinishedEvent tafe = (TaskAttemptFinishedEvent) event; tEvent.addEventInfo("TASK_TYPE", tafe.getTaskType().toString()); tEvent.addEventInfo("ATTEMPT_ID", tafe.getAttemptId() == null ? "" : tafe.getAttemptId().toString()); tEvent.addEventInfo("FINISH_TIME", tafe.getFinishTime()); tEvent.addEventInfo("STATUS", tafe.getTaskStatus()); tEvent.addEventInfo("STATE", tafe.getState()); tEvent.addEventInfo("COUNTERS_GROUPS", countersToJSON(tafe.getCounters())); tEvent.addEventInfo("HOSTNAME", tafe.getHostname()); tEntity.addEvent(tEvent); tEntity.setEntityId(tafe.getTaskId().toString()); tEntity.setEntityType(MAPREDUCE_TASK_ENTITY_TYPE); tEntity.addRelatedEntity(MAPREDUCE_JOB_ENTITY_TYPE, jobId.toString()); break; case AM_STARTED: AMStartedEvent ase = (AMStartedEvent) event; tEvent.addEventInfo("APPLICATION_ATTEMPT_ID", ase.getAppAttemptId() == null ? "" : ase.getAppAttemptId().toString()); tEvent.addEventInfo("CONTAINER_ID", ase.getContainerId() == null ? 
"" : ase.getContainerId().toString()); tEvent.addEventInfo("NODE_MANAGER_HOST", ase.getNodeManagerHost()); tEvent.addEventInfo("NODE_MANAGER_PORT", ase.getNodeManagerPort()); tEvent.addEventInfo("NODE_MANAGER_HTTP_PORT", ase.getNodeManagerHttpPort()); tEvent.addEventInfo("START_TIME", ase.getStartTime()); tEvent.addEventInfo("SUBMIT_TIME", ase.getSubmitTime()); tEntity.addEvent(tEvent); tEntity.setEntityId(jobId.toString()); tEntity.setEntityType(MAPREDUCE_JOB_ENTITY_TYPE); break; default: break; } try { timelineClient.putEntities(tEntity); } catch (IOException ex) { LOG.error("Error putting entity " + tEntity.getEntityId() + " to Timeline" + "Server", ex); } catch (YarnException ex) { LOG.error("Error putting entity " + tEntity.getEntityId() + " to Timeline" + "Server", ex); } } @Private public JsonNode countersToJSON(Counters counters) { ObjectMapper mapper = new ObjectMapper(); ArrayNode nodes = mapper.createArrayNode(); if (counters != null) { for (CounterGroup counterGroup : counters) { ObjectNode groupNode = nodes.addObject(); groupNode.put("NAME", counterGroup.getName()); groupNode.put("DISPLAY_NAME", counterGroup.getDisplayName()); ArrayNode countersNode = groupNode.putArray("COUNTERS"); for (Counter counter : counterGroup) { ObjectNode counterNode = countersNode.addObject(); counterNode.put("NAME", counter.getName()); counterNode.put("DISPLAY_NAME", counter.getDisplayName()); counterNode.put("VALUE", counter.getValue()); } } } return nodes; } private void setSummarySlotSeconds(JobSummary summary, Counters allCounters) { Counter slotMillisMapCounter = allCounters .findCounter(JobCounter.SLOTS_MILLIS_MAPS); if (slotMillisMapCounter != null) { summary.setMapSlotSeconds(slotMillisMapCounter.getValue() / 1000); } Counter slotMillisReduceCounter = allCounters .findCounter(JobCounter.SLOTS_MILLIS_REDUCES); if (slotMillisReduceCounter != null) { summary.setReduceSlotSeconds(slotMillisReduceCounter.getValue() / 1000); } } protected void closeEventWriter(JobId jobId) throws IOException { final MetaInfo mi = fileMap.get(jobId); if (mi == null) { throw new IOException("No MetaInfo found for JobId: [" + jobId + "]"); } if (!mi.isWriterActive()) { throw new IOException( "Inactive Writer: Likely received multiple JobFinished / " + "JobUnsuccessful events for JobId: [" + jobId + "]"); } // Close the Writer try { mi.closeWriter(); } catch (IOException e) { LOG.error("Error closing writer for JobID: " + jobId); throw e; } } protected void processDoneFiles(JobId jobId) throws IOException { final MetaInfo mi = fileMap.get(jobId); if (mi == null) { throw new IOException("No MetaInfo found for JobId: [" + jobId + "]"); } if (mi.getHistoryFile() == null) { LOG.warn("No file for job-history with " + jobId + " found in cache!"); } if (mi.getConfFile() == null) { LOG.warn("No file for jobconf with " + jobId + " found in cache!"); } // Writing out the summary file. // TODO JH enhancement - reuse this file to store additional indexing info // like ACLs, etc. JHServer can use HDFS append to build an index file // with more info than is available via the filename. 
Path qualifiedSummaryDoneFile = null; FSDataOutputStream summaryFileOut = null; try { String doneSummaryFileName = getTempFileName(JobHistoryUtils .getIntermediateSummaryFileName(jobId)); qualifiedSummaryDoneFile = doneDirFS.makeQualified(new Path( doneDirPrefixPath, doneSummaryFileName)); summaryFileOut = doneDirFS.create(qualifiedSummaryDoneFile, true); summaryFileOut.writeUTF(mi.getJobSummary().getJobSummaryString()); summaryFileOut.close(); doneDirFS.setPermission(qualifiedSummaryDoneFile, new FsPermission( JobHistoryUtils.HISTORY_INTERMEDIATE_FILE_PERMISSIONS)); } catch (IOException e) { LOG.info("Unable to write out JobSummaryInfo to [" + qualifiedSummaryDoneFile + "]", e); throw e; } try { // Move historyFile to Done Folder. Path qualifiedDoneFile = null; if (mi.getHistoryFile() != null) { Path historyFile = mi.getHistoryFile(); Path qualifiedLogFile = stagingDirFS.makeQualified(historyFile); int jobNameLimit = getConfig().getInt(JHAdminConfig.MR_HS_JOBNAME_LIMIT, JHAdminConfig.DEFAULT_MR_HS_JOBNAME_LIMIT); String doneJobHistoryFileName = getTempFileName(FileNameIndexUtils.getDoneFileName(mi .getJobIndexInfo(), jobNameLimit)); qualifiedDoneFile = doneDirFS.makeQualified(new Path(doneDirPrefixPath, doneJobHistoryFileName)); moveToDoneNow(qualifiedLogFile, qualifiedDoneFile); } // Move confFile to Done Folder Path qualifiedConfDoneFile = null; if (mi.getConfFile() != null) { Path confFile = mi.getConfFile(); Path qualifiedConfFile = stagingDirFS.makeQualified(confFile); String doneConfFileName = getTempFileName(JobHistoryUtils .getIntermediateConfFileName(jobId)); qualifiedConfDoneFile = doneDirFS.makeQualified(new Path(doneDirPrefixPath, doneConfFileName)); moveToDoneNow(qualifiedConfFile, qualifiedConfDoneFile); } moveTmpToDone(qualifiedSummaryDoneFile); moveTmpToDone(qualifiedConfDoneFile); moveTmpToDone(qualifiedDoneFile); } catch (IOException e) { LOG.error("Error closing writer for JobID: " + jobId); throw e; } } private class FlushTimerTask extends TimerTask { private MetaInfo metaInfo; private IOException ioe = null; private volatile boolean shouldRun = true; FlushTimerTask(MetaInfo metaInfo) { this.metaInfo = metaInfo; } @Override public void run() { LOG.debug("In flush timer task"); synchronized (lock) { try { if (!metaInfo.isTimerShutDown() && shouldRun) metaInfo.flush(); } catch (IOException e) { ioe = e; } } } public IOException getException() { return ioe; } public void stop() { shouldRun = false; this.cancel(); } } protected class MetaInfo { private Path historyFile; private Path confFile; private EventWriter writer; JobIndexInfo jobIndexInfo; JobSummary jobSummary; Timer flushTimer; FlushTimerTask flushTimerTask; private boolean isTimerShutDown = false; private String forcedJobStateOnShutDown; MetaInfo(Path historyFile, Path conf, EventWriter writer, String user, String jobName, JobId jobId, String forcedJobStateOnShutDown, String queueName) { this.historyFile = historyFile; this.confFile = conf; this.writer = writer; this.jobIndexInfo = new JobIndexInfo(-1, -1, user, jobName, jobId, -1, -1, null, queueName); this.jobSummary = new JobSummary(); this.flushTimer = new Timer("FlushTimer", true); this.forcedJobStateOnShutDown = forcedJobStateOnShutDown; } Path getHistoryFile() { return historyFile; } Path getConfFile() { return confFile; } JobIndexInfo getJobIndexInfo() { return jobIndexInfo; } JobSummary getJobSummary() { return jobSummary; } boolean isWriterActive() { return writer != null; } boolean isTimerShutDown() { return isTimerShutDown; } String 
getForcedJobStateOnShutDown() { return forcedJobStateOnShutDown; } @Override public String toString() { return "Job MetaInfo for "+ jobSummary.getJobId() + " history file " + historyFile; } void closeWriter() throws IOException { LOG.debug("Closing Writer"); synchronized (lock) { if (writer != null) { writer.close(); } writer = null; } } void writeEvent(HistoryEvent event) throws IOException { LOG.debug("Writing event"); synchronized (lock) { if (writer != null) { writer.write(event); processEventForFlush(event); maybeFlush(event); } } } void processEventForFlush(HistoryEvent historyEvent) throws IOException { if (EnumSet.of(EventType.MAP_ATTEMPT_FINISHED, EventType.MAP_ATTEMPT_FAILED, EventType.MAP_ATTEMPT_KILLED, EventType.REDUCE_ATTEMPT_FINISHED, EventType.REDUCE_ATTEMPT_FAILED, EventType.REDUCE_ATTEMPT_KILLED, EventType.TASK_FINISHED, EventType.TASK_FAILED, EventType.JOB_FINISHED, EventType.JOB_FAILED, EventType.JOB_KILLED).contains(historyEvent.getEventType())) { numUnflushedCompletionEvents++; if (!isTimerActive) { resetFlushTimer(); if (!isTimerShutDown) { flushTimerTask = new FlushTimerTask(this); flushTimer.schedule(flushTimerTask, flushTimeout); isTimerActive = true; } } } } void resetFlushTimer() throws IOException { if (flushTimerTask != null) { IOException exception = flushTimerTask.getException(); flushTimerTask.stop(); if (exception != null) { throw exception; } flushTimerTask = null; } isTimerActive = false; } void maybeFlush(HistoryEvent historyEvent) throws IOException { if ((eventQueue.size() < minQueueSizeForBatchingFlushes && numUnflushedCompletionEvents > 0) || numUnflushedCompletionEvents >= maxUnflushedCompletionEvents || isJobCompletionEvent(historyEvent)) { this.flush(); } } void flush() throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("Flushing " + toString()); } synchronized (lock) { if (numUnflushedCompletionEvents != 0) { // skipped timer cancel. writer.flush(); numUnflushedCompletionEvents = 0; resetFlushTimer(); } } } void shutDownTimer() throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("Shutting down timer "+ toString()); } synchronized (lock) { isTimerShutDown = true; flushTimer.cancel(); if (flushTimerTask != null && flushTimerTask.getException() != null) { throw flushTimerTask.getException(); } } } } private void moveTmpToDone(Path tmpPath) throws IOException { if (tmpPath != null) { String tmpFileName = tmpPath.getName(); String fileName = getFileNameFromTmpFN(tmpFileName); Path path = new Path(tmpPath.getParent(), fileName); doneDirFS.rename(tmpPath, path); LOG.info("Moved tmp to done: " + tmpPath + " to " + path); } } // TODO If the FS objects are the same, this should be a rename instead of a // copy. 
private void moveToDoneNow(Path fromPath, Path toPath) throws IOException { // check if path exists, in case of retries it may not exist if (stagingDirFS.exists(fromPath)) { LOG.info("Copying " + fromPath.toString() + " to " + toPath.toString()); // TODO temporarily removing the existing dst if (doneDirFS.exists(toPath)) { doneDirFS.delete(toPath, true); } boolean copied = FileUtil.copy(stagingDirFS, fromPath, doneDirFS, toPath, false, getConfig()); if (copied) LOG.info("Copied to done location: " + toPath); else LOG.info("copy failed"); doneDirFS.setPermission(toPath, new FsPermission( JobHistoryUtils.HISTORY_INTERMEDIATE_FILE_PERMISSIONS)); } } boolean pathExists(FileSystem fileSys, Path path) throws IOException { return fileSys.exists(path); } private String getTempFileName(String srcFile) { return srcFile + "_tmp"; } private String getFileNameFromTmpFN(String tmpFileName) { //TODO. Some error checking here. return tmpFileName.substring(0, tmpFileName.length()-4); } public void setForcejobCompletion(boolean forceJobCompletion) { this.forceJobCompletion = forceJobCompletion; LOG.info("JobHistoryEventHandler notified that forceJobCompletion is " + forceJobCompletion); } private String createJobStateForJobUnsuccessfulCompletionEvent( String forcedJobStateOnShutDown) { if (forcedJobStateOnShutDown == null || forcedJobStateOnShutDown .isEmpty()) { return JobState.KILLED.toString(); } else if (forcedJobStateOnShutDown.equals( JobStateInternal.ERROR.toString()) || forcedJobStateOnShutDown.equals(JobStateInternal.FAILED.toString())) { return JobState.FAILED.toString(); } else if (forcedJobStateOnShutDown.equals(JobStateInternal.SUCCEEDED .toString())) { return JobState.SUCCEEDED.toString(); } return JobState.KILLED.toString(); } @VisibleForTesting boolean getFlushTimerStatus() { return isTimerActive; } }
56829
39.190948
89
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEvent.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.mapreduce.jobhistory;

import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.yarn.event.AbstractEvent;

public class JobHistoryEvent extends AbstractEvent<EventType>{

  private final JobId jobID;
  private final HistoryEvent historyEvent;

  public JobHistoryEvent(JobId jobID, HistoryEvent historyEvent) {
    this(jobID, historyEvent, System.currentTimeMillis());
  }

  public JobHistoryEvent(JobId jobID, HistoryEvent historyEvent,
      long timestamp) {
    super(historyEvent.getEventType(), timestamp);
    this.jobID = jobID;
    this.historyEvent = historyEvent;
  }

  public JobId getJobID() {
    return jobID;
  }

  public HistoryEvent getHistoryEvent() {
    return historyEvent;
  }
}
1553
31.375
74
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryCopyService.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.jobhistory; import java.io.IOException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.Path; import org.apache.hadoop.mapreduce.TypeConverter; import org.apache.hadoop.mapreduce.v2.api.records.JobId; import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils; import org.apache.hadoop.service.CompositeService; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; /** * Reads in history events from the JobHistoryFile and sends them out again * to be recorded. */ public class JobHistoryCopyService extends CompositeService implements HistoryEventHandler { private static final Log LOG = LogFactory.getLog(JobHistoryCopyService.class); private final ApplicationAttemptId applicationAttemptId; private final EventHandler handler; private final JobId jobId; public JobHistoryCopyService(ApplicationAttemptId applicationAttemptId, EventHandler handler) { super("JobHistoryCopyService"); this.applicationAttemptId = applicationAttemptId; this.jobId = TypeConverter.toYarn( TypeConverter.fromYarn(applicationAttemptId.getApplicationId())); this.handler = handler; } @Override protected void serviceInit(Configuration conf) throws Exception { super.serviceInit(conf); } @Override public void handleEvent(HistoryEvent event) throws IOException { //Skip over the AM Events this is handled elsewhere if (!(event instanceof AMStartedEvent)) { handler.handle(new JobHistoryEvent(jobId, event)); } } @Override protected void serviceStart() throws Exception { try { //TODO should we parse on a background thread??? parse(); } catch (IOException e) { throw new YarnRuntimeException(e); } super.serviceStart(); } private void parse() throws IOException { FSDataInputStream in = null; try { in = getPreviousJobHistoryFileStream(getConfig(), applicationAttemptId); } catch (IOException e) { LOG.warn("error trying to open previous history file. 
No history data " + "will be copied over.", e); return; } JobHistoryParser parser = new JobHistoryParser(in); parser.parse(this); Exception parseException = parser.getParseException(); if (parseException != null) { LOG.info("Got an error parsing job-history file" + ", ignoring incomplete events.", parseException); } } public static FSDataInputStream getPreviousJobHistoryFileStream( Configuration conf, ApplicationAttemptId applicationAttemptId) throws IOException { FSDataInputStream in = null; Path historyFile = null; String jobId = TypeConverter.fromYarn(applicationAttemptId.getApplicationId()) .toString(); String jobhistoryDir = JobHistoryUtils.getConfiguredHistoryStagingDirPrefix(conf, jobId); Path histDirPath = FileContext.getFileContext(conf).makeQualified(new Path(jobhistoryDir)); FileContext fc = FileContext.getFileContext(histDirPath.toUri(), conf); // read the previous history file historyFile = fc.makeQualified(JobHistoryUtils.getStagingJobHistoryFile(histDirPath, jobId, (applicationAttemptId.getAttemptId() - 1))); LOG.info("History file is at " + historyFile); in = fc.open(historyFile); return in; } }
4455
34.648
92
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/package-info.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
@InterfaceAudience.Private
package org.apache.hadoop.mapreduce.v2.app;
import org.apache.hadoop.classification.InterfaceAudience;
936
43.619048
75
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app; import java.io.IOException; import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; import java.security.NoSuchAlgorithmException; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.commons.io.IOUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.mapred.FileOutputCommitter; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.LocalContainerLauncher; import org.apache.hadoop.mapred.TaskAttemptListenerImpl; import org.apache.hadoop.mapred.TaskLog; import org.apache.hadoop.mapred.TaskUmbilicalProtocol; import org.apache.hadoop.mapreduce.CryptoUtils; import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.OutputCommitter; import org.apache.hadoop.mapreduce.OutputFormat; import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.hadoop.mapreduce.TaskAttemptID; import org.apache.hadoop.mapreduce.TypeConverter; import org.apache.hadoop.mapreduce.counters.Limits; import org.apache.hadoop.mapreduce.jobhistory.AMStartedEvent; import org.apache.hadoop.mapreduce.jobhistory.EventReader; import org.apache.hadoop.mapreduce.jobhistory.EventType; import org.apache.hadoop.mapreduce.jobhistory.HistoryEvent; import org.apache.hadoop.mapreduce.jobhistory.JobHistoryCopyService; import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent; import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler; import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser; import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo; import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo; import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo; import org.apache.hadoop.mapreduce.security.TokenCache; import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager; import org.apache.hadoop.mapreduce.task.JobContextImpl; import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl; import org.apache.hadoop.mapreduce.v2.api.records.AMInfo; import org.apache.hadoop.mapreduce.v2.api.records.JobId; import 
org.apache.hadoop.mapreduce.v2.api.records.JobReport; import org.apache.hadoop.mapreduce.v2.api.records.JobState; import org.apache.hadoop.mapreduce.v2.api.records.TaskId; import org.apache.hadoop.mapreduce.v2.api.records.TaskState; import org.apache.hadoop.mapreduce.v2.api.records.TaskType; import org.apache.hadoop.mapreduce.v2.app.client.ClientService; import org.apache.hadoop.mapreduce.v2.app.client.MRClientService; import org.apache.hadoop.mapreduce.v2.app.commit.CommitterEvent; import org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler; import org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventType; import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.mapreduce.v2.app.job.JobStateInternal; import org.apache.hadoop.mapreduce.v2.app.job.Task; import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType; import org.apache.hadoop.mapreduce.v2.app.job.event.JobFinishEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.JobStartEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType; import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl; import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncher; import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherEvent; import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherImpl; import org.apache.hadoop.mapreduce.v2.app.local.LocalContainerAllocator; import org.apache.hadoop.mapreduce.v2.app.metrics.MRAppMetrics; import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator; import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent; import org.apache.hadoop.mapreduce.v2.app.rm.RMCommunicator; import org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator; import org.apache.hadoop.mapreduce.v2.app.rm.RMContainerRequestor; import org.apache.hadoop.mapreduce.v2.app.rm.RMHeartbeatHandler; import org.apache.hadoop.mapreduce.v2.app.speculate.DefaultSpeculator; import org.apache.hadoop.mapreduce.v2.app.speculate.Speculator; import org.apache.hadoop.mapreduce.v2.app.speculate.SpeculatorEvent; import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils; import org.apache.hadoop.mapreduce.v2.util.MRApps; import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils; import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.service.CompositeService; import org.apache.hadoop.service.Service; import org.apache.hadoop.service.ServiceOperations; import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.ShutdownHookManager; import org.apache.hadoop.util.StringInterner; import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.api.ApplicationConstants.Environment; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import 
org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.AsyncDispatcher; import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.event.Event; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; import org.apache.hadoop.yarn.security.client.ClientToAMTokenSecretManager; import org.apache.hadoop.yarn.util.Clock; import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.util.SystemClock; import org.apache.log4j.LogManager; import com.google.common.annotations.VisibleForTesting; import javax.crypto.KeyGenerator; /** * The Map-Reduce Application Master. * The state machine is encapsulated in the implementation of Job interface. * All state changes happens via Job interface. Each event * results in a Finite State Transition in Job. * * MR AppMaster is the composition of loosely coupled services. The services * interact with each other via events. The components resembles the * Actors model. The component acts on received event and send out the * events to other components. * This keeps it highly concurrent with no or minimal synchronization needs. * * The events are dispatched by a central Dispatch mechanism. All components * register to the Dispatcher. * * The information is shared across different components using AppContext. */ @SuppressWarnings("rawtypes") public class MRAppMaster extends CompositeService { private static final Log LOG = LogFactory.getLog(MRAppMaster.class); /** * Priority of the MRAppMaster shutdown hook. */ public static final int SHUTDOWN_HOOK_PRIORITY = 30; public static final String INTERMEDIATE_DATA_ENCRYPTION_ALGO = "HmacSHA1"; private Clock clock; private final long startTime; private final long appSubmitTime; private String appName; private final ApplicationAttemptId appAttemptID; private final ContainerId containerID; private final String nmHost; private final int nmPort; private final int nmHttpPort; protected final MRAppMetrics metrics; private Map<TaskId, TaskInfo> completedTasksFromPreviousRun; private List<AMInfo> amInfos; private AppContext context; private Dispatcher dispatcher; private ClientService clientService; private ContainerAllocator containerAllocator; private ContainerLauncher containerLauncher; private EventHandler<CommitterEvent> committerEventHandler; private Speculator speculator; private TaskAttemptListener taskAttemptListener; private JobTokenSecretManager jobTokenSecretManager = new JobTokenSecretManager(); private JobId jobId; private boolean newApiCommitter; private ClassLoader jobClassLoader; private OutputCommitter committer; private JobEventDispatcher jobEventDispatcher; private JobHistoryEventHandler jobHistoryEventHandler; private SpeculatorEventDispatcher speculatorEventDispatcher; private byte[] encryptedSpillKey; // After a task attempt completes from TaskUmbilicalProtocol's point of view, // it will be transitioned to finishing state. // taskAttemptFinishingMonitor is just a timer for attempts in finishing // state. If the attempt stays in finishing state for too long, // taskAttemptFinishingMonitor will notify the attempt via TA_TIMED_OUT // event. 
private TaskAttemptFinishingMonitor taskAttemptFinishingMonitor; private Job job; private Credentials jobCredentials = new Credentials(); // Filled during init protected UserGroupInformation currentUser; // Will be setup during init @VisibleForTesting protected volatile boolean isLastAMRetry = false; //Something happened and we should shut down right after we start up. boolean errorHappenedShutDown = false; private String shutDownMessage = null; JobStateInternal forcedState = null; private final ScheduledExecutorService logSyncer; private long recoveredJobStartTime = -1L; private static boolean mainStarted = false; @VisibleForTesting protected AtomicBoolean successfullyUnregistered = new AtomicBoolean(false); public MRAppMaster(ApplicationAttemptId applicationAttemptId, ContainerId containerId, String nmHost, int nmPort, int nmHttpPort, long appSubmitTime) { this(applicationAttemptId, containerId, nmHost, nmPort, nmHttpPort, new SystemClock(), appSubmitTime); } public MRAppMaster(ApplicationAttemptId applicationAttemptId, ContainerId containerId, String nmHost, int nmPort, int nmHttpPort, Clock clock, long appSubmitTime) { super(MRAppMaster.class.getName()); this.clock = clock; this.startTime = clock.getTime(); this.appSubmitTime = appSubmitTime; this.appAttemptID = applicationAttemptId; this.containerID = containerId; this.nmHost = nmHost; this.nmPort = nmPort; this.nmHttpPort = nmHttpPort; this.metrics = MRAppMetrics.create(); logSyncer = TaskLog.createLogSyncer(); LOG.info("Created MRAppMaster for application " + applicationAttemptId); } protected TaskAttemptFinishingMonitor createTaskAttemptFinishingMonitor( EventHandler eventHandler) { TaskAttemptFinishingMonitor monitor = new TaskAttemptFinishingMonitor(eventHandler); return monitor; } @Override protected void serviceInit(final Configuration conf) throws Exception { // create the job classloader if enabled createJobClassLoader(conf); conf.setBoolean(Dispatcher.DISPATCHER_EXIT_ON_ERROR_KEY, true); initJobCredentialsAndUGI(conf); dispatcher = createDispatcher(); addIfService(dispatcher); taskAttemptFinishingMonitor = createTaskAttemptFinishingMonitor(dispatcher.getEventHandler()); addIfService(taskAttemptFinishingMonitor); context = new RunningAppContext(conf, taskAttemptFinishingMonitor); // Job name is the same as the app name util we support DAG of jobs // for an app later appName = conf.get(MRJobConfig.JOB_NAME, "<missing app name>"); conf.setInt(MRJobConfig.APPLICATION_ATTEMPT_ID, appAttemptID.getAttemptId()); newApiCommitter = false; jobId = MRBuilderUtils.newJobId(appAttemptID.getApplicationId(), appAttemptID.getApplicationId().getId()); int numReduceTasks = conf.getInt(MRJobConfig.NUM_REDUCES, 0); if ((numReduceTasks > 0 && conf.getBoolean("mapred.reducer.new-api", false)) || (numReduceTasks == 0 && conf.getBoolean("mapred.mapper.new-api", false))) { newApiCommitter = true; LOG.info("Using mapred newApiCommitter."); } boolean copyHistory = false; try { String user = UserGroupInformation.getCurrentUser().getShortUserName(); Path stagingDir = MRApps.getStagingAreaDir(conf, user); FileSystem fs = getFileSystem(conf); boolean stagingExists = fs.exists(stagingDir); Path startCommitFile = MRApps.getStartJobCommitFile(conf, user, jobId); boolean commitStarted = fs.exists(startCommitFile); Path endCommitSuccessFile = MRApps.getEndJobCommitSuccessFile(conf, user, jobId); boolean commitSuccess = fs.exists(endCommitSuccessFile); Path endCommitFailureFile = MRApps.getEndJobCommitFailureFile(conf, user, jobId); boolean commitFailure = 
fs.exists(endCommitFailureFile); if(!stagingExists) { isLastAMRetry = true; LOG.info("Attempt num: " + appAttemptID.getAttemptId() + " is last retry: " + isLastAMRetry + " because the staging dir doesn't exist."); errorHappenedShutDown = true; forcedState = JobStateInternal.ERROR; shutDownMessage = "Staging dir does not exist " + stagingDir; LOG.fatal(shutDownMessage); } else if (commitStarted) { //A commit was started so this is the last time, we just need to know // what result we will use to notify, and how we will unregister errorHappenedShutDown = true; isLastAMRetry = true; LOG.info("Attempt num: " + appAttemptID.getAttemptId() + " is last retry: " + isLastAMRetry + " because a commit was started."); copyHistory = true; if (commitSuccess) { shutDownMessage = "Job commit succeeded in a prior MRAppMaster attempt " + "before it crashed. Recovering."; forcedState = JobStateInternal.SUCCEEDED; } else if (commitFailure) { shutDownMessage = "Job commit failed in a prior MRAppMaster attempt " + "before it crashed. Not retrying."; forcedState = JobStateInternal.FAILED; } else { //The commit is still pending, commit error shutDownMessage = "Job commit from a prior MRAppMaster attempt is " + "potentially in progress. Preventing multiple commit executions"; forcedState = JobStateInternal.ERROR; } } } catch (IOException e) { throw new YarnRuntimeException("Error while initializing", e); } if (errorHappenedShutDown) { NoopEventHandler eater = new NoopEventHandler(); //We do not have a JobEventDispatcher in this path dispatcher.register(JobEventType.class, eater); EventHandler<JobHistoryEvent> historyService = null; if (copyHistory) { historyService = createJobHistoryHandler(context); dispatcher.register(org.apache.hadoop.mapreduce.jobhistory.EventType.class, historyService); } else { dispatcher.register(org.apache.hadoop.mapreduce.jobhistory.EventType.class, eater); } if (copyHistory) { // Now that there's a FINISHING state for application on RM to give AMs // plenty of time to clean up after unregister it's safe to clean staging // directory after unregistering with RM. So, we start the staging-dir // cleaner BEFORE the ContainerAllocator so that on shut-down, // ContainerAllocator unregisters first and then the staging-dir cleaner // deletes staging directory. addService(createStagingDirCleaningService()); } // service to allocate containers from RM (if non-uber) or to fake it (uber) containerAllocator = createContainerAllocator(null, context); addIfService(containerAllocator); dispatcher.register(ContainerAllocator.EventType.class, containerAllocator); if (copyHistory) { // Add the JobHistoryEventHandler last so that it is properly stopped first. // This will guarantee that all history-events are flushed before AM goes // ahead with shutdown. 
// Note: Even though JobHistoryEventHandler is started last, if any // component creates a JobHistoryEvent in the meanwhile, it will be just be // queued inside the JobHistoryEventHandler addIfService(historyService); JobHistoryCopyService cpHist = new JobHistoryCopyService(appAttemptID, dispatcher.getEventHandler()); addIfService(cpHist); } } else { committer = createOutputCommitter(conf); //service to handle requests from JobClient clientService = createClientService(context); // Init ClientService separately so that we stop it separately, since this // service needs to wait some time before it stops so clients can know the // final states clientService.init(conf); containerAllocator = createContainerAllocator(clientService, context); //service to handle the output committer committerEventHandler = createCommitterEventHandler(context, committer); addIfService(committerEventHandler); //service to handle requests to TaskUmbilicalProtocol taskAttemptListener = createTaskAttemptListener(context); addIfService(taskAttemptListener); //service to log job history events EventHandler<JobHistoryEvent> historyService = createJobHistoryHandler(context); dispatcher.register(org.apache.hadoop.mapreduce.jobhistory.EventType.class, historyService); this.jobEventDispatcher = new JobEventDispatcher(); //register the event dispatchers dispatcher.register(JobEventType.class, jobEventDispatcher); dispatcher.register(TaskEventType.class, new TaskEventDispatcher()); dispatcher.register(TaskAttemptEventType.class, new TaskAttemptEventDispatcher()); dispatcher.register(CommitterEventType.class, committerEventHandler); if (conf.getBoolean(MRJobConfig.MAP_SPECULATIVE, false) || conf.getBoolean(MRJobConfig.REDUCE_SPECULATIVE, false)) { //optional service to speculate on task attempts' progress speculator = createSpeculator(conf, context); addIfService(speculator); } speculatorEventDispatcher = new SpeculatorEventDispatcher(conf); dispatcher.register(Speculator.EventType.class, speculatorEventDispatcher); // Now that there's a FINISHING state for application on RM to give AMs // plenty of time to clean up after unregister it's safe to clean staging // directory after unregistering with RM. So, we start the staging-dir // cleaner BEFORE the ContainerAllocator so that on shut-down, // ContainerAllocator unregisters first and then the staging-dir cleaner // deletes staging directory. addService(createStagingDirCleaningService()); // service to allocate containers from RM (if non-uber) or to fake it (uber) addIfService(containerAllocator); dispatcher.register(ContainerAllocator.EventType.class, containerAllocator); // corresponding service to launch allocated containers via NodeManager containerLauncher = createContainerLauncher(context); addIfService(containerLauncher); dispatcher.register(ContainerLauncher.EventType.class, containerLauncher); // Add the JobHistoryEventHandler last so that it is properly stopped first. // This will guarantee that all history-events are flushed before AM goes // ahead with shutdown. 
// Note: Even though JobHistoryEventHandler is started last, if any // component creates a JobHistoryEvent in the meanwhile, it will be just be // queued inside the JobHistoryEventHandler addIfService(historyService); } super.serviceInit(conf); } // end of init() protected Dispatcher createDispatcher() { return new AsyncDispatcher(); } private OutputCommitter createOutputCommitter(Configuration conf) { return callWithJobClassLoader(conf, new Action<OutputCommitter>() { public OutputCommitter call(Configuration conf) { OutputCommitter committer = null; LOG.info("OutputCommitter set in config " + conf.get("mapred.output.committer.class")); if (newApiCommitter) { org.apache.hadoop.mapreduce.v2.api.records.TaskId taskID = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP); org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID = MRBuilderUtils.newTaskAttemptId(taskID, 0); TaskAttemptContext taskContext = new TaskAttemptContextImpl(conf, TypeConverter.fromYarn(attemptID)); OutputFormat outputFormat; try { outputFormat = ReflectionUtils.newInstance(taskContext .getOutputFormatClass(), conf); committer = outputFormat.getOutputCommitter(taskContext); } catch (Exception e) { throw new YarnRuntimeException(e); } } else { committer = ReflectionUtils.newInstance(conf.getClass( "mapred.output.committer.class", FileOutputCommitter.class, org.apache.hadoop.mapred.OutputCommitter.class), conf); } LOG.info("OutputCommitter is " + committer.getClass().getName()); return committer; } }); } protected boolean keepJobFiles(JobConf conf) { return (conf.getKeepTaskFilesPattern() != null || conf .getKeepFailedTaskFiles()); } /** * Create the default file System for this job. * @param conf the conf object * @return the default filesystem for this job * @throws IOException */ protected FileSystem getFileSystem(Configuration conf) throws IOException { return FileSystem.get(conf); } protected Credentials getCredentials() { return jobCredentials; } /** * clean up staging directories for the job. * @throws IOException */ public void cleanupStagingDir() throws IOException { /* make sure we clean the staging files */ String jobTempDir = null; FileSystem fs = getFileSystem(getConfig()); try { if (!keepJobFiles(new JobConf(getConfig()))) { jobTempDir = getConfig().get(MRJobConfig.MAPREDUCE_JOB_DIR); if (jobTempDir == null) { LOG.warn("Job Staging directory is null"); return; } Path jobTempDirPath = new Path(jobTempDir); LOG.info("Deleting staging directory " + FileSystem.getDefaultUri(getConfig()) + " " + jobTempDir); fs.delete(jobTempDirPath, true); } } catch(IOException io) { LOG.error("Failed to cleanup staging dir " + jobTempDir, io); } } /** * Exit call. Just in a function call to enable testing. */ protected void sysexit() { System.exit(0); } @VisibleForTesting public void shutDownJob() { // job has finished // this is the only job, so shut down the Appmaster // note in a workflow scenario, this may lead to creation of a new // job (FIXME?) 
try { //if isLastAMRetry comes as true, should never set it to false if ( !isLastAMRetry){ if (((JobImpl)job).getInternalState() != JobStateInternal.REBOOT) { LOG.info("Job finished cleanly, recording last MRAppMaster retry"); isLastAMRetry = true; } } notifyIsLastAMRetry(isLastAMRetry); // Stop all services // This will also send the final report to the ResourceManager LOG.info("Calling stop for all the services"); MRAppMaster.this.stop(); if (isLastAMRetry) { // Send job-end notification when it is safe to report termination to // users and it is the last AM retry if (getConfig().get(MRJobConfig.MR_JOB_END_NOTIFICATION_URL) != null) { try { LOG.info("Job end notification started for jobID : " + job.getReport().getJobId()); JobEndNotifier notifier = new JobEndNotifier(); notifier.setConf(getConfig()); JobReport report = job.getReport(); // If unregistration fails, the final state is unavailable. However, // at the last AM Retry, the client will finally be notified FAILED // from RM, so we should let users know FAILED via notifier as well if (!context.hasSuccessfullyUnregistered()) { report.setJobState(JobState.FAILED); } notifier.notify(report); } catch (InterruptedException ie) { LOG.warn("Job end notification interrupted for jobID : " + job.getReport().getJobId(), ie); } } } try { Thread.sleep(5000); } catch (InterruptedException e) { e.printStackTrace(); } clientService.stop(); } catch (Throwable t) { LOG.warn("Graceful stop failed. Exiting.. ", t); exitMRAppMaster(1, t); } exitMRAppMaster(0, null); } /** MRAppMaster exit method which has been instrumented for both runtime and * unit testing. * If the main thread has not been started, this method was called from a * test. In that case, configure the ExitUtil object to not exit the JVM. * * @param status integer indicating exit status * @param t throwable exception that could be null */ private void exitMRAppMaster(int status, Throwable t) { if (!mainStarted) { ExitUtil.disableSystemExit(); } try { if (t != null) { ExitUtil.terminate(status, t); } else { ExitUtil.terminate(status); } } catch (ExitUtil.ExitException ee) { // ExitUtil.ExitException is only thrown from the ExitUtil test code when // SystemExit has been disabled. It is always thrown in in the test code, // even when no error occurs. Ignore the exception so that tests don't // need to handle it. } } private class JobFinishEventHandler implements EventHandler<JobFinishEvent> { @Override public void handle(JobFinishEvent event) { // Create a new thread to shutdown the AM. We should not do it in-line // to avoid blocking the dispatcher itself. new Thread() { @Override public void run() { shutDownJob(); } }.start(); } } /** * create an event handler that handles the job finish event. * @return the job finish event handler. */ protected EventHandler<JobFinishEvent> createJobFinishEventHandler() { return new JobFinishEventHandler(); } /** Create and initialize (but don't start) a single job. * @param forcedState a state to force the job into or null for normal operation. * @param diagnostic a diagnostic message to include with the job. 
*/ protected Job createJob(Configuration conf, JobStateInternal forcedState, String diagnostic) { // create single job Job newJob = new JobImpl(jobId, appAttemptID, conf, dispatcher.getEventHandler(), taskAttemptListener, jobTokenSecretManager, jobCredentials, clock, completedTasksFromPreviousRun, metrics, committer, newApiCommitter, currentUser.getUserName(), appSubmitTime, amInfos, context, forcedState, diagnostic); ((RunningAppContext) context).jobs.put(newJob.getID(), newJob); dispatcher.register(JobFinishEvent.Type.class, createJobFinishEventHandler()); return newJob; } // end createJob() /** * Obtain the tokens needed by the job and put them in the UGI * @param conf */ protected void initJobCredentialsAndUGI(Configuration conf) { try { this.currentUser = UserGroupInformation.getCurrentUser(); this.jobCredentials = ((JobConf)conf).getCredentials(); if (CryptoUtils.isEncryptedSpillEnabled(conf)) { int keyLen = conf.getInt( MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA_KEY_SIZE_BITS, MRJobConfig .DEFAULT_MR_ENCRYPTED_INTERMEDIATE_DATA_KEY_SIZE_BITS); KeyGenerator keyGen = KeyGenerator.getInstance(INTERMEDIATE_DATA_ENCRYPTION_ALGO); keyGen.init(keyLen); encryptedSpillKey = keyGen.generateKey().getEncoded(); } else { encryptedSpillKey = new byte[] {0}; } } catch (IOException e) { throw new YarnRuntimeException(e); } catch (NoSuchAlgorithmException e) { throw new YarnRuntimeException(e); } } protected EventHandler<JobHistoryEvent> createJobHistoryHandler( AppContext context) { this.jobHistoryEventHandler = new JobHistoryEventHandler(context, getStartCount()); return this.jobHistoryEventHandler; } protected AbstractService createStagingDirCleaningService() { return new StagingDirCleaningService(); } protected Speculator createSpeculator(Configuration conf, final AppContext context) { return callWithJobClassLoader(conf, new Action<Speculator>() { public Speculator call(Configuration conf) { Class<? extends Speculator> speculatorClass; try { speculatorClass // "yarn.mapreduce.job.speculator.class" = conf.getClass(MRJobConfig.MR_AM_JOB_SPECULATOR, DefaultSpeculator.class, Speculator.class); Constructor<? 
extends Speculator> speculatorConstructor = speculatorClass.getConstructor (Configuration.class, AppContext.class); Speculator result = speculatorConstructor.newInstance(conf, context); return result; } catch (InstantiationException ex) { LOG.error("Can't make a speculator -- check " + MRJobConfig.MR_AM_JOB_SPECULATOR, ex); throw new YarnRuntimeException(ex); } catch (IllegalAccessException ex) { LOG.error("Can't make a speculator -- check " + MRJobConfig.MR_AM_JOB_SPECULATOR, ex); throw new YarnRuntimeException(ex); } catch (InvocationTargetException ex) { LOG.error("Can't make a speculator -- check " + MRJobConfig.MR_AM_JOB_SPECULATOR, ex); throw new YarnRuntimeException(ex); } catch (NoSuchMethodException ex) { LOG.error("Can't make a speculator -- check " + MRJobConfig.MR_AM_JOB_SPECULATOR, ex); throw new YarnRuntimeException(ex); } } }); } protected TaskAttemptListener createTaskAttemptListener(AppContext context) { TaskAttemptListener lis = new TaskAttemptListenerImpl(context, jobTokenSecretManager, getRMHeartbeatHandler(), encryptedSpillKey); return lis; } protected EventHandler<CommitterEvent> createCommitterEventHandler( AppContext context, OutputCommitter committer) { return new CommitterEventHandler(context, committer, getRMHeartbeatHandler(), jobClassLoader); } protected ContainerAllocator createContainerAllocator( final ClientService clientService, final AppContext context) { return new ContainerAllocatorRouter(clientService, context); } protected RMHeartbeatHandler getRMHeartbeatHandler() { return (RMHeartbeatHandler) containerAllocator; } protected ContainerLauncher createContainerLauncher(final AppContext context) { return new ContainerLauncherRouter(context); } //TODO:should have an interface for MRClientService protected ClientService createClientService(AppContext context) { return new MRClientService(context); } public ApplicationId getAppID() { return appAttemptID.getApplicationId(); } public ApplicationAttemptId getAttemptID() { return appAttemptID; } public JobId getJobId() { return jobId; } public OutputCommitter getCommitter() { return committer; } public boolean isNewApiCommitter() { return newApiCommitter; } public int getStartCount() { return appAttemptID.getAttemptId(); } public AppContext getContext() { return context; } public Dispatcher getDispatcher() { return dispatcher; } public Map<TaskId, TaskInfo> getCompletedTaskFromPreviousRun() { return completedTasksFromPreviousRun; } public List<AMInfo> getAllAMInfos() { return amInfos; } public ContainerAllocator getContainerAllocator() { return containerAllocator; } public ContainerLauncher getContainerLauncher() { return containerLauncher; } public TaskAttemptListener getTaskAttemptListener() { return taskAttemptListener; } public Boolean isLastAMRetry() { return isLastAMRetry; } /** * By the time life-cycle of this router starts, job-init would have already * happened. 
*/ private final class ContainerAllocatorRouter extends AbstractService implements ContainerAllocator, RMHeartbeatHandler { private final ClientService clientService; private final AppContext context; private ContainerAllocator containerAllocator; ContainerAllocatorRouter(ClientService clientService, AppContext context) { super(ContainerAllocatorRouter.class.getName()); this.clientService = clientService; this.context = context; } @Override protected void serviceStart() throws Exception { if (job.isUber()) { MRApps.setupDistributedCacheLocal(getConfig()); this.containerAllocator = new LocalContainerAllocator( this.clientService, this.context, nmHost, nmPort, nmHttpPort , containerID); } else { this.containerAllocator = new RMContainerAllocator( this.clientService, this.context); } ((Service)this.containerAllocator).init(getConfig()); ((Service)this.containerAllocator).start(); super.serviceStart(); } @Override protected void serviceStop() throws Exception { ServiceOperations.stop((Service) this.containerAllocator); super.serviceStop(); } @Override public void handle(ContainerAllocatorEvent event) { this.containerAllocator.handle(event); } public void setSignalled(boolean isSignalled) { ((RMCommunicator) containerAllocator).setSignalled(isSignalled); } public void setShouldUnregister(boolean shouldUnregister) { ((RMCommunicator) containerAllocator).setShouldUnregister(shouldUnregister); } @Override public long getLastHeartbeatTime() { return ((RMCommunicator) containerAllocator).getLastHeartbeatTime(); } @Override public void runOnNextHeartbeat(Runnable callback) { ((RMCommunicator) containerAllocator).runOnNextHeartbeat(callback); } } /** * By the time life-cycle of this router starts, job-init would have already * happened. */ private final class ContainerLauncherRouter extends AbstractService implements ContainerLauncher { private final AppContext context; private ContainerLauncher containerLauncher; ContainerLauncherRouter(AppContext context) { super(ContainerLauncherRouter.class.getName()); this.context = context; } @Override protected void serviceStart() throws Exception { if (job.isUber()) { this.containerLauncher = new LocalContainerLauncher(context, (TaskUmbilicalProtocol) taskAttemptListener, jobClassLoader); ((LocalContainerLauncher) this.containerLauncher) .setEncryptedSpillKey(encryptedSpillKey); } else { this.containerLauncher = new ContainerLauncherImpl(context); } ((Service)this.containerLauncher).init(getConfig()); ((Service)this.containerLauncher).start(); super.serviceStart(); } @Override public void handle(ContainerLauncherEvent event) { this.containerLauncher.handle(event); } @Override protected void serviceStop() throws Exception { ServiceOperations.stop((Service) this.containerLauncher); super.serviceStop(); } } private final class StagingDirCleaningService extends AbstractService { StagingDirCleaningService() { super(StagingDirCleaningService.class.getName()); } @Override protected void serviceStop() throws Exception { try { if(isLastAMRetry) { cleanupStagingDir(); } else { LOG.info("Skipping cleaning up the staging dir. 
" + "assuming AM will be retried."); } } catch (IOException io) { LOG.error("Failed to cleanup staging dir: ", io); } super.serviceStop(); } } public class RunningAppContext implements AppContext { private final Map<JobId, Job> jobs = new ConcurrentHashMap<JobId, Job>(); private final Configuration conf; private final ClusterInfo clusterInfo = new ClusterInfo(); private final ClientToAMTokenSecretManager clientToAMTokenSecretManager; private final TaskAttemptFinishingMonitor taskAttemptFinishingMonitor; public RunningAppContext(Configuration config, TaskAttemptFinishingMonitor taskAttemptFinishingMonitor) { this.conf = config; this.clientToAMTokenSecretManager = new ClientToAMTokenSecretManager(appAttemptID, null); this.taskAttemptFinishingMonitor = taskAttemptFinishingMonitor; } @Override public ApplicationAttemptId getApplicationAttemptId() { return appAttemptID; } @Override public ApplicationId getApplicationID() { return appAttemptID.getApplicationId(); } @Override public String getApplicationName() { return appName; } @Override public long getStartTime() { return startTime; } @Override public Job getJob(JobId jobID) { return jobs.get(jobID); } @Override public Map<JobId, Job> getAllJobs() { return jobs; } @Override public EventHandler getEventHandler() { return dispatcher.getEventHandler(); } @Override public CharSequence getUser() { return this.conf.get(MRJobConfig.USER_NAME); } @Override public Clock getClock() { return clock; } @Override public ClusterInfo getClusterInfo() { return this.clusterInfo; } @Override public Set<String> getBlacklistedNodes() { return ((RMContainerRequestor) containerAllocator).getBlacklistedNodes(); } @Override public ClientToAMTokenSecretManager getClientToAMTokenSecretManager() { return clientToAMTokenSecretManager; } @Override public boolean isLastAMRetry(){ return isLastAMRetry; } @Override public boolean hasSuccessfullyUnregistered() { return successfullyUnregistered.get(); } public void markSuccessfulUnregistration() { successfullyUnregistered.set(true); } public void resetIsLastAMRetry() { isLastAMRetry = false; } @Override public String getNMHostname() { return nmHost; } @Override public TaskAttemptFinishingMonitor getTaskAttemptFinishingMonitor() { return taskAttemptFinishingMonitor; } } @SuppressWarnings("unchecked") @Override protected void serviceStart() throws Exception { amInfos = new LinkedList<AMInfo>(); completedTasksFromPreviousRun = new HashMap<TaskId, TaskInfo>(); processRecovery(); // Current an AMInfo for the current AM generation. AMInfo amInfo = MRBuilderUtils.newAMInfo(appAttemptID, startTime, containerID, nmHost, nmPort, nmHttpPort); // /////////////////// Create the job itself. job = createJob(getConfig(), forcedState, shutDownMessage); // End of creating the job. // Send out an MR AM inited event for all previous AMs. for (AMInfo info : amInfos) { dispatcher.getEventHandler().handle( new JobHistoryEvent(job.getID(), new AMStartedEvent(info .getAppAttemptId(), info.getStartTime(), info.getContainerId(), info.getNodeManagerHost(), info.getNodeManagerPort(), info .getNodeManagerHttpPort(), appSubmitTime))); } // Send out an MR AM inited event for this AM. dispatcher.getEventHandler().handle( new JobHistoryEvent(job.getID(), new AMStartedEvent(amInfo .getAppAttemptId(), amInfo.getStartTime(), amInfo.getContainerId(), amInfo.getNodeManagerHost(), amInfo.getNodeManagerPort(), amInfo .getNodeManagerHttpPort(), this.forcedState == null ? 
null : this.forcedState.toString(), appSubmitTime))); amInfos.add(amInfo); // metrics system init is really init & start. // It's more test friendly to put it here. DefaultMetricsSystem.initialize("MRAppMaster"); boolean initFailed = false; if (!errorHappenedShutDown) { // create a job event for job intialization JobEvent initJobEvent = new JobEvent(job.getID(), JobEventType.JOB_INIT); // Send init to the job (this does NOT trigger job execution) // This is a synchronous call, not an event through dispatcher. We want // job-init to be done completely here. jobEventDispatcher.handle(initJobEvent); // If job is still not initialized, an error happened during // initialization. Must complete starting all of the services so failure // events can be processed. initFailed = (((JobImpl)job).getInternalState() != JobStateInternal.INITED); // JobImpl's InitTransition is done (call above is synchronous), so the // "uber-decision" (MR-1220) has been made. Query job and switch to // ubermode if appropriate (by registering different container-allocator // and container-launcher services/event-handlers). if (job.isUber()) { speculatorEventDispatcher.disableSpeculation(); LOG.info("MRAppMaster uberizing job " + job.getID() + " in local container (\"uber-AM\") on node " + nmHost + ":" + nmPort + "."); } else { // send init to speculator only for non-uber jobs. // This won't yet start as dispatcher isn't started yet. dispatcher.getEventHandler().handle( new SpeculatorEvent(job.getID(), clock.getTime())); LOG.info("MRAppMaster launching normal, non-uberized, multi-container " + "job " + job.getID() + "."); } // Start ClientService here, since it's not initialized if // errorHappenedShutDown is true clientService.start(); } //start all the components super.serviceStart(); // finally set the job classloader MRApps.setClassLoader(jobClassLoader, getConfig()); // set job classloader if configured Limits.init(getConfig()); if (initFailed) { JobEvent initFailedEvent = new JobEvent(job.getID(), JobEventType.JOB_INIT_FAILED); jobEventDispatcher.handle(initFailedEvent); } else { // All components have started, start the job. startJobs(); } } protected void shutdownTaskLog() { TaskLog.syncLogsShutdown(logSyncer); } @Override public void stop() { super.stop(); shutdownTaskLog(); } private boolean isRecoverySupported() throws IOException { boolean isSupported = false; Configuration conf = getConfig(); if (committer != null) { final JobContext _jobContext; if (newApiCommitter) { _jobContext = new JobContextImpl( conf, TypeConverter.fromYarn(getJobId())); } else { _jobContext = new org.apache.hadoop.mapred.JobContextImpl( new JobConf(conf), TypeConverter.fromYarn(getJobId())); } isSupported = callWithJobClassLoader(conf, new ExceptionAction<Boolean>() { public Boolean call(Configuration conf) throws IOException { return committer.isRecoverySupported(_jobContext); } }); } return isSupported; } private void processRecovery() throws IOException{ if (appAttemptID.getAttemptId() == 1) { return; // no need to recover on the first attempt } boolean recoveryEnabled = getConfig().getBoolean( MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE_DEFAULT); boolean recoverySupportedByCommitter = isRecoverySupported(); // If a shuffle secret was not provided by the job client then this app // attempt will generate one. However that disables recovery if there // are reducers as the shuffle secret would be app attempt specific. 
int numReduceTasks = getConfig().getInt(MRJobConfig.NUM_REDUCES, 0); boolean shuffleKeyValidForRecovery = TokenCache.getShuffleSecretKey(jobCredentials) != null; if (recoveryEnabled && recoverySupportedByCommitter && (numReduceTasks <= 0 || shuffleKeyValidForRecovery)) { LOG.info("Recovery is enabled. " + "Will try to recover from previous life on best effort basis."); try { parsePreviousJobHistory(); } catch (IOException e) { LOG.warn("Unable to parse prior job history, aborting recovery", e); // try to get just the AMInfos amInfos.addAll(readJustAMInfos()); } } else { LOG.info("Will not try to recover. recoveryEnabled: " + recoveryEnabled + " recoverySupportedByCommitter: " + recoverySupportedByCommitter + " numReduceTasks: " + numReduceTasks + " shuffleKeyValidForRecovery: " + shuffleKeyValidForRecovery + " ApplicationAttemptID: " + appAttemptID.getAttemptId()); // Get the amInfos anyways whether recovery is enabled or not amInfos.addAll(readJustAMInfos()); } } private static FSDataInputStream getPreviousJobHistoryStream( Configuration conf, ApplicationAttemptId appAttemptId) throws IOException { Path historyFile = JobHistoryUtils.getPreviousJobHistoryPath(conf, appAttemptId); LOG.info("Previous history file is at " + historyFile); return historyFile.getFileSystem(conf).open(historyFile); } private void parsePreviousJobHistory() throws IOException { FSDataInputStream in = getPreviousJobHistoryStream(getConfig(), appAttemptID); JobHistoryParser parser = new JobHistoryParser(in); JobInfo jobInfo = parser.parse(); Exception parseException = parser.getParseException(); if (parseException != null) { LOG.info("Got an error parsing job-history file" + ", ignoring incomplete events.", parseException); } Map<org.apache.hadoop.mapreduce.TaskID, TaskInfo> taskInfos = jobInfo .getAllTasks(); for (TaskInfo taskInfo : taskInfos.values()) { if (TaskState.SUCCEEDED.toString().equals(taskInfo.getTaskStatus())) { Iterator<Entry<TaskAttemptID, TaskAttemptInfo>> taskAttemptIterator = taskInfo.getAllTaskAttempts().entrySet().iterator(); while (taskAttemptIterator.hasNext()) { Map.Entry<TaskAttemptID, TaskAttemptInfo> currentEntry = taskAttemptIterator.next(); if (!jobInfo.getAllCompletedTaskAttempts().containsKey(currentEntry.getKey())) { taskAttemptIterator.remove(); } } completedTasksFromPreviousRun .put(TypeConverter.toYarn(taskInfo.getTaskId()), taskInfo); LOG.info("Read from history task " + TypeConverter.toYarn(taskInfo.getTaskId())); } } LOG.info("Read completed tasks from history " + completedTasksFromPreviousRun.size()); recoveredJobStartTime = jobInfo.getLaunchTime(); // recover AMInfos List<JobHistoryParser.AMInfo> jhAmInfoList = jobInfo.getAMInfos(); if (jhAmInfoList != null) { for (JobHistoryParser.AMInfo jhAmInfo : jhAmInfoList) { AMInfo amInfo = MRBuilderUtils.newAMInfo(jhAmInfo.getAppAttemptId(), jhAmInfo.getStartTime(), jhAmInfo.getContainerId(), jhAmInfo.getNodeManagerHost(), jhAmInfo.getNodeManagerPort(), jhAmInfo.getNodeManagerHttpPort()); amInfos.add(amInfo); } } } private List<AMInfo> readJustAMInfos() { List<AMInfo> amInfos = new ArrayList<AMInfo>(); FSDataInputStream inputStream = null; try { inputStream = getPreviousJobHistoryStream(getConfig(), appAttemptID); EventReader jobHistoryEventReader = new EventReader(inputStream); // All AMInfos are contiguous. Track when the first AMStartedEvent // appears. 
boolean amStartedEventsBegan = false; HistoryEvent event; while ((event = jobHistoryEventReader.getNextEvent()) != null) { if (event.getEventType() == EventType.AM_STARTED) { if (!amStartedEventsBegan) { // First AMStartedEvent. amStartedEventsBegan = true; } AMStartedEvent amStartedEvent = (AMStartedEvent) event; amInfos.add(MRBuilderUtils.newAMInfo( amStartedEvent.getAppAttemptId(), amStartedEvent.getStartTime(), amStartedEvent.getContainerId(), StringInterner.weakIntern(amStartedEvent.getNodeManagerHost()), amStartedEvent.getNodeManagerPort(), amStartedEvent.getNodeManagerHttpPort())); } else if (amStartedEventsBegan) { // This means AMStartedEvents began and this event is a // non-AMStarted event. // No need to continue reading all the other events. break; } } } catch (IOException e) { LOG.warn("Could not parse the old history file. " + "Will not have old AMinfos ", e); } finally { if (inputStream != null) { IOUtils.closeQuietly(inputStream); } } return amInfos; } /** * This can be overridden to instantiate multiple jobs and create a * workflow. * * TODO: Rework the design to actually support this. Currently much of the * job stuff has been moved to init() above to support uberization (MR-1220). * In a typical workflow, one presumably would want to uberize only a subset * of the jobs (the "small" ones), which is awkward with the current design. */ @SuppressWarnings("unchecked") protected void startJobs() { /** create a job-start event to get this ball rolling */ JobEvent startJobEvent = new JobStartEvent(job.getID(), recoveredJobStartTime); /** send the job-start event. this triggers the job execution. */ dispatcher.getEventHandler().handle(startJobEvent); } private class JobEventDispatcher implements EventHandler<JobEvent> { @SuppressWarnings("unchecked") @Override public void handle(JobEvent event) { ((EventHandler<JobEvent>)context.getJob(event.getJobId())).handle(event); } } private class TaskEventDispatcher implements EventHandler<TaskEvent> { @SuppressWarnings("unchecked") @Override public void handle(TaskEvent event) { Task task = context.getJob(event.getTaskID().getJobId()).getTask( event.getTaskID()); ((EventHandler<TaskEvent>)task).handle(event); } } private class TaskAttemptEventDispatcher implements EventHandler<TaskAttemptEvent> { @SuppressWarnings("unchecked") @Override public void handle(TaskAttemptEvent event) { Job job = context.getJob(event.getTaskAttemptID().getTaskId().getJobId()); Task task = job.getTask(event.getTaskAttemptID().getTaskId()); TaskAttempt attempt = task.getAttempt(event.getTaskAttemptID()); ((EventHandler<TaskAttemptEvent>) attempt).handle(event); } } private class SpeculatorEventDispatcher implements EventHandler<SpeculatorEvent> { private final Configuration conf; private volatile boolean disabled; public SpeculatorEventDispatcher(Configuration config) { this.conf = config; } @Override public void handle(final SpeculatorEvent event) { if (disabled) { return; } TaskId tId = event.getTaskID(); TaskType tType = null; /* event's TaskId will be null if the event type is JOB_CREATE or * ATTEMPT_STATUS_UPDATE */ if (tId != null) { tType = tId.getTaskType(); } boolean shouldMapSpec = conf.getBoolean(MRJobConfig.MAP_SPECULATIVE, false); boolean shouldReduceSpec = conf.getBoolean(MRJobConfig.REDUCE_SPECULATIVE, false); /* The point of the following is to allow the MAP and REDUCE speculative * config values to be independent: * IF spec-exec is turned on for maps AND the task is a map task * OR IF spec-exec is turned on for reduces AND the task is a reduce 
task * THEN call the speculator to handle the event. */ if ( (shouldMapSpec && (tType == null || tType == TaskType.MAP)) || (shouldReduceSpec && (tType == null || tType == TaskType.REDUCE))) { // Speculator IS enabled, direct the event to there. callWithJobClassLoader(conf, new Action<Void>() { public Void call(Configuration conf) { speculator.handle(event); return null; } }); } } public void disableSpeculation() { disabled = true; } } /** * Eats events that are not needed in some error cases. */ private static class NoopEventHandler implements EventHandler<Event> { @Override public void handle(Event event) { //Empty } } private static void validateInputParam(String value, String param) throws IOException { if (value == null) { String msg = param + " is null"; LOG.error(msg); throw new IOException(msg); } } public static void main(String[] args) { try { mainStarted = true; Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler()); String containerIdStr = System.getenv(Environment.CONTAINER_ID.name()); String nodeHostString = System.getenv(Environment.NM_HOST.name()); String nodePortString = System.getenv(Environment.NM_PORT.name()); String nodeHttpPortString = System.getenv(Environment.NM_HTTP_PORT.name()); String appSubmitTimeStr = System.getenv(ApplicationConstants.APP_SUBMIT_TIME_ENV); validateInputParam(containerIdStr, Environment.CONTAINER_ID.name()); validateInputParam(nodeHostString, Environment.NM_HOST.name()); validateInputParam(nodePortString, Environment.NM_PORT.name()); validateInputParam(nodeHttpPortString, Environment.NM_HTTP_PORT.name()); validateInputParam(appSubmitTimeStr, ApplicationConstants.APP_SUBMIT_TIME_ENV); ContainerId containerId = ConverterUtils.toContainerId(containerIdStr); ApplicationAttemptId applicationAttemptId = containerId.getApplicationAttemptId(); long appSubmitTime = Long.parseLong(appSubmitTimeStr); MRAppMaster appMaster = new MRAppMaster(applicationAttemptId, containerId, nodeHostString, Integer.parseInt(nodePortString), Integer.parseInt(nodeHttpPortString), appSubmitTime); ShutdownHookManager.get().addShutdownHook( new MRAppMasterShutdownHook(appMaster), SHUTDOWN_HOOK_PRIORITY); JobConf conf = new JobConf(new YarnConfiguration()); conf.addResource(new Path(MRJobConfig.JOB_CONF_FILE)); MRWebAppUtil.initialize(conf); // log the system properties String systemPropsToLog = MRApps.getSystemPropertiesToLog(conf); if (systemPropsToLog != null) { LOG.info(systemPropsToLog); } String jobUserName = System .getenv(ApplicationConstants.Environment.USER.name()); conf.set(MRJobConfig.USER_NAME, jobUserName); initAndStartAppMaster(appMaster, conf, jobUserName); } catch (Throwable t) { LOG.fatal("Error starting MRAppMaster", t); ExitUtil.terminate(1, t); } } // The shutdown hook that runs when a signal is received AND during normal // close of the JVM. static class MRAppMasterShutdownHook implements Runnable { MRAppMaster appMaster; MRAppMasterShutdownHook(MRAppMaster appMaster) { this.appMaster = appMaster; } public void run() { LOG.info("MRAppMaster received a signal. 
Signaling RMCommunicator and " + "JobHistoryEventHandler."); // Notify the JHEH and RMCommunicator that a SIGTERM has been received so // that they don't take too long in shutting down if(appMaster.containerAllocator instanceof ContainerAllocatorRouter) { ((ContainerAllocatorRouter) appMaster.containerAllocator) .setSignalled(true); } appMaster.notifyIsLastAMRetry(appMaster.isLastAMRetry); appMaster.stop(); } } public void notifyIsLastAMRetry(boolean isLastAMRetry){ if(containerAllocator instanceof ContainerAllocatorRouter) { LOG.info("Notify RMCommunicator isAMLastRetry: " + isLastAMRetry); ((ContainerAllocatorRouter) containerAllocator) .setShouldUnregister(isLastAMRetry); } if(jobHistoryEventHandler != null) { LOG.info("Notify JHEH isAMLastRetry: " + isLastAMRetry); jobHistoryEventHandler.setForcejobCompletion(isLastAMRetry); } } protected static void initAndStartAppMaster(final MRAppMaster appMaster, final JobConf conf, String jobUserName) throws IOException, InterruptedException { UserGroupInformation.setConfiguration(conf); // Security framework already loaded the tokens into current UGI, just use // them Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials(); LOG.info("Executing with tokens:"); for (Token<?> token : credentials.getAllTokens()) { LOG.info(token); } UserGroupInformation appMasterUgi = UserGroupInformation .createRemoteUser(jobUserName); appMasterUgi.addCredentials(credentials); // Now remove the AM->RM token so tasks don't have it Iterator<Token<?>> iter = credentials.getAllTokens().iterator(); while (iter.hasNext()) { Token<?> token = iter.next(); if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) { iter.remove(); } } conf.getCredentials().addAll(credentials); appMasterUgi.doAs(new PrivilegedExceptionAction<Object>() { @Override public Object run() throws Exception { appMaster.init(conf); appMaster.start(); if(appMaster.errorHappenedShutDown) { throw new IOException("Was asked to shut down."); } return null; } }); } /** * Creates a job classloader based on the configuration if the job classloader * is enabled. It is a no-op if the job classloader is not enabled. */ private void createJobClassLoader(Configuration conf) throws IOException { jobClassLoader = MRApps.createJobClassLoader(conf); } /** * Executes the given action with the job classloader set as the configuration * classloader as well as the thread context class loader if the job * classloader is enabled. After the call, the original classloader is * restored. * * If the job classloader is enabled and the code needs to load user-supplied * classes via configuration or thread context classloader, this method should * be used in order to load them. 
* * @param conf the configuration on which the classloader will be set * @param action the callable action to be executed */ <T> T callWithJobClassLoader(Configuration conf, Action<T> action) { // if the job classloader is enabled, we may need it to load the (custom) // classes; we make the job classloader available and unset it once it is // done ClassLoader currentClassLoader = conf.getClassLoader(); boolean setJobClassLoader = jobClassLoader != null && currentClassLoader != jobClassLoader; if (setJobClassLoader) { MRApps.setClassLoader(jobClassLoader, conf); } try { return action.call(conf); } finally { if (setJobClassLoader) { // restore the original classloader MRApps.setClassLoader(currentClassLoader, conf); } } } /** * Executes the given action that can throw a checked exception with the job * classloader set as the configuration classloader as well as the thread * context class loader if the job classloader is enabled. After the call, the * original classloader is restored. * * If the job classloader is enabled and the code needs to load user-supplied * classes via configuration or thread context classloader, this method should * be used in order to load them. * * @param conf the configuration on which the classloader will be set * @param action the callable action to be executed * @throws IOException if the underlying action throws an IOException * @throws YarnRuntimeException if the underlying action throws an exception * other than an IOException */ <T> T callWithJobClassLoader(Configuration conf, ExceptionAction<T> action) throws IOException { // if the job classloader is enabled, we may need it to load the (custom) // classes; we make the job classloader available and unset it once it is // done ClassLoader currentClassLoader = conf.getClassLoader(); boolean setJobClassLoader = jobClassLoader != null && currentClassLoader != jobClassLoader; if (setJobClassLoader) { MRApps.setClassLoader(jobClassLoader, conf); } try { return action.call(conf); } catch (IOException e) { throw e; } catch (YarnRuntimeException e) { throw e; } catch (Exception e) { // wrap it with a YarnRuntimeException throw new YarnRuntimeException(e); } finally { if (setJobClassLoader) { // restore the original classloader MRApps.setClassLoader(currentClassLoader, conf); } } } /** * Action to be wrapped with setting and unsetting the job classloader */ private static interface Action<T> { T call(Configuration conf); } private static interface ExceptionAction<T> { T call(Configuration conf) throws Exception; } protected void shutdownLogManager() { LogManager.shutdown(); } @Override protected void serviceStop() throws Exception { super.serviceStop(); shutdownLogManager(); } public ClientService getClientService() { return clientService; } }
64,381
36.716462
98
java
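The exitMRAppMaster method in the MRAppMaster source above leans on Hadoop's ExitUtil so that unit tests can turn the real System.exit into a catchable exception. A minimal sketch of that pattern, outside of MRAppMaster, might look like the following; the class name ExitUtilSketch and the underTest flag are hypothetical and only illustrate the disableSystemExit / terminate / ExitException interplay shown in the code above.

import org.apache.hadoop.util.ExitUtil;

public class ExitUtilSketch {
  // Hypothetical flag a test harness would set before invoking shutdown logic.
  static boolean underTest = true;

  static void shutdown(int status, Throwable cause) {
    if (underTest) {
      // Makes ExitUtil.terminate() throw ExitException instead of exiting the JVM.
      ExitUtil.disableSystemExit();
    }
    try {
      if (cause != null) {
        ExitUtil.terminate(status, cause);
      } else {
        ExitUtil.terminate(status);
      }
    } catch (ExitUtil.ExitException ee) {
      // Only reachable when system exit is disabled; safe to ignore in tests.
    }
  }

  public static void main(String[] args) {
    shutdown(0, null);
    System.out.println("JVM still alive because system exit was disabled");
  }
}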
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app; import java.util.Iterator; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptDiagnosticsUpdateEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.util.Clock; /** * This class keeps track of tasks that have already been launched. It * determines if a task is alive and running or marks a task as dead if it does * not hear from it for a long time. * */ @SuppressWarnings({"unchecked", "rawtypes"}) public class TaskHeartbeatHandler extends AbstractService { private static class ReportTime { private long lastProgress; public ReportTime(long time) { setLastProgress(time); } public synchronized void setLastProgress(long time) { lastProgress = time; } public synchronized long getLastProgress() { return lastProgress; } } private static final Log LOG = LogFactory.getLog(TaskHeartbeatHandler.class); //thread which runs periodically to see the last time since a heartbeat is //received from a task. private Thread lostTaskCheckerThread; private volatile boolean stopped; private int taskTimeOut = 5 * 60 * 1000;// 5 mins private int taskTimeOutCheckInterval = 30 * 1000; // 30 seconds. 
private final EventHandler eventHandler; private final Clock clock; private ConcurrentMap<TaskAttemptId, ReportTime> runningAttempts; public TaskHeartbeatHandler(EventHandler eventHandler, Clock clock, int numThreads) { super("TaskHeartbeatHandler"); this.eventHandler = eventHandler; this.clock = clock; runningAttempts = new ConcurrentHashMap<TaskAttemptId, ReportTime>(16, 0.75f, numThreads); } @Override protected void serviceInit(Configuration conf) throws Exception { super.serviceInit(conf); taskTimeOut = conf.getInt(MRJobConfig.TASK_TIMEOUT, 5 * 60 * 1000); taskTimeOutCheckInterval = conf.getInt(MRJobConfig.TASK_TIMEOUT_CHECK_INTERVAL_MS, 30 * 1000); } @Override protected void serviceStart() throws Exception { lostTaskCheckerThread = new Thread(new PingChecker()); lostTaskCheckerThread.setName("TaskHeartbeatHandler PingChecker"); lostTaskCheckerThread.start(); super.serviceStart(); } @Override protected void serviceStop() throws Exception { stopped = true; if (lostTaskCheckerThread != null) { lostTaskCheckerThread.interrupt(); } super.serviceStop(); } public void progressing(TaskAttemptId attemptID) { //only put for the registered attempts //TODO throw an exception if the task isn't registered. ReportTime time = runningAttempts.get(attemptID); if(time != null) { time.setLastProgress(clock.getTime()); } } public void register(TaskAttemptId attemptID) { runningAttempts.put(attemptID, new ReportTime(clock.getTime())); } public void unregister(TaskAttemptId attemptID) { runningAttempts.remove(attemptID); } private class PingChecker implements Runnable { @Override public void run() { while (!stopped && !Thread.currentThread().isInterrupted()) { Iterator<Map.Entry<TaskAttemptId, ReportTime>> iterator = runningAttempts.entrySet().iterator(); // avoid calculating current time everytime in loop long currentTime = clock.getTime(); while (iterator.hasNext()) { Map.Entry<TaskAttemptId, ReportTime> entry = iterator.next(); boolean taskTimedOut = (taskTimeOut > 0) && (currentTime > (entry.getValue().getLastProgress() + taskTimeOut)); if(taskTimedOut) { // task is lost, remove from the list and raise lost event iterator.remove(); eventHandler.handle(new TaskAttemptDiagnosticsUpdateEvent(entry .getKey(), "AttemptID:" + entry.getKey().toString() + " Timed out after " + taskTimeOut / 1000 + " secs")); eventHandler.handle(new TaskAttemptEvent(entry.getKey(), TaskAttemptEventType.TA_TIMED_OUT)); } } try { Thread.sleep(taskTimeOutCheckInterval); } catch (InterruptedException e) { LOG.info("TaskHeartbeatHandler thread interrupted"); break; } } } } }
5,663
32.916168
86
java
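As a quick illustration of the TaskHeartbeatHandler life cycle shown above (register on launch, progressing on each status update, unregister on completion), here is a hedged, self-contained sketch. The printing EventHandler and the attempt id built via MRBuilderUtils are assumptions for the sake of a runnable example; in the AM these come from the dispatcher and the real job.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.TaskHeartbeatHandler;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.event.Event;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.util.SystemClock;

public class HeartbeatSketch {
  public static void main(String[] args) throws Exception {
    // Timeout events would normally go to the AM dispatcher; here they are printed.
    EventHandler<Event> handler = new EventHandler<Event>() {
      @Override
      public void handle(Event event) {
        System.out.println("event: " + event);
      }
    };

    TaskHeartbeatHandler hb =
        new TaskHeartbeatHandler(handler, new SystemClock(), 1);
    hb.init(new Configuration());
    hb.start();

    // Hypothetical attempt id, assembled only so the calls below compile and run.
    TaskAttemptId attempt = MRBuilderUtils.newTaskAttemptId(
        MRBuilderUtils.newTaskId(
            MRBuilderUtils.newJobId(ApplicationId.newInstance(1L, 1), 1),
            0, TaskType.MAP),
        0);

    hb.register(attempt);      // called when the attempt is launched
    hb.progressing(attempt);   // called on every status update / heartbeat
    hb.unregister(attempt);    // called when the attempt finishes

    hb.stop();
  }
}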
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/JobEndNotifier.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app; import java.io.IOException; import java.net.HttpURLConnection; import java.net.InetSocketAddress; import java.net.MalformedURLException; import java.net.Proxy; import java.net.URL; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapred.JobContext; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.v2.api.records.JobReport; import org.mortbay.log.Log; /** * <p>This class handles job end notification. Submitters of jobs can choose to * be notified of the end of a job by supplying a URL to which a connection * will be established. * <ul><li> The URL connection is fire and forget by default.</li> <li> * User can specify number of retry attempts and a time interval at which to * attempt retries</li><li> * Cluster administrators can set final parameters to set maximum number of * tries (0 would disable job end notification) and max time interval and a * proxy if needed</li><li> * The URL may contain sentinels which will be replaced by jobId and jobStatus * (eg. SUCCEEDED/KILLED/FAILED) </li> </ul> */ public class JobEndNotifier implements Configurable { private static final String JOB_ID = "$jobId"; private static final String JOB_STATUS = "$jobStatus"; private Configuration conf; protected String userUrl; protected String proxyConf; protected int numTries; //Number of tries to attempt notification protected int waitInterval; //Time (ms) to wait between retrying notification protected int timeout; // Timeout (ms) on the connection and notification protected URL urlToNotify; //URL to notify read from the config protected Proxy proxyToUse = Proxy.NO_PROXY; //Proxy to use for notification /** * Parse the URL that needs to be notified of the end of the job, along * with the number of retries in case of failure, the amount of time to * wait between retries and proxy settings * @param conf the configuration */ public void setConf(Configuration conf) { this.conf = conf; numTries = Math.min( conf.getInt(MRJobConfig.MR_JOB_END_RETRY_ATTEMPTS, 0) + 1 , conf.getInt(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_ATTEMPTS, 1) ); waitInterval = Math.min( conf.getInt(MRJobConfig.MR_JOB_END_RETRY_INTERVAL, 5000) , conf.getInt(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_RETRY_INTERVAL, 5000) ); waitInterval = (waitInterval < 0) ? 5000 : waitInterval; timeout = conf.getInt(JobContext.MR_JOB_END_NOTIFICATION_TIMEOUT, JobContext.DEFAULT_MR_JOB_END_NOTIFICATION_TIMEOUT); userUrl = conf.get(MRJobConfig.MR_JOB_END_NOTIFICATION_URL); proxyConf = conf.get(MRJobConfig.MR_JOB_END_NOTIFICATION_PROXY); //Configure the proxy to use if its set. 
It should be set like //proxyType@proxyHostname:port if(proxyConf != null && !proxyConf.equals("") && proxyConf.lastIndexOf(":") != -1) { int typeIndex = proxyConf.indexOf("@"); Proxy.Type proxyType = Proxy.Type.HTTP; if(typeIndex != -1 && proxyConf.substring(0, typeIndex).compareToIgnoreCase("socks") == 0) { proxyType = Proxy.Type.SOCKS; } String hostname = proxyConf.substring(typeIndex + 1, proxyConf.lastIndexOf(":")); String portConf = proxyConf.substring(proxyConf.lastIndexOf(":") + 1); try { int port = Integer.parseInt(portConf); proxyToUse = new Proxy(proxyType, new InetSocketAddress(hostname, port)); Log.info("Job end notification using proxy type \"" + proxyType + "\" hostname \"" + hostname + "\" and port \"" + port + "\""); } catch(NumberFormatException nfe) { Log.warn("Job end notification couldn't parse configured proxy's port " + portConf + ". Not going to use a proxy"); } } } public Configuration getConf() { return conf; } /** * Notify the URL just once. Use best effort. */ protected boolean notifyURLOnce() { boolean success = false; try { Log.info("Job end notification trying " + urlToNotify); HttpURLConnection conn = (HttpURLConnection) urlToNotify.openConnection(proxyToUse); conn.setConnectTimeout(timeout); conn.setReadTimeout(timeout); conn.setAllowUserInteraction(false); if(conn.getResponseCode() != HttpURLConnection.HTTP_OK) { Log.warn("Job end notification to " + urlToNotify +" failed with code: " + conn.getResponseCode() + " and message \"" + conn.getResponseMessage() +"\""); } else { success = true; Log.info("Job end notification to " + urlToNotify + " succeeded"); } } catch(IOException ioe) { Log.warn("Job end notification to " + urlToNotify + " failed", ioe); } return success; } /** * Notify a server of the completion of a submitted job. The user must have * configured MRJobConfig.MR_JOB_END_NOTIFICATION_URL * @param jobReport JobReport used to read JobId and JobStatus * @throws InterruptedException */ public void notify(JobReport jobReport) throws InterruptedException { // Do we need job-end notification? if (userUrl == null) { Log.info("Job end notification URL not set, skipping."); return; } //Do string replacements for jobId and jobStatus if (userUrl.contains(JOB_ID)) { userUrl = userUrl.replace(JOB_ID, jobReport.getJobId().toString()); } if (userUrl.contains(JOB_STATUS)) { userUrl = userUrl.replace(JOB_STATUS, jobReport.getJobState().toString()); } // Create the URL, ensure sanity try { urlToNotify = new URL(userUrl); } catch (MalformedURLException mue) { Log.warn("Job end notification couldn't parse " + userUrl, mue); return; } // Send notification boolean success = false; while (numTries-- > 0 && !success) { Log.info("Job end notification attempts left " + numTries); success = notifyURLOnce(); if (!success) { Thread.sleep(waitInterval); } } if (!success) { Log.warn("Job end notification failed to notify : " + urlToNotify); } else { Log.info("Job end notification succeeded for " + jobReport.getJobId()); } } }
7,112
36.240838
80
java
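The JobEndNotifier above substitutes $jobId and $jobStatus into a user-supplied URL and retries according to the MRJobConfig settings it reads in setConf. A hedged sketch of the configuration a job submitter might use follows; the URL, proxy address, and retry values are made up for illustration, while the config keys are the ones referenced in the class above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRJobConfig;

public class NotifierConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // $jobId and $jobStatus are replaced by JobEndNotifier before the HTTP call.
    conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_URL,
        "http://example.com/notify?id=$jobId&state=$jobStatus");

    // Retry a few times with a wait between attempts; both values are capped
    // by the cluster-side *_MAX_* settings read in setConf().
    conf.setInt(MRJobConfig.MR_JOB_END_RETRY_ATTEMPTS, 3);
    conf.setInt(MRJobConfig.MR_JOB_END_RETRY_INTERVAL, 2000);

    // Optional proxy, in the proxyType@host:port form parsed in setConf().
    conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_PROXY,
        "socks@proxy.example.com:1080");
  }
}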
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskAttemptFinishingMonitor.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.util.AbstractLivelinessMonitor; import org.apache.hadoop.yarn.util.SystemClock; /** * This class generates TA_TIMED_OUT if the task attempt stays in FINISHING * state for too long. */ @SuppressWarnings({"unchecked", "rawtypes"}) public class TaskAttemptFinishingMonitor extends AbstractLivelinessMonitor<TaskAttemptId> { private EventHandler eventHandler; public TaskAttemptFinishingMonitor(EventHandler eventHandler) { super("TaskAttemptFinishingMonitor", new SystemClock()); this.eventHandler = eventHandler; } public void init(Configuration conf) { super.init(conf); int expireIntvl = conf.getInt(MRJobConfig.TASK_EXIT_TIMEOUT, MRJobConfig.TASK_EXIT_TIMEOUT_DEFAULT); int checkIntvl = conf.getInt( MRJobConfig.TASK_EXIT_TIMEOUT_CHECK_INTERVAL_MS, MRJobConfig.TASK_EXIT_TIMEOUT_CHECK_INTERVAL_MS_DEFAULT); setExpireInterval(expireIntvl); setMonitorInterval(checkIntvl); } @Override protected void expire(TaskAttemptId id) { eventHandler.handle( new TaskAttemptEvent(id, TaskAttemptEventType.TA_TIMED_OUT)); } }
2,349
35.71875
75
java
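To make the TaskAttemptFinishingMonitor wiring concrete, here is a hedged sketch of constructing it and tuning the two timeout keys it reads in init(); the handler and the timeout values are illustrative, and register/unregister (inherited from AbstractLivelinessMonitor) are only mentioned in comments since building a real TaskAttemptId is out of scope here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.v2.app.TaskAttemptFinishingMonitor;
import org.apache.hadoop.yarn.event.Event;
import org.apache.hadoop.yarn.event.EventHandler;

public class FinishingMonitorSketch {
  public static void main(String[] args) {
    // Timed-out attempts would normally be routed back to the AM dispatcher;
    // here the event is just printed.
    EventHandler<Event> handler = new EventHandler<Event>() {
      @Override
      public void handle(Event event) {
        System.out.println("timed out: " + event);
      }
    };

    Configuration conf = new Configuration();
    // Illustrative values; the defaults come from MRJobConfig.
    conf.setInt(MRJobConfig.TASK_EXIT_TIMEOUT, 60 * 1000);
    conf.setInt(MRJobConfig.TASK_EXIT_TIMEOUT_CHECK_INTERVAL_MS, 10 * 1000);

    TaskAttemptFinishingMonitor monitor = new TaskAttemptFinishingMonitor(handler);
    monitor.init(conf);   // picks up the two timeout settings above
    monitor.start();
    // ... monitor.register(attemptId) when an attempt enters FINISHING,
    //     monitor.unregister(attemptId) once its container actually exits ...
    monitor.stop();
  }
}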
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/AppContext.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app; import java.util.Map; import java.util.Set; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.mapreduce.v2.api.records.JobId; import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.security.client.ClientToAMTokenSecretManager; import org.apache.hadoop.yarn.util.Clock; /** * Context interface for sharing information across components in YARN App. */ @InterfaceAudience.Private public interface AppContext { ApplicationId getApplicationID(); ApplicationAttemptId getApplicationAttemptId(); String getApplicationName(); long getStartTime(); CharSequence getUser(); Job getJob(JobId jobID); Map<JobId, Job> getAllJobs(); @SuppressWarnings("rawtypes") EventHandler getEventHandler(); Clock getClock(); ClusterInfo getClusterInfo(); Set<String> getBlacklistedNodes(); ClientToAMTokenSecretManager getClientToAMTokenSecretManager(); boolean isLastAMRetry(); boolean hasSuccessfullyUnregistered(); String getNMHostname(); TaskAttemptFinishingMonitor getTaskAttemptFinishingMonitor(); }
2,109
27.90411
75
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRClientSecurityInfo.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app; import java.lang.annotation.Annotation; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.v2.api.MRClientProtocolPB; import org.apache.hadoop.security.KerberosInfo; import org.apache.hadoop.security.SecurityInfo; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.security.token.TokenInfo; import org.apache.hadoop.security.token.TokenSelector; import org.apache.hadoop.yarn.security.client.ClientToAMTokenSelector; public class MRClientSecurityInfo extends SecurityInfo { @Override public KerberosInfo getKerberosInfo(Class<?> protocol, Configuration conf) { return null; } @Override public TokenInfo getTokenInfo(Class<?> protocol, Configuration conf) { if (!protocol.equals(MRClientProtocolPB.class)) { return null; } return new TokenInfo() { @Override public Class<? extends Annotation> annotationType() { return null; } @Override public Class<? extends TokenSelector<? extends TokenIdentifier>> value() { return ClientToAMTokenSelector.class; } }; } }
1,961
32.254237
78
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/ClusterInfo.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.util.Records; @InterfaceAudience.LimitedPrivate("MapReduce") public class ClusterInfo { private Resource maxContainerCapability; public ClusterInfo() { this.maxContainerCapability = Records.newRecord(Resource.class); } public ClusterInfo(Resource maxCapability) { this.maxContainerCapability = maxCapability; } public Resource getMaxContainerCapability() { return maxContainerCapability; } public void setMaxContainerCapability(Resource maxContainerCapability) { this.maxContainerCapability = maxContainerCapability; } }
1,553
34.318182
75
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskAttemptListener.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app; import java.net.InetSocketAddress; import org.apache.hadoop.mapred.Task; import org.apache.hadoop.mapred.WrappedJvmID; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; /** * This class listens for changes to the state of a Task. */ public interface TaskAttemptListener { InetSocketAddress getAddress(); /** * Register a JVM with the listener. This should be called as soon as a * JVM ID is assigned to a task attempt, before it has been launched. * @param task the task itself for this JVM. * @param jvmID The ID of the JVM . */ void registerPendingTask(Task task, WrappedJvmID jvmID); /** * Register task attempt. This should be called when the JVM has been * launched. * * @param attemptID * the id of the attempt for this JVM. * @param jvmID the ID of the JVM. */ void registerLaunchedTask(TaskAttemptId attemptID, WrappedJvmID jvmID); /** * Unregister the JVM and the attempt associated with it. This should be * called when the attempt/JVM has finished executing and is being cleaned up. * @param attemptID the ID of the attempt. * @param jvmID the ID of the JVM for that attempt. */ void unregister(TaskAttemptId attemptID, WrappedJvmID jvmID); }
2,096
33.377049
80
java
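Since TaskAttemptListener is only an interface, a minimal implementation that satisfies it is easy to sketch; the no-op class below is purely illustrative (the real listener used by the AM is TaskAttemptListenerImpl, which runs the task umbilical RPC server).

import java.net.InetSocketAddress;
import org.apache.hadoop.mapred.Task;
import org.apache.hadoop.mapred.WrappedJvmID;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener;

// Illustrative no-op listener; useful only as a stand-in in tests or sketches.
public class NoopTaskAttemptListener implements TaskAttemptListener {

  @Override
  public InetSocketAddress getAddress() {
    // Nothing actually listens here; a real listener returns its RPC address.
    return new InetSocketAddress("localhost", 0);
  }

  @Override
  public void registerPendingTask(Task task, WrappedJvmID jvmID) {
    // Would remember the task so the launched JVM can pick it up.
  }

  @Override
  public void registerLaunchedTask(TaskAttemptId attemptID, WrappedJvmID jvmID) {
    // Would start expecting heartbeats from this attempt.
  }

  @Override
  public void unregister(TaskAttemptId attemptID, WrappedJvmID jvmID) {
    // Would stop tracking the attempt and its JVM.
  }
}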
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/security/authorize/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ @InterfaceAudience.Private package org.apache.hadoop.mapreduce.v2.app.security.authorize; import org.apache.hadoop.classification.InterfaceAudience;
955
44.52381
75
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/security/authorize/MRAMPolicyProvider.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.security.authorize; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.mapred.TaskUmbilicalProtocol; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.v2.api.MRClientProtocolPB; import org.apache.hadoop.security.authorize.PolicyProvider; import org.apache.hadoop.security.authorize.Service; /** * {@link PolicyProvider} for YARN MapReduce protocols. */ @InterfaceAudience.Private @InterfaceStability.Unstable public class MRAMPolicyProvider extends PolicyProvider { private static final Service[] mapReduceApplicationMasterServices = new Service[] { new Service( MRJobConfig.MR_AM_SECURITY_SERVICE_AUTHORIZATION_TASK_UMBILICAL, TaskUmbilicalProtocol.class), new Service( MRJobConfig.MR_AM_SECURITY_SERVICE_AUTHORIZATION_CLIENT, MRClientProtocolPB.class) }; @Override public Service[] getServices() { return mapReduceApplicationMasterServices; } }
1,893
36.137255
75
java
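A small, hedged sketch of what MRAMPolicyProvider exposes: it simply maps ACL config keys to the two protocols the AM serves, and (as far as these sources show) is handed to the RPC layer when service-level authorization is enabled. The snippet below only enumerates those mappings; it does not stand up an RPC server.

import org.apache.hadoop.mapreduce.v2.app.security.authorize.MRAMPolicyProvider;
import org.apache.hadoop.security.authorize.Service;

public class PolicySketch {
  public static void main(String[] args) {
    // Prints the config key that gates access to each AM protocol.
    for (Service s : new MRAMPolicyProvider().getServices()) {
      System.out.println(s.getServiceKey() + " -> " + s.getProtocol().getName());
    }
  }
}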
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/security/authorize/ClientHSPolicyProvider.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.security.authorize; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.mapreduce.v2.api.HSAdminRefreshProtocol; import org.apache.hadoop.mapreduce.v2.api.HSClientProtocolPB; import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig; import org.apache.hadoop.security.RefreshUserMappingsProtocol; import org.apache.hadoop.security.authorize.PolicyProvider; import org.apache.hadoop.security.authorize.Service; import org.apache.hadoop.tools.GetUserMappingsProtocol; /** * {@link PolicyProvider} for MapReduce history server protocols. */ @InterfaceAudience.Private @InterfaceStability.Unstable public class ClientHSPolicyProvider extends PolicyProvider { private static final Service[] mrHSServices = new Service[] { new Service( JHAdminConfig.MR_HS_SECURITY_SERVICE_AUTHORIZATION, HSClientProtocolPB.class), new Service( CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_GET_USER_MAPPINGS, GetUserMappingsProtocol.class), new Service( CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_REFRESH_USER_MAPPINGS, RefreshUserMappingsProtocol.class), new Service( JHAdminConfig.MR_HS_SECURITY_SERVICE_AUTHORIZATION_ADMIN_REFRESH, HSAdminRefreshProtocol.class) }; @Override public Service[] getServices() { return mrHSServices; } }
2,358
38.983051
92
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JAXBContextResolver.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.webapp; import java.util.HashMap; import java.util.Map; import com.sun.jersey.api.json.JSONConfiguration; import com.sun.jersey.api.json.JSONJAXBContext; import com.google.inject.Singleton; import javax.ws.rs.ext.ContextResolver; import javax.ws.rs.ext.Provider; import javax.xml.bind.JAXBContext; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.AMAttemptInfo; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.AMAttemptsInfo; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.AppInfo; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ConfInfo; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ConfEntryInfo; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.CounterGroupInfo; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.CounterInfo; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobCounterInfo; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobInfo; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobTaskAttemptState; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobsInfo; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobTaskAttemptCounterInfo; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobTaskCounterInfo; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ReduceTaskAttemptInfo; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptsInfo; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskCounterGroupInfo; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskCounterInfo; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskInfo; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TasksInfo; import org.apache.hadoop.yarn.webapp.RemoteExceptionData; @Singleton @Provider public class JAXBContextResolver implements ContextResolver<JAXBContext> { private final Map<Class, JAXBContext> typesContextMap; // you have to specify all the dao classes here private final Class[] cTypes = {AMAttemptInfo.class, AMAttemptsInfo.class, AppInfo.class, CounterInfo.class, JobTaskAttemptCounterInfo.class, JobTaskCounterInfo.class, TaskCounterGroupInfo.class, ConfInfo.class, JobCounterInfo.class, TaskCounterInfo.class, CounterGroupInfo.class, JobInfo.class, JobsInfo.class, ReduceTaskAttemptInfo.class, TaskAttemptInfo.class, TaskInfo.class, TasksInfo.class, TaskAttemptsInfo.class, ConfEntryInfo.class, RemoteExceptionData.class}; // these dao classes need root unwrapping private final Class[] rootUnwrappedTypes = {JobTaskAttemptState.class}; public JAXBContextResolver() throws Exception { JAXBContext context; JAXBContext unWrappedRootContext; this.typesContextMap = new HashMap<Class, JAXBContext>(); context = new 
JSONJAXBContext(JSONConfiguration.natural().rootUnwrapping(false) .build(), cTypes); unWrappedRootContext = new JSONJAXBContext(JSONConfiguration.natural().rootUnwrapping(true) .build(), rootUnwrappedTypes); for (Class type : cTypes) { typesContextMap.put(type, context); } for (Class type : rootUnwrappedTypes) { typesContextMap.put(type, unWrappedRootContext); } } @Override public JAXBContext getContext(Class<?> objectType) { return typesContextMap.get(objectType); } }
4,147
42.208333
79
java
hadoop
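JAXBContextResolver pre-builds two JSONJAXBContext instances — natural notation with root unwrapping disabled for most DAO classes, and with root unwrapping enabled for JobTaskAttemptState — and getContext simply looks the requested class up in that map. Below is a minimal, hypothetical sketch (not part of the repository; the class name and the expected JSON output are assumptions) of exercising the resolver directly:

import java.io.StringWriter;

import javax.xml.bind.JAXBContext;

import com.sun.jersey.api.json.JSONJAXBContext;
import com.sun.jersey.api.json.JSONMarshaller;

import org.apache.hadoop.mapreduce.v2.app.webapp.JAXBContextResolver;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobTaskAttemptState;

public class JAXBContextResolverSketch {
  public static void main(String[] args) throws Exception {
    JAXBContextResolver resolver = new JAXBContextResolver();

    // Jersey calls getContext(...) when it marshals a response of this type.
    JAXBContext ctx = resolver.getContext(JobTaskAttemptState.class);

    // The stored contexts are JSONJAXBContext instances, so the cast is safe here.
    JSONMarshaller m = ((JSONJAXBContext) ctx).createJSONMarshaller();
    StringWriter out = new StringWriter();
    m.marshallToJSON(new JobTaskAttemptState("KILLED"), out);
    System.out.println(out); // expected (assumption): {"state":"KILLED"}
  }
}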
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/package-info.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
@InterfaceAudience.Private
package org.apache.hadoop.mapreduce.v2.app.webapp;
import org.apache.hadoop.classification.InterfaceAudience;
943
43.952381
75
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/InfoPage.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.mapreduce.v2.app.webapp;

import org.apache.hadoop.yarn.webapp.SubView;
import org.apache.hadoop.yarn.webapp.view.InfoBlock;

public class InfoPage extends AppView {

  @Override
  protected void preHead(Page.HTML<_> html) {
    commonPreHead(html);
    setTitle("About the Application Master");
  }

  @Override
  protected Class<? extends SubView> content() {
    return InfoBlock.class;
  }
}
1,210
33.6
74
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/App.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.mapreduce.v2.app.webapp;

import com.google.inject.Inject;
import com.google.inject.servlet.RequestScoped;

import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;

@RequestScoped
public class App {
  final AppContext context;
  private Job job;
  private Task task;

  @Inject
  App(AppContext ctx) {
    context = ctx;
  }

  void setJob(Job job) {
    this.job = job;
  }

  public Job getJob() {
    return job;
  }

  void setTask(Task task) {
    this.task = task;
  }

  public Task getTask() {
    return task;
  }
}
1,444
25.272727
74
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksPage.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.webapp; import org.apache.hadoop.yarn.webapp.SubView; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*; public class TasksPage extends AppView { @Override protected void preHead(Page.HTML<_> html) { commonPreHead(html); set(DATATABLES_ID, "tasks"); set(initID(ACCORDION, "nav"), "{autoHeight:false, active:2}"); set(initID(DATATABLES, "tasks"), tasksTableInit()); setTableStyles(html, "tasks"); } @Override protected Class<? extends SubView> content() { return TasksBlock.class; } private String tasksTableInit() { return tableInit() .append(", 'aaData': tasksTableData") .append(", bDeferRender: true") .append(", bProcessing: true") .append("\n, aoColumnDefs: [\n") .append("{'sType':'string', 'aTargets': [0]") .append(", 'mRender': parseHadoopID }") .append("\n, {'sType':'numeric', bSearchable:false, 'aTargets': [1]") .append(", 'mRender': parseHadoopProgress }") .append("\n, {'sType':'numeric', 'aTargets': [4, 5]") .append(", 'mRender': renderHadoopDate }") .append("\n, {'sType':'numeric', 'aTargets': [6]") .append(", 'mRender': renderHadoopElapsedTime }]") // Sort by id upon page load .append(", aaSorting: [[0, 'asc']] }").toString(); } }
2,135
32.904762
75
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.webapp; import static org.apache.hadoop.yarn.util.StringHelper.join; import java.io.IOException; import java.net.URLDecoder; import javax.servlet.http.HttpServletResponse; import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.JobACL; import org.apache.hadoop.mapreduce.v2.api.records.JobId; import org.apache.hadoop.mapreduce.v2.api.records.TaskId; import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.AppInfo; import org.apache.hadoop.mapreduce.v2.util.MRApps; import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.util.StringHelper; import org.apache.hadoop.yarn.util.Times; import org.apache.hadoop.yarn.webapp.Controller; import org.apache.hadoop.yarn.webapp.View; import org.apache.hadoop.yarn.webapp.util.WebAppUtils; import com.google.common.base.Joiner; import com.google.inject.Inject; /** * This class renders the various pages that the web app supports. */ public class AppController extends Controller implements AMParams { private static final Log LOG = LogFactory.getLog(AppController.class); private static final Joiner JOINER = Joiner.on(""); protected final App app; protected AppController(App app, Configuration conf, RequestContext ctx, String title) { super(ctx); this.app = app; set(APP_ID, app.context.getApplicationID().toString()); set(RM_WEB, JOINER.join(MRWebAppUtil.getYARNWebappScheme(), WebAppUtils.getResolvedRemoteRMWebAppURLWithoutScheme(conf, MRWebAppUtil.getYARNHttpPolicy()))); } @Inject protected AppController(App app, Configuration conf, RequestContext ctx) { this(app, conf, ctx, "am"); } /** * Render the default(index.html) page for the Application Controller */ @Override public void index() { setTitle(join("MapReduce Application ", $(APP_ID))); } /** * Render the /info page with an overview of current application. */ public void info() { AppInfo info = new AppInfo(app, app.context); info("Application Master Overview"). _("Application ID:", info.getId()). _("Application Name:", info.getName()). _("User:", info.getUser()). _("Started on:", Times.format(info.getStartTime())). _("Elasped: ", org.apache.hadoop.util.StringUtils.formatTime( info.getElapsedTime() )); render(InfoPage.class); } /** * @return The class that will render the /job page */ protected Class<? 
extends View> jobPage() { return JobPage.class; } /** * Render the /job page */ public void job() { try { requireJob(); } catch (Exception e) { renderText(e.getMessage()); return; } render(jobPage()); } /** * @return the class that will render the /jobcounters page */ protected Class<? extends View> countersPage() { return CountersPage.class; } /** * Render the /jobcounters page */ public void jobCounters() { try { requireJob(); } catch (Exception e) { renderText(e.getMessage()); return; } if (app.getJob() != null) { setTitle(join("Counters for ", $(JOB_ID))); } render(countersPage()); } /** * Display a page showing a task's counters */ public void taskCounters() { try { requireTask(); } catch (Exception e) { renderText(e.getMessage()); return; } if (app.getTask() != null) { setTitle(StringHelper.join("Counters for ", $(TASK_ID))); } render(countersPage()); } /** * @return the class that will render the /singlejobcounter page */ protected Class<? extends View> singleCounterPage() { return SingleCounterPage.class; } /** * Render the /singlejobcounter page * @throws IOException on any error. */ public void singleJobCounter() throws IOException{ try { requireJob(); } catch (Exception e) { renderText(e.getMessage()); return; } set(COUNTER_GROUP, URLDecoder.decode($(COUNTER_GROUP), "UTF-8")); set(COUNTER_NAME, URLDecoder.decode($(COUNTER_NAME), "UTF-8")); if (app.getJob() != null) { setTitle(StringHelper.join($(COUNTER_GROUP)," ",$(COUNTER_NAME), " for ", $(JOB_ID))); } render(singleCounterPage()); } /** * Render the /singletaskcounter page * @throws IOException on any error. */ public void singleTaskCounter() throws IOException{ try { requireTask(); } catch (Exception e) { renderText(e.getMessage()); return; } set(COUNTER_GROUP, URLDecoder.decode($(COUNTER_GROUP), "UTF-8")); set(COUNTER_NAME, URLDecoder.decode($(COUNTER_NAME), "UTF-8")); if (app.getTask() != null) { setTitle(StringHelper.join($(COUNTER_GROUP)," ",$(COUNTER_NAME), " for ", $(TASK_ID))); } render(singleCounterPage()); } /** * @return the class that will render the /tasks page */ protected Class<? extends View> tasksPage() { return TasksPage.class; } /** * Render the /tasks page */ public void tasks() { try { requireJob(); } catch (Exception e) { renderText(e.getMessage()); return; } if (app.getJob() != null) { try { String tt = $(TASK_TYPE); tt = tt.isEmpty() ? "All" : StringUtils.capitalize( org.apache.hadoop.util.StringUtils.toLowerCase( MRApps.taskType(tt).toString())); setTitle(join(tt, " Tasks for ", $(JOB_ID))); } catch (Exception e) { LOG.error("Failed to render tasks page with task type : " + $(TASK_TYPE) + " for job id : " + $(JOB_ID), e); badRequest(e.getMessage()); } } render(tasksPage()); } /** * @return the class that will render the /task page */ protected Class<? extends View> taskPage() { return TaskPage.class; } /** * Render the /task page */ public void task() { try { requireTask(); } catch (Exception e) { renderText(e.getMessage()); return; } if (app.getTask() != null) { setTitle(join("Attempts for ", $(TASK_ID))); } render(taskPage()); } /** * @return the class that will render the /attempts page */ protected Class<? 
extends View> attemptsPage() { return AttemptsPage.class; } /** * Render the attempts page */ public void attempts() { try { requireJob(); } catch (Exception e) { renderText(e.getMessage()); return; } if (app.getJob() != null) { try { String taskType = $(TASK_TYPE); if (taskType.isEmpty()) { throw new RuntimeException("missing task-type."); } String attemptState = $(ATTEMPT_STATE); if (attemptState.isEmpty()) { throw new RuntimeException("missing attempt-state."); } setTitle(join(attemptState, " ", MRApps.taskType(taskType).toString(), " attempts in ", $(JOB_ID))); render(attemptsPage()); } catch (Exception e) { LOG.error("Failed to render attempts page with task type : " + $(TASK_TYPE) + " for job id : " + $(JOB_ID), e); badRequest(e.getMessage()); } } } /** * @return the page that will be used to render the /conf page */ protected Class<? extends View> confPage() { return JobConfPage.class; } /** * Render the /conf page */ public void conf() { try { requireJob(); } catch (Exception e) { renderText(e.getMessage()); return; } render(confPage()); } /** * Render a BAD_REQUEST error. * @param s the error message to include. */ void badRequest(String s) { setStatus(HttpServletResponse.SC_BAD_REQUEST); String title = "Bad request: "; setTitle((s != null) ? join(title, s) : title); } /** * Render a NOT_FOUND error. * @param s the error message to include. */ void notFound(String s) { setStatus(HttpServletResponse.SC_NOT_FOUND); setTitle(join("Not found: ", s)); } /** * Render a ACCESS_DENIED error. * @param s the error message to include. */ void accessDenied(String s) { setStatus(HttpServletResponse.SC_FORBIDDEN); setTitle(join("Access denied: ", s)); } /** * check for job access. * @param job the job that is being accessed * @return True if the requesting user has permission to view the job */ boolean checkAccess(Job job) { String remoteUser = request().getRemoteUser(); UserGroupInformation callerUGI = null; if (remoteUser != null) { callerUGI = UserGroupInformation.createRemoteUser(remoteUser); } if (callerUGI != null && !job.checkAccess(callerUGI, JobACL.VIEW_JOB)) { return false; } return true; } /** * Ensure that a JOB_ID was passed into the page. */ public void requireJob() { if ($(JOB_ID).isEmpty()) { badRequest("missing job ID"); throw new RuntimeException("Bad Request: Missing job ID"); } JobId jobID = MRApps.toJobID($(JOB_ID)); app.setJob(app.context.getJob(jobID)); if (app.getJob() == null) { notFound($(JOB_ID)); throw new RuntimeException("Not Found: " + $(JOB_ID)); } /* check for acl access */ Job job = app.context.getJob(jobID); if (!checkAccess(job)) { accessDenied("User " + request().getRemoteUser() + " does not have " + " permission to view job " + $(JOB_ID)); throw new RuntimeException("Access denied: User " + request().getRemoteUser() + " does not have permission to view job " + $(JOB_ID)); } } /** * Ensure that a TASK_ID was passed into the page. 
*/ public void requireTask() { if ($(TASK_ID).isEmpty()) { badRequest("missing task ID"); throw new RuntimeException("missing task ID"); } TaskId taskID = MRApps.toTaskID($(TASK_ID)); Job job = app.context.getJob(taskID.getJobId()); app.setJob(job); if (app.getJob() == null) { notFound(MRApps.toString(taskID.getJobId())); throw new RuntimeException("Not Found: " + $(JOB_ID)); } else { app.setTask(app.getJob().getTask(taskID)); if (app.getTask() == null) { notFound($(TASK_ID)); throw new RuntimeException("Not Found: " + $(TASK_ID)); } } if (!checkAccess(job)) { accessDenied("User " + request().getRemoteUser() + " does not have " + " permission to view job " + $(JOB_ID)); throw new RuntimeException("Access denied: User " + request().getRemoteUser() + " does not have permission to view job " + $(JOB_ID)); } } }
11,784
26.470862
80
java
hadoop
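AppController keeps every page class behind a protected hook (jobPage(), countersPage(), tasksPage(), and so on), so a subclass can swap in its own views while reusing the request handling and ACL checks. A hypothetical sketch of such a subclass (CustomAppController and CustomJobPage are illustrative names, not real code; the history server's controller follows a similar pattern):

package org.apache.hadoop.mapreduce.v2.app.webapp;

import com.google.inject.Inject;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.webapp.View;

public class CustomAppController extends AppController {

  /** Trivial stand-in view; a real subclass would add or replace blocks. */
  public static class CustomJobPage extends JobPage {
  }

  @Inject
  protected CustomAppController(App app, Configuration conf, RequestContext ctx) {
    super(app, conf, ctx, "am");
  }

  @Override
  protected Class<? extends View> jobPage() {
    // job() still runs requireJob() and the access check before rendering this view.
    return CustomJobPage.class;
  }
}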
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AttemptsPage.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.webapp; import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.TASK_TYPE; import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.ATTEMPT_STATE; import java.util.ArrayList; import java.util.Collection; import java.util.List; import java.util.Map; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState; import org.apache.hadoop.mapreduce.v2.api.records.TaskId; import org.apache.hadoop.mapreduce.v2.api.records.TaskType; import org.apache.hadoop.mapreduce.v2.app.job.Task; import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo; import org.apache.hadoop.mapreduce.v2.util.MRApps; import org.apache.hadoop.mapreduce.v2.util.MRApps.TaskAttemptStateUI; import org.apache.hadoop.yarn.webapp.SubView; import com.google.inject.Inject; public class AttemptsPage extends TaskPage { static class FewAttemptsBlock extends TaskPage.AttemptsBlock { @Inject FewAttemptsBlock(App ctx, Configuration conf) { super(ctx, conf); } @Override protected boolean isValidRequest() { return true; } @Override protected String getAttemptId(TaskId taskId, TaskAttemptInfo ta) { return "<a href='" + url("task", taskId.toString()) + "'>" + ta.getId() + "</a>"; } @Override protected Collection<TaskAttempt> getTaskAttempts() { List<TaskAttempt> fewTaskAttemps = new ArrayList<TaskAttempt>(); String taskTypeStr = $(TASK_TYPE); TaskType taskType = MRApps.taskType(taskTypeStr); String attemptStateStr = $(ATTEMPT_STATE); TaskAttemptStateUI neededState = MRApps .taskAttemptState(attemptStateStr); for (Task task : super.app.getJob().getTasks(taskType).values()) { Map<TaskAttemptId, TaskAttempt> attempts = task.getAttempts(); for (TaskAttempt attempt : attempts.values()) { if (neededState.correspondsTo(attempt.getState())) { fewTaskAttemps.add(attempt); } } } return fewTaskAttemps; } } @Override protected Class<? extends SubView> content() { return FewAttemptsBlock.class; } }
3,116
35.244186
79
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersPage.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.webapp; import org.apache.hadoop.yarn.webapp.SubView; import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.TASK_ID; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*; public class CountersPage extends AppView { @Override protected void preHead(Page.HTML<_> html) { commonPreHead(html); String tid = $(TASK_ID); String activeNav = "3"; if(tid == null || tid.isEmpty()) { activeNav = "2"; } set(initID(ACCORDION, "nav"), "{autoHeight:false, active:"+activeNav+"}"); set(DATATABLES_SELECTOR, "#counters .dt-counters"); set(initSelector(DATATABLES), "{bJQueryUI:true, sDom:'t', iDisplayLength:-1}"); } @Override protected void postHead(Page.HTML<_> html) { html. style("#counters, .dt-counters { table-layout: fixed }", "#counters th { overflow: hidden; vertical-align: middle }", "#counters .dataTables_wrapper { min-height: 1em }", "#counters .group { width: 15em }", "#counters .name { width: 30em }"); } @Override protected Class<? extends SubView> content() { return CountersBlock.class; } }
1,977
34.963636
78
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebServices.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.webapp; import java.io.IOException; import java.lang.reflect.UndeclaredThrowableException; import java.security.AccessControlException; import java.security.PrivilegedExceptionAction; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import javax.ws.rs.Consumes; import javax.ws.rs.GET; import javax.ws.rs.PUT; import javax.ws.rs.Path; import javax.ws.rs.PathParam; import javax.ws.rs.Produces; import javax.ws.rs.QueryParam; import javax.ws.rs.WebApplicationException; import javax.ws.rs.core.Context; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; import javax.ws.rs.core.Response.Status; import org.apache.hadoop.mapreduce.JobACL; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptResponse; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.KillTaskAttemptRequestPBImpl; import org.apache.hadoop.mapreduce.v2.api.records.AMInfo; import org.apache.hadoop.mapreduce.v2.api.records.JobId; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState; import org.apache.hadoop.mapreduce.v2.api.records.TaskId; import org.apache.hadoop.mapreduce.v2.api.records.TaskType; import org.apache.hadoop.mapreduce.v2.app.AppContext; import org.apache.hadoop.mapreduce.v2.app.client.MRClientService; import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.mapreduce.v2.app.job.Task; import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.AppInfo; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.AMAttemptInfo; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.AMAttemptsInfo; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.BlacklistedNodesInfo; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ConfInfo; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobCounterInfo; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobInfo; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobTaskAttemptCounterInfo; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobTaskAttemptState; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobTaskCounterInfo; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobsInfo; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ReduceTaskAttemptInfo; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptsInfo; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskInfo; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TasksInfo; import 
org.apache.hadoop.mapreduce.v2.util.MRApps; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.webapp.BadRequestException; import org.apache.hadoop.yarn.webapp.NotFoundException; import com.google.common.base.Preconditions; import com.google.inject.Inject; @Path("/ws/v1/mapreduce") public class AMWebServices { private final AppContext appCtx; private final App app; private final MRClientService service; private @Context HttpServletResponse response; @Inject public AMWebServices(final App app, final AppContext context) { this.appCtx = context; this.app = app; this.service = new MRClientService(context); } Boolean hasAccess(Job job, HttpServletRequest request) { String remoteUser = request.getRemoteUser(); UserGroupInformation callerUGI = null; if (remoteUser != null) { callerUGI = UserGroupInformation.createRemoteUser(remoteUser); } if (callerUGI != null && !job.checkAccess(callerUGI, JobACL.VIEW_JOB)) { return false; } return true; } private void init() { //clear content type response.setContentType(null); } /** * convert a job id string to an actual job and handle all the error checking. */ public static Job getJobFromJobIdString(String jid, AppContext appCtx) throws NotFoundException { JobId jobId; Job job; try { jobId = MRApps.toJobID(jid); } catch (YarnRuntimeException e) { // TODO: after MAPREDUCE-2793 YarnRuntimeException is probably not expected here // anymore but keeping it for now just in case other stuff starts failing. // Also, the webservice should ideally return BadRequest (HTTP:400) when // the id is malformed instead of NotFound (HTTP:404). The webserver on // top of which AMWebServices is built seems to automatically do that for // unhandled exceptions throw new NotFoundException(e.getMessage()); } catch (IllegalArgumentException e) { throw new NotFoundException(e.getMessage()); } if (jobId == null) { throw new NotFoundException("job, " + jid + ", is not found"); } job = appCtx.getJob(jobId); if (job == null) { throw new NotFoundException("job, " + jid + ", is not found"); } return job; } /** * convert a task id string to an actual task and handle all the error * checking. */ public static Task getTaskFromTaskIdString(String tid, Job job) throws NotFoundException { TaskId taskID; Task task; try { taskID = MRApps.toTaskID(tid); } catch (YarnRuntimeException e) { // TODO: after MAPREDUCE-2793 YarnRuntimeException is probably not expected here // anymore but keeping it for now just in case other stuff starts failing. // Also, the webservice should ideally return BadRequest (HTTP:400) when // the id is malformed instead of NotFound (HTTP:404). The webserver on // top of which AMWebServices is built seems to automatically do that for // unhandled exceptions throw new NotFoundException(e.getMessage()); } catch (NumberFormatException ne) { throw new NotFoundException(ne.getMessage()); } catch (IllegalArgumentException e) { throw new NotFoundException(e.getMessage()); } if (taskID == null) { throw new NotFoundException("taskid " + tid + " not found or invalid"); } task = job.getTask(taskID); if (task == null) { throw new NotFoundException("task not found with id " + tid); } return task; } /** * convert a task attempt id string to an actual task attempt and handle all * the error checking. 
*/ public static TaskAttempt getTaskAttemptFromTaskAttemptString(String attId, Task task) throws NotFoundException { TaskAttemptId attemptId; TaskAttempt ta; try { attemptId = MRApps.toTaskAttemptID(attId); } catch (YarnRuntimeException e) { // TODO: after MAPREDUCE-2793 YarnRuntimeException is probably not expected here // anymore but keeping it for now just in case other stuff starts failing. // Also, the webservice should ideally return BadRequest (HTTP:400) when // the id is malformed instead of NotFound (HTTP:404). The webserver on // top of which AMWebServices is built seems to automatically do that for // unhandled exceptions throw new NotFoundException(e.getMessage()); } catch (NumberFormatException ne) { throw new NotFoundException(ne.getMessage()); } catch (IllegalArgumentException e) { throw new NotFoundException(e.getMessage()); } if (attemptId == null) { throw new NotFoundException("task attempt id " + attId + " not found or invalid"); } ta = task.getAttempt(attemptId); if (ta == null) { throw new NotFoundException("Error getting info on task attempt id " + attId); } return ta; } /** * check for job access. * * @param job * the job that is being accessed */ void checkAccess(Job job, HttpServletRequest request) { if (!hasAccess(job, request)) { throw new WebApplicationException(Status.UNAUTHORIZED); } } @GET @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) public AppInfo get() { return getAppInfo(); } @GET @Path("/info") @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) public AppInfo getAppInfo() { init(); return new AppInfo(this.app, this.app.context); } @GET @Path("/blacklistednodes") @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) public BlacklistedNodesInfo getBlacklistedNodes() { init(); return new BlacklistedNodesInfo(this.app.context); } @GET @Path("/jobs") @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) public JobsInfo getJobs(@Context HttpServletRequest hsr) { init(); JobsInfo allJobs = new JobsInfo(); for (Job job : appCtx.getAllJobs().values()) { // getAllJobs only gives you a partial we want a full Job fullJob = appCtx.getJob(job.getID()); if (fullJob == null) { continue; } allJobs.add(new JobInfo(fullJob, hasAccess(fullJob, hsr))); } return allJobs; } @GET @Path("/jobs/{jobid}") @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) public JobInfo getJob(@Context HttpServletRequest hsr, @PathParam("jobid") String jid) { init(); Job job = getJobFromJobIdString(jid, appCtx); return new JobInfo(job, hasAccess(job, hsr)); } @GET @Path("/jobs/{jobid}/jobattempts") @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) public AMAttemptsInfo getJobAttempts(@PathParam("jobid") String jid) { init(); Job job = getJobFromJobIdString(jid, appCtx); AMAttemptsInfo amAttempts = new AMAttemptsInfo(); for (AMInfo amInfo : job.getAMInfos()) { AMAttemptInfo attempt = new AMAttemptInfo(amInfo, MRApps.toString( job.getID()), job.getUserName()); amAttempts.add(attempt); } return amAttempts; } @GET @Path("/jobs/{jobid}/counters") @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) public JobCounterInfo getJobCounters(@Context HttpServletRequest hsr, @PathParam("jobid") String jid) { init(); Job job = getJobFromJobIdString(jid, appCtx); checkAccess(job, hsr); return new JobCounterInfo(this.appCtx, job); } @GET @Path("/jobs/{jobid}/conf") @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) public ConfInfo getJobConf(@Context HttpServletRequest hsr, 
@PathParam("jobid") String jid) { init(); Job job = getJobFromJobIdString(jid, appCtx); checkAccess(job, hsr); ConfInfo info; try { info = new ConfInfo(job); } catch (IOException e) { throw new NotFoundException("unable to load configuration for job: " + jid); } return info; } @GET @Path("/jobs/{jobid}/tasks") @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) public TasksInfo getJobTasks(@Context HttpServletRequest hsr, @PathParam("jobid") String jid, @QueryParam("type") String type) { init(); Job job = getJobFromJobIdString(jid, appCtx); checkAccess(job, hsr); TasksInfo allTasks = new TasksInfo(); for (Task task : job.getTasks().values()) { TaskType ttype = null; if (type != null && !type.isEmpty()) { try { ttype = MRApps.taskType(type); } catch (YarnRuntimeException e) { throw new BadRequestException("tasktype must be either m or r"); } } if (ttype != null && task.getType() != ttype) { continue; } allTasks.add(new TaskInfo(task)); } return allTasks; } @GET @Path("/jobs/{jobid}/tasks/{taskid}") @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) public TaskInfo getJobTask(@Context HttpServletRequest hsr, @PathParam("jobid") String jid, @PathParam("taskid") String tid) { init(); Job job = getJobFromJobIdString(jid, appCtx); checkAccess(job, hsr); Task task = getTaskFromTaskIdString(tid, job); return new TaskInfo(task); } @GET @Path("/jobs/{jobid}/tasks/{taskid}/counters") @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) public JobTaskCounterInfo getSingleTaskCounters( @Context HttpServletRequest hsr, @PathParam("jobid") String jid, @PathParam("taskid") String tid) { init(); Job job = getJobFromJobIdString(jid, appCtx); checkAccess(job, hsr); Task task = getTaskFromTaskIdString(tid, job); return new JobTaskCounterInfo(task); } @GET @Path("/jobs/{jobid}/tasks/{taskid}/attempts") @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) public TaskAttemptsInfo getJobTaskAttempts(@Context HttpServletRequest hsr, @PathParam("jobid") String jid, @PathParam("taskid") String tid) { init(); TaskAttemptsInfo attempts = new TaskAttemptsInfo(); Job job = getJobFromJobIdString(jid, appCtx); checkAccess(job, hsr); Task task = getTaskFromTaskIdString(tid, job); for (TaskAttempt ta : task.getAttempts().values()) { if (ta != null) { if (task.getType() == TaskType.REDUCE) { attempts.add(new ReduceTaskAttemptInfo(ta, task.getType())); } else { attempts.add(new TaskAttemptInfo(ta, task.getType(), true)); } } } return attempts; } @GET @Path("/jobs/{jobid}/tasks/{taskid}/attempts/{attemptid}") @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) public TaskAttemptInfo getJobTaskAttemptId(@Context HttpServletRequest hsr, @PathParam("jobid") String jid, @PathParam("taskid") String tid, @PathParam("attemptid") String attId) { init(); Job job = getJobFromJobIdString(jid, appCtx); checkAccess(job, hsr); Task task = getTaskFromTaskIdString(tid, job); TaskAttempt ta = getTaskAttemptFromTaskAttemptString(attId, task); if (task.getType() == TaskType.REDUCE) { return new ReduceTaskAttemptInfo(ta, task.getType()); } else { return new TaskAttemptInfo(ta, task.getType(), true); } } @GET @Path("/jobs/{jobid}/tasks/{taskid}/attempts/{attemptid}/state") @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) public JobTaskAttemptState getJobTaskAttemptState( @Context HttpServletRequest hsr, @PathParam("jobid") String jid, @PathParam("taskid") String tid, @PathParam("attemptid") String attId) throws IOException, InterruptedException 
{ init(); Job job = getJobFromJobIdString(jid, appCtx); checkAccess(job, hsr); Task task = getTaskFromTaskIdString(tid, job); TaskAttempt ta = getTaskAttemptFromTaskAttemptString(attId, task); return new JobTaskAttemptState(ta.getState().toString()); } @PUT @Path("/jobs/{jobid}/tasks/{taskid}/attempts/{attemptid}/state") @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) @Consumes({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) public Response updateJobTaskAttemptState(JobTaskAttemptState targetState, @Context HttpServletRequest hsr, @PathParam("jobid") String jid, @PathParam("taskid") String tid, @PathParam("attemptid") String attId) throws IOException, InterruptedException { init(); Job job = getJobFromJobIdString(jid, appCtx); checkAccess(job, hsr); String remoteUser = hsr.getRemoteUser(); UserGroupInformation callerUGI = null; if (remoteUser != null) { callerUGI = UserGroupInformation.createRemoteUser(remoteUser); } Task task = getTaskFromTaskIdString(tid, job); TaskAttempt ta = getTaskAttemptFromTaskAttemptString(attId, task); if (!ta.getState().toString().equals(targetState.getState())) { // user is attempting to change state. right we only // allow users to kill the job task attempt if (targetState.getState().equals(TaskAttemptState.KILLED.toString())) { return killJobTaskAttempt(ta, callerUGI, hsr); } throw new BadRequestException("Only '" + TaskAttemptState.KILLED.toString() + "' is allowed as a target state."); } JobTaskAttemptState ret = new JobTaskAttemptState(); ret.setState(ta.getState().toString()); return Response.status(Status.OK).entity(ret).build(); } @GET @Path("/jobs/{jobid}/tasks/{taskid}/attempts/{attemptid}/counters") @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) public JobTaskAttemptCounterInfo getJobTaskAttemptIdCounters( @Context HttpServletRequest hsr, @PathParam("jobid") String jid, @PathParam("taskid") String tid, @PathParam("attemptid") String attId) { init(); Job job = getJobFromJobIdString(jid, appCtx); checkAccess(job, hsr); Task task = getTaskFromTaskIdString(tid, job); TaskAttempt ta = getTaskAttemptFromTaskAttemptString(attId, task); return new JobTaskAttemptCounterInfo(ta); } protected Response killJobTaskAttempt(TaskAttempt ta, UserGroupInformation callerUGI, HttpServletRequest hsr) throws IOException, InterruptedException { Preconditions.checkNotNull(ta, "ta cannot be null"); String userName = callerUGI.getUserName(); final TaskAttemptId attemptId = ta.getID(); try { callerUGI .doAs(new PrivilegedExceptionAction<KillTaskAttemptResponse>() { @Override public KillTaskAttemptResponse run() throws IOException, YarnException { KillTaskAttemptRequest req = new KillTaskAttemptRequestPBImpl(); req.setTaskAttemptId(attemptId); return service.forceKillTaskAttempt(req); } }); } catch (UndeclaredThrowableException ue) { // if the root cause is a permissions issue // bubble that up to the user if (ue.getCause() instanceof YarnException) { YarnException ye = (YarnException) ue.getCause(); if (ye.getCause() instanceof AccessControlException) { String taId = attemptId.toString(); String msg = "Unauthorized attempt to kill task attempt " + taId + " by remote user " + userName; return Response.status(Status.FORBIDDEN).entity(msg).build(); } else { throw ue; } } else { throw ue; } } JobTaskAttemptState ret = new JobTaskAttemptState(); ret.setState(TaskAttemptState.KILLED.toString()); return Response.status(Status.OK).entity(ret).build(); } }
19,436
36.022857
98
java
hadoop
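AMWebServices is rooted at /ws/v1/mapreduce, so the job list, per-job tasks, attempts and counters are all plain GET resources, and the .../state sub-resource additionally accepts a PUT whose JSON body appears to be {"state":"KILLED"} — the only target state the code allows. A hypothetical client sketch using only the JDK; the host and port are placeholders, and in practice the AM is usually reached through the RM web proxy:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class AmRestClientSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder address; substitute the real AM (or proxy) host and port.
    URL url = new URL("http://am-host:8080/ws/v1/mapreduce/jobs");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("GET");
    conn.setRequestProperty("Accept", "application/json");
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
      StringBuilder body = new StringBuilder();
      for (String line; (line = in.readLine()) != null; ) {
        body.append(line);
      }
      System.out.println(conn.getResponseCode() + ": " + body);
    } finally {
      conn.disconnect();
    }
  }
}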
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppView.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.webapp; import org.apache.hadoop.yarn.webapp.SubView; import org.apache.hadoop.yarn.webapp.view.TwoColumnLayout; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*; public class AppView extends TwoColumnLayout { @Override protected void preHead(Page.HTML<_> html) { commonPreHead(html); set(DATATABLES_ID, "jobs"); set(initID(DATATABLES, "jobs"), jobsTableInit()); setTableStyles(html, "jobs"); } protected void commonPreHead(Page.HTML<_> html) { set(ACCORDION_ID, "nav"); set(initID(ACCORDION, "nav"), "{autoHeight:false, active:1}"); } @Override protected Class<? extends SubView> nav() { return NavBlock.class; } @Override protected Class<? extends SubView> content() { return JobsBlock.class; } private String jobsTableInit() { return tableInit(). // Sort by id upon page load append(", aaSorting: [[0, 'asc']]"). append(",aoColumns:[{sType:'title-numeric'},"). append("null,null,{sType:'title-numeric', bSearchable:false},null,"). append("null,{sType:'title-numeric',bSearchable:false}, null, null]}"). toString(); } }
1,984
32.083333
79
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersBlock.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.webapp; import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.JOB_ID; import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.TASK_ID; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.C_TABLE; import static org.apache.hadoop.yarn.webapp.view.JQueryUI._INFO_WRAP; import java.util.Map; import org.apache.hadoop.mapreduce.Counter; import org.apache.hadoop.mapreduce.CounterGroup; import org.apache.hadoop.mapreduce.Counters; import org.apache.hadoop.mapreduce.v2.api.records.JobId; import org.apache.hadoop.mapreduce.v2.api.records.TaskId; import org.apache.hadoop.mapreduce.v2.app.AppContext; import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.mapreduce.v2.app.job.Task; import org.apache.hadoop.mapreduce.v2.util.MRApps; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TD; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.THEAD; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TR; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import com.google.inject.Inject; public class CountersBlock extends HtmlBlock { Job job; Task task; Counters total; Counters map; Counters reduce; @Inject CountersBlock(AppContext appCtx, ViewContext ctx) { super(ctx); getCounters(appCtx); } @Override protected void render(Block html) { if (job == null) { html. p()._("Sorry, no counters for nonexistent", $(JOB_ID, "job"))._(); return; } if (!$(TASK_ID).isEmpty() && task == null) { html. p()._("Sorry, no counters for nonexistent", $(TASK_ID, "task"))._(); return; } if(total == null || total.getGroupNames() == null || total.countCounters() == 0) { String type = $(TASK_ID); if(type == null || type.isEmpty()) { type = $(JOB_ID, "the job"); } html. p()._("Sorry it looks like ",type," has no counters.")._(); return; } String urlBase; String urlId; if(task != null) { urlBase = "singletaskcounter"; urlId = MRApps.toString(task.getID()); } else { urlBase = "singlejobcounter"; urlId = MRApps.toString(job.getID()); } int numGroups = 0; TBODY<TABLE<DIV<Hamlet>>> tbody = html. div(_INFO_WRAP). table("#counters"). thead(). tr(). th(".group.ui-state-default", "Counter Group"). th(".ui-state-default", "Counters")._()._(). tbody(); for (CounterGroup g : total) { CounterGroup mg = map == null ? null : map.getGroup(g.getName()); CounterGroup rg = reduce == null ? null : reduce.getGroup(g.getName()); ++numGroups; // This is mostly for demonstration :) Typically we'd introduced // a CounterGroup block to reduce the verbosity. 
OTOH, this // serves as an indicator of where we're in the tag hierarchy. TR<THEAD<TABLE<TD<TR<TBODY<TABLE<DIV<Hamlet>>>>>>>> groupHeadRow = tbody. tr(). th().$title(g.getName()).$class("ui-state-default"). _(fixGroupDisplayName(g.getDisplayName()))._(). td().$class(C_TABLE). table(".dt-counters").$id(job.getID()+"."+g.getName()). thead(). tr().th(".name", "Name"); if (map != null) { groupHeadRow.th("Map").th("Reduce"); } // Ditto TBODY<TABLE<TD<TR<TBODY<TABLE<DIV<Hamlet>>>>>>> group = groupHeadRow. th(map == null ? "Value" : "Total")._()._(). tbody(); for (Counter counter : g) { // Ditto TR<TBODY<TABLE<TD<TR<TBODY<TABLE<DIV<Hamlet>>>>>>>> groupRow = group. tr(); if (task == null && mg == null && rg == null) { groupRow.td().$title(counter.getName())._(counter.getDisplayName()). _(); } else { groupRow.td().$title(counter.getName()). a(url(urlBase,urlId,g.getName(), counter.getName()), counter.getDisplayName()). _(); } if (map != null) { Counter mc = mg == null ? null : mg.findCounter(counter.getName()); Counter rc = rg == null ? null : rg.findCounter(counter.getName()); groupRow. td(mc == null ? "0" : String.format("%,d", mc.getValue())). td(rc == null ? "0" : String.format("%,d", rc.getValue())); } groupRow.td(String.format("%,d", counter.getValue()))._(); } group._()._()._()._(); } tbody._()._()._(); } private void getCounters(AppContext ctx) { JobId jobID = null; TaskId taskID = null; String tid = $(TASK_ID); if (!tid.isEmpty()) { taskID = MRApps.toTaskID(tid); jobID = taskID.getJobId(); } else { String jid = $(JOB_ID); if (jid != null && !jid.isEmpty()) { jobID = MRApps.toJobID(jid); } } if (jobID == null) { return; } job = ctx.getJob(jobID); if (job == null) { return; } if (taskID != null) { task = job.getTask(taskID); if (task == null) { return; } total = task.getCounters(); return; } // Get all types of counters Map<TaskId, Task> tasks = job.getTasks(); total = job.getAllCounters(); boolean needTotalCounters = false; if (total == null) { total = new Counters(); needTotalCounters = true; } map = new Counters(); reduce = new Counters(); for (Task t : tasks.values()) { Counters counters = t.getCounters(); if (counters == null) { continue; } switch (t.getType()) { case MAP: map.incrAllCounters(counters); break; case REDUCE: reduce.incrAllCounters(counters); break; } if (needTotalCounters) { total.incrAllCounters(counters); } } } private String fixGroupDisplayName(CharSequence name) { return name.toString().replace(".", ".\u200B").replace("$", "\u200B$"); } }
7,028
32.793269
86
java
hadoop
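getCounters() folds each task's counters into per-type map/reduce totals (and into an overall total when the job does not supply one) via Counters.incrAllCounters. A standalone sketch of that aggregation pattern; the group and counter names are made up for illustration:

import org.apache.hadoop.mapreduce.Counters;

public class CounterAggregationSketch {
  public static void main(String[] args) {
    Counters map = new Counters();
    Counters reduce = new Counters();
    Counters total = new Counters();

    // Pretend these came from one map task and one reduce task.
    Counters fromMapTask = new Counters();
    fromMapTask.findCounter("example.group", "RECORDS").increment(100);
    Counters fromReduceTask = new Counters();
    fromReduceTask.findCounter("example.group", "RECORDS").increment(40);

    map.incrAllCounters(fromMapTask);
    reduce.incrAllCounters(fromReduceTask);
    total.incrAllCounters(fromMapTask);
    total.incrAllCounters(fromReduceTask);

    // Prints 140: the per-task values are summed into the total.
    System.out.println(total.findCounter("example.group", "RECORDS").getValue());
  }
}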
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/SingleCounterPage.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.webapp; import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.TASK_ID; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*; import org.apache.hadoop.mapreduce.v2.app.webapp.SingleCounterBlock; import org.apache.hadoop.yarn.webapp.SubView; /** * Render the counters page */ public class SingleCounterPage extends AppView { /* * (non-Javadoc) * @see org.apache.hadoop.mapreduce.v2.hs.webapp.HsView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML) */ @Override protected void preHead(Page.HTML<_> html) { commonPreHead(html); String tid = $(TASK_ID); String activeNav = "3"; if(tid == null || tid.isEmpty()) { activeNav = "2"; } set(initID(ACCORDION, "nav"), "{autoHeight:false, active:"+activeNav+"}"); set(DATATABLES_ID, "singleCounter"); set(initID(DATATABLES, "singleCounter"), counterTableInit()); setTableStyles(html, "singleCounter"); } /** * @return The end of a javascript map that is the jquery datatable * configuration for the jobs table. the Jobs table is assumed to be * rendered by the class returned from {@link #content()} */ private String counterTableInit() { return tableInit(). append(",aoColumnDefs:["). append("{'sType':'title-numeric', 'aTargets': [ 1 ] }"). append("]}"). toString(); } /** * The content of this page is the CountersBlock now. * @return CountersBlock.class */ @Override protected Class<? extends SubView> content() { return SingleCounterBlock.class; } }
2,398
33.271429
115
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobsBlock.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.webapp; import static org.apache.hadoop.yarn.util.StringHelper.join; import static org.apache.hadoop.yarn.webapp.view.JQueryUI._PROGRESSBAR; import static org.apache.hadoop.yarn.webapp.view.JQueryUI._PROGRESSBAR_VALUE; import org.apache.hadoop.mapreduce.v2.app.AppContext; import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobInfo; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import com.google.inject.Inject; public class JobsBlock extends HtmlBlock { final AppContext appContext; @Inject JobsBlock(AppContext appCtx) { appContext = appCtx; } @Override protected void render(Block html) { TBODY<TABLE<Hamlet>> tbody = html. h2("Active Jobs"). table("#jobs"). thead(). tr(). th(".id", "Job ID"). th(".name", "Name"). th(".state", "State"). th("Map Progress"). th("Maps Total"). th("Maps Completed"). th("Reduce Progress"). th("Reduces Total"). th("Reduces Completed")._()._(). tbody(); for (Job j : appContext.getAllJobs().values()) { JobInfo job = new JobInfo(j, false); tbody. tr(). td(). span().$title(String.valueOf(job.getId()))._(). // for sorting a(url("job", job.getId()), job.getId())._(). td(job.getName()). td(job.getState()). td(). span().$title(job.getMapProgressPercent())._(). // for sorting div(_PROGRESSBAR). $title(join(job.getMapProgressPercent(), '%')). // tooltip div(_PROGRESSBAR_VALUE). $style(join("width:", job.getMapProgressPercent(), '%'))._()._()._(). td(String.valueOf(job.getMapsTotal())). td(String.valueOf(job.getMapsCompleted())). td(). span().$title(job.getReduceProgressPercent())._(). // for sorting div(_PROGRESSBAR). $title(join(job.getReduceProgressPercent(), '%')). // tooltip div(_PROGRESSBAR_VALUE). $style(join("width:", job.getReduceProgressPercent(), '%'))._()._()._(). td(String.valueOf(job.getReducesTotal())). td(String.valueOf(job.getReducesCompleted()))._(); } tbody._()._(); } }
3,347
37.482759
88
java
hadoop
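JobsBlock emits its table through YARN's Hamlet fluent HTML DSL: each element method opens a tag, leaf overloads such as td(String) write a complete cell, and _() closes the innermost open element. A tiny, hypothetical block (not in the repository) showing the same pattern in isolation:

import org.apache.hadoop.yarn.webapp.view.HtmlBlock;

public class HelloBlock extends HtmlBlock {
  @Override
  protected void render(Block html) {
    html.
      h2("Sketch").
      table("#sketch").
        tr().
          td("key").
          td("value")._(). // closes the row
        _();               // closes the table
  }
}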
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobConfPage.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.webapp; import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.JOB_ID; import static org.apache.hadoop.yarn.util.StringHelper.join; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.ACCORDION; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.postInitID; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit; import org.apache.hadoop.mapreduce.v2.app.webapp.ConfBlock; import org.apache.hadoop.yarn.webapp.SubView; /** * Render a page with the configuration for a given job in it. */ public class JobConfPage extends AppView { /* * (non-Javadoc) * @see org.apache.hadoop.mapreduce.v2.hs.webapp.HsView#preHead(org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML) */ @Override protected void preHead(Page.HTML<_> html) { String jobID = $(JOB_ID); set(TITLE, jobID.isEmpty() ? "Bad request: missing job ID" : join("Configuration for MapReduce Job ", $(JOB_ID))); commonPreHead(html); set(initID(ACCORDION, "nav"), "{autoHeight:false, active:2}"); set(DATATABLES_ID, "conf"); set(initID(DATATABLES, "conf"), confTableInit()); set(postInitID(DATATABLES, "conf"), confPostTableInit()); setTableStyles(html, "conf"); } /** * The body of this block is the configuration block. * @return ConfBlock.class */ @Override protected Class<? extends SubView> content() { return ConfBlock.class; } /** * @return the end of the JS map that is the jquery datatable config for the * conf table. */ private String confTableInit() { return tableInit().append("}").toString(); } /** * @return the java script code to allow the jquery conf datatable to filter * by column. */ private String confPostTableInit() { return "var confInitVals = new Array();\n" + "$('tfoot input').keyup( function () \n{"+ " confDataTable.fnFilter( this.value, $('tfoot input').index(this) );\n"+ "} );\n"+ "$('tfoot input').each( function (i) {\n"+ " confInitVals[i] = this.value;\n"+ "} );\n"+ "$('tfoot input').focus( function () {\n"+ " if ( this.className == 'search_init' )\n"+ " {\n"+ " this.className = '';\n"+ " this.value = '';\n"+ " }\n"+ "} );\n"+ "$('tfoot input').blur( function (i) {\n"+ " if ( this.value == '' )\n"+ " {\n"+ " this.className = 'search_init';\n"+ " this.value = confInitVals[$('tfoot input').index(this)];\n"+ " }\n"+ "} );\n"; } }
3,521
34.938776
115
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebApp.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.webapp; import static org.apache.hadoop.yarn.util.StringHelper.pajoin; import org.apache.hadoop.yarn.webapp.GenericExceptionHandler; import org.apache.hadoop.yarn.webapp.WebApp; /** * Application master webapp */ public class AMWebApp extends WebApp implements AMParams { @Override public void setup() { bind(JAXBContextResolver.class); bind(GenericExceptionHandler.class); bind(AMWebServices.class); route("/", AppController.class); route("/app", AppController.class); route(pajoin("/job", JOB_ID), AppController.class, "job"); route(pajoin("/conf", JOB_ID), AppController.class, "conf"); route(pajoin("/jobcounters", JOB_ID), AppController.class, "jobCounters"); route(pajoin("/singlejobcounter",JOB_ID, COUNTER_GROUP, COUNTER_NAME), AppController.class, "singleJobCounter"); route(pajoin("/tasks", JOB_ID, TASK_TYPE, TASK_STATE), AppController.class, "tasks"); route(pajoin("/attempts", JOB_ID, TASK_TYPE, ATTEMPT_STATE), AppController.class, "attempts"); route(pajoin("/task", TASK_ID), AppController.class, "task"); route(pajoin("/taskcounters", TASK_ID), AppController.class, "taskCounters"); route(pajoin("/singletaskcounter",TASK_ID, COUNTER_GROUP, COUNTER_NAME), AppController.class, "singleTaskCounter"); } }
2,146
40.288462
89
java
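AMWebApp.setup() above binds URL templates to named AppController actions. A sketch of the same registration pattern in a hypothetical subclass; MyWebApp, the /example path and the "example" action name are invented and would require a matching controller method to exist:

package org.apache.hadoop.mapreduce.v2.app.webapp;

import static org.apache.hadoop.yarn.util.StringHelper.pajoin;

import org.apache.hadoop.yarn.webapp.WebApp;

// Hypothetical webapp mirroring the route(...) calls in AMWebApp above.
public class MyWebApp extends WebApp implements AMParams {
  @Override
  public void setup() {
    route("/", AppController.class);                                   // default action
    route(pajoin("/example", JOB_ID), AppController.class, "example"); // invented route + action
  }
}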
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.webapp; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.ACCORDION; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit; import java.util.EnumSet; import java.util.Collection; import org.apache.commons.lang.StringEscapeUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.MRConfig; import org.apache.hadoop.mapreduce.v2.api.records.JobId; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState; import org.apache.hadoop.mapreduce.v2.api.records.TaskId; import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo; import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.webapp.SubView; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.THEAD; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TR; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import com.google.inject.Inject; public class TaskPage extends AppView { static class AttemptsBlock extends HtmlBlock { final App app; final boolean enableUIActions; @Inject AttemptsBlock(App ctx, Configuration conf) { app = ctx; this.enableUIActions = conf.getBoolean(MRConfig.MASTER_WEBAPP_UI_ACTIONS_ENABLED, MRConfig.DEFAULT_MASTER_WEBAPP_UI_ACTIONS_ENABLED); } @Override protected void render(Block html) { if (!isValidRequest()) { html. 
h2($(TITLE)); return; } JobId jobId = app.getJob().getID(); if (enableUIActions) { // Kill task attempt StringBuilder script = new StringBuilder(); script .append("function confirmAction(appID, jobID, taskID, attID) {\n") .append(" var b = confirm(\"Are you sure?\");\n") .append(" if (b == true) {\n") .append(" var current = '/proxy/' + appID") .append(" + '/mapreduce/task/' + taskID;\n") .append(" var stateURL = '/proxy/' + appID") .append(" + '/ws/v1/mapreduce/jobs/' + jobID") .append(" + '/tasks/' + taskID") .append(" + '/attempts/' + attID + '/state';\n") .append(" $.ajax({\n") .append(" type: 'PUT',\n") .append(" url: stateURL,\n") .append(" contentType: 'application/json',\n") .append(" data: '{\"state\":\"KILLED\"}',\n") .append(" dataType: 'json'\n") .append(" }).done(function(data) {\n") .append(" setTimeout(function() {\n") .append(" location.href = current;\n") .append(" }, 1000);\n") .append(" }).fail(function(data) {\n") .append(" console.log(data);\n") .append(" });\n") .append(" }\n") .append("}\n"); html.script().$type("text/javascript")._(script.toString())._(); } TR<THEAD<TABLE<Hamlet>>> tr = html.table("#attempts").thead().tr(); tr.th(".id", "Attempt"). th(".progress", "Progress"). th(".state", "State"). th(".status", "Status"). th(".node", "Node"). th(".logs", "Logs"). th(".tsh", "Started"). th(".tsh", "Finished"). th(".tsh", "Elapsed"). th(".note", "Note"); if (enableUIActions) { tr.th(".actions", "Actions"); } TBODY<TABLE<Hamlet>> tbody = tr._()._().tbody(); // Write all the data into a JavaScript array of arrays for JQuery // DataTables to display StringBuilder attemptsTableData = new StringBuilder("[\n"); for (TaskAttempt attempt : getTaskAttempts()) { TaskAttemptInfo ta = new TaskAttemptInfo(attempt, true); String progress = StringUtils.formatPercent(ta.getProgress() / 100, 2); String nodeHttpAddr = ta.getNode(); String diag = ta.getNote() == null ? "" : ta.getNote(); TaskId taskId = attempt.getID().getTaskId(); attemptsTableData.append("[\"") .append(getAttemptId(taskId, ta)).append("\",\"") .append(progress).append("\",\"") .append(ta.getState().toString()).append("\",\"") .append(StringEscapeUtils.escapeJavaScript( StringEscapeUtils.escapeHtml(ta.getStatus()))).append("\",\"") .append(nodeHttpAddr == null ? "N/A" : "<a class='nodelink' href='" + MRWebAppUtil.getYARNWebappScheme() + nodeHttpAddr + "'>" + nodeHttpAddr + "</a>") .append("\",\"") .append(ta.getAssignedContainerId() == null ? 
"N/A" : "<a class='logslink' href='" + url(MRWebAppUtil.getYARNWebappScheme(), nodeHttpAddr, "node" , "containerlogs", ta.getAssignedContainerIdStr(), app.getJob() .getUserName()) + "'>logs</a>") .append("\",\"") .append(ta.getStartTime()).append("\",\"") .append(ta.getFinishTime()).append("\",\"") .append(ta.getElapsedTime()).append("\",\"") .append(StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml( diag))); if (enableUIActions) { attemptsTableData.append("\",\""); if (EnumSet.of( TaskAttemptState.SUCCEEDED, TaskAttemptState.FAILED, TaskAttemptState.KILLED).contains(attempt.getState())) { attemptsTableData.append("N/A"); } else { attemptsTableData .append("<a href=javascript:void(0) onclick=confirmAction('") .append(jobId.getAppId()).append("','") .append(jobId).append("','") .append(attempt.getID().getTaskId()).append("','") .append(ta.getId()) .append("');>Kill</a>"); } attemptsTableData.append("\"],\n"); } } //Remove the last comma and close off the array of arrays if(attemptsTableData.charAt(attemptsTableData.length() - 2) == ',') { attemptsTableData.delete(attemptsTableData.length()-2, attemptsTableData.length()-1); } attemptsTableData.append("]"); html.script().$type("text/javascript"). _("var attemptsTableData=" + attemptsTableData)._(); tbody._()._(); } protected String getAttemptId(TaskId taskId, TaskAttemptInfo ta) { return ta.getId(); } protected boolean isValidRequest() { return app.getTask() != null; } protected Collection<TaskAttempt> getTaskAttempts() { return app.getTask().getAttempts().values(); } } @Override protected void preHead(Page.HTML<_> html) { commonPreHead(html); set(initID(ACCORDION, "nav"), "{autoHeight:false, active:3}"); set(DATATABLES_ID, "attempts"); set(initID(DATATABLES, "attempts"), attemptsTableInit()); setTableStyles(html, "attempts"); } @Override protected Class<? extends SubView> content() { return AttemptsBlock.class; } private String attemptsTableInit() { return tableInit() .append(", 'aaData': attemptsTableData") .append(", bDeferRender: true") .append(", bProcessing: true") .append("\n,aoColumnDefs:[\n") //logs column should not filterable (it includes container ID which may pollute searches) .append("\n{'aTargets': [ 5 ]") .append(", 'bSearchable': false }") .append("\n, {'sType':'string', 'aTargets': [ 0 ]") .append(", 'mRender': parseHadoopID }") .append("\n, {'sType':'numeric', 'aTargets': [ 6, 7") .append(" ], 'mRender': renderHadoopDate }") .append("\n, {'sType':'numeric', 'aTargets': [ 8") .append(" ], 'mRender': renderHadoopElapsedTime }]") // Sort by id upon page load .append("\n, aaSorting: [[0, 'asc']]") .append("}").toString(); } }
9,086
37.180672
101
java
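The inline JavaScript in the TaskPage record above kills an attempt by PUTting {"state":"KILLED"} to /proxy/&lt;appId&gt;/ws/v1/mapreduce/jobs/&lt;jobId&gt;/tasks/&lt;taskId&gt;/attempts/&lt;attemptId&gt;/state. A standalone sketch of the same request from plain Java; the RM web address and every ID below are placeholders, and a secured cluster would additionally need authentication:

import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

// Standalone illustration of the state-change request issued by TaskPage's
// confirmAction() JavaScript. Host, port and all IDs are placeholders.
public class KillAttemptClient {
  public static void main(String[] args) throws Exception {
    String url = "http://rm-host:8088/proxy/application_1400000000000_0001"
        + "/ws/v1/mapreduce/jobs/job_1400000000000_0001"
        + "/tasks/task_1400000000000_0001_m_000000"
        + "/attempts/attempt_1400000000000_0001_m_000000_0/state";
    HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection();
    conn.setRequestMethod("PUT");
    conn.setDoOutput(true);
    conn.setRequestProperty("Content-Type", "application/json");
    try (OutputStream out = conn.getOutputStream()) {
      out.write("{\"state\":\"KILLED\"}".getBytes(StandardCharsets.UTF_8));
    }
    System.out.println("HTTP " + conn.getResponseCode()); // 200 expected on success
    conn.disconnect();
  }
}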
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/SingleCounterBlock.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.webapp; import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.COUNTER_GROUP; import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.COUNTER_NAME; import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.JOB_ID; import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.TASK_ID; import static org.apache.hadoop.yarn.webapp.view.JQueryUI._INFO_WRAP; import java.util.Map; import java.util.TreeMap; import org.apache.hadoop.mapreduce.Counter; import org.apache.hadoop.mapreduce.CounterGroup; import org.apache.hadoop.mapreduce.Counters; import org.apache.hadoop.mapreduce.v2.api.records.JobId; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; import org.apache.hadoop.mapreduce.v2.api.records.TaskId; import org.apache.hadoop.mapreduce.v2.app.AppContext; import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.mapreduce.v2.app.job.Task; import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; import org.apache.hadoop.mapreduce.v2.util.MRApps; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TR; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import com.google.inject.Inject; public class SingleCounterBlock extends HtmlBlock { protected TreeMap<String, Long> values = new TreeMap<String, Long>(); protected Job job; protected Task task; @Inject SingleCounterBlock(AppContext appCtx, ViewContext ctx) { super(ctx); this.populateMembers(appCtx); } @Override protected void render(Block html) { if (job == null) { html. p()._("Sorry, no counters for nonexistent", $(JOB_ID, "job"))._(); return; } if (!$(TASK_ID).isEmpty() && task == null) { html. p()._("Sorry, no counters for nonexistent", $(TASK_ID, "task"))._(); return; } String columnType = task == null ? "Task" : "Task Attempt"; TBODY<TABLE<DIV<Hamlet>>> tbody = html. div(_INFO_WRAP). table("#singleCounter"). thead(). tr(). th(".ui-state-default", columnType). th(".ui-state-default", "Value")._()._(). 
tbody(); for (Map.Entry<String, Long> entry : values.entrySet()) { TR<TBODY<TABLE<DIV<Hamlet>>>> row = tbody.tr(); String id = entry.getKey(); String val = entry.getValue().toString(); if(task != null) { row.td(id); row.td().br().$title(val)._()._(val)._(); } else { row.td().a(url("singletaskcounter",entry.getKey(), $(COUNTER_GROUP), $(COUNTER_NAME)), id)._(); row.td().br().$title(val)._().a(url("singletaskcounter",entry.getKey(), $(COUNTER_GROUP), $(COUNTER_NAME)), val)._(); } row._(); } tbody._()._()._(); } private void populateMembers(AppContext ctx) { JobId jobID = null; TaskId taskID = null; String tid = $(TASK_ID); if (!tid.isEmpty()) { taskID = MRApps.toTaskID(tid); jobID = taskID.getJobId(); } else { String jid = $(JOB_ID); if (!jid.isEmpty()) { jobID = MRApps.toJobID(jid); } } if (jobID == null) { return; } job = ctx.getJob(jobID); if (job == null) { return; } if (taskID != null) { task = job.getTask(taskID); if (task == null) { return; } for(Map.Entry<TaskAttemptId, TaskAttempt> entry : task.getAttempts().entrySet()) { long value = 0; Counters counters = entry.getValue().getCounters(); CounterGroup group = (counters != null) ? counters .getGroup($(COUNTER_GROUP)) : null; if(group != null) { Counter c = group.findCounter($(COUNTER_NAME)); if(c != null) { value = c.getValue(); } } values.put(MRApps.toString(entry.getKey()), value); } return; } // Get all types of counters Map<TaskId, Task> tasks = job.getTasks(); for(Map.Entry<TaskId, Task> entry : tasks.entrySet()) { long value = 0; Counters counters = entry.getValue().getCounters(); CounterGroup group = (counters != null) ? counters .getGroup($(COUNTER_GROUP)) : null; if(group != null) { Counter c = group.findCounter($(COUNTER_NAME)); if(c != null) { value = c.getValue(); } } values.put(MRApps.toString(entry.getKey()), value); } } }
5,469
33.402516
79
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobPage.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.webapp; import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.JOB_ID; import static org.apache.hadoop.yarn.util.StringHelper.join; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.ACCORDION; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID; import org.apache.hadoop.yarn.webapp.SubView; public class JobPage extends AppView { @Override protected void preHead(Page.HTML<_> html) { String jobID = $(JOB_ID); set(TITLE, jobID.isEmpty() ? "Bad request: missing job ID" : join("MapReduce Job ", $(JOB_ID))); commonPreHead(html); set(initID(ACCORDION, "nav"), "{autoHeight:false, active:2}"); } @Override protected Class<? extends SubView> content() { return JobBlock.class; } }
1,593
36.069767
74
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TasksBlock.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.webapp; import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.TASK_STATE; import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.TASK_TYPE; import static org.apache.hadoop.yarn.util.StringHelper.join; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.C_PROGRESSBAR; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.C_PROGRESSBAR_VALUE; import org.apache.commons.lang.StringEscapeUtils; import org.apache.hadoop.mapreduce.v2.api.records.TaskType; import org.apache.hadoop.mapreduce.v2.app.job.Task; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskInfo; import org.apache.hadoop.mapreduce.v2.util.MRApps; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import com.google.inject.Inject; public class TasksBlock extends HtmlBlock { final App app; @Inject TasksBlock(App app) { this.app = app; } @Override protected void render(Block html) { if (app.getJob() == null) { html. h2($(TITLE)); return; } TaskType type = null; String symbol = $(TASK_TYPE); if (!symbol.isEmpty()) { type = MRApps.taskType(symbol); } TBODY<TABLE<Hamlet>> tbody = html. table("#tasks"). thead(). tr(). th("Task"). th("Progress"). th("Status"). th("State"). th("Start Time"). th("Finish Time"). th("Elapsed Time")._()._(). 
tbody(); StringBuilder tasksTableData = new StringBuilder("[\n"); for (Task task : app.getJob().getTasks().values()) { if (type != null && task.getType() != type) { continue; } String taskStateStr = $(TASK_STATE); if (taskStateStr == null || taskStateStr.trim().equals("")) { taskStateStr = "ALL"; } if (!taskStateStr.equalsIgnoreCase("ALL")) { try { // get stateUI enum MRApps.TaskStateUI stateUI = MRApps.taskState(taskStateStr); if (!stateUI.correspondsTo(task.getState())) { continue; } } catch (IllegalArgumentException e) { continue; // not supported state, ignore } } TaskInfo info = new TaskInfo(task); String tid = info.getId(); String pct = StringUtils.formatPercent(info.getProgress() / 100, 2); tasksTableData.append("[\"<a href='").append(url("task", tid)) .append("'>").append(tid).append("</a>\",\"") //Progress bar .append("<br title='").append(pct) .append("'> <div class='").append(C_PROGRESSBAR).append("' title='") .append(join(pct, '%')).append("'> ").append("<div class='") .append(C_PROGRESSBAR_VALUE).append("' style='") .append(join("width:", pct, '%')).append("'> </div> </div>\",\"") .append(StringEscapeUtils.escapeJavaScript( StringEscapeUtils.escapeHtml(info.getStatus()))).append("\",\"") .append(info.getState()).append("\",\"") .append(info.getStartTime()).append("\",\"") .append(info.getFinishTime()).append("\",\"") .append(info.getElapsedTime()).append("\"],\n"); } //Remove the last comma and close off the array of arrays if(tasksTableData.charAt(tasksTableData.length() - 2) == ',') { tasksTableData.delete(tasksTableData.length()-2, tasksTableData.length()-1); } tasksTableData.append("]"); html.script().$type("text/javascript"). _("var tasksTableData=" + tasksTableData)._(); tbody._()._(); } }
4,540
35.328
82
java
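TasksBlock above (like the AttemptsBlock in TaskPage) serializes each row into a JavaScript array-of-arrays string and then strips the trailing comma before closing the bracket, because the last append always leaves ",\n" at the end. The trimming step in isolation, with invented rows:

// Toy data; only the trim-the-last-comma step mirrors TasksBlock above.
public class AaDataTrimDemo {
  public static void main(String[] args) {
    StringBuilder rows = new StringBuilder("[\n");
    rows.append("[\"task_1\",\"50.00%\"],\n");
    rows.append("[\"task_2\",\"75.00%\"],\n");
    // length()-2 is the ',' just before the final '\n'; remove exactly that character
    if (rows.charAt(rows.length() - 2) == ',') {
      rows.delete(rows.length() - 2, rows.length() - 1);
    }
    rows.append("]");
    System.out.println(rows); // a well-formed array literal, ready to assign to tasksTableData
  }
}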
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMParams.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.webapp; /** * Params constants for the AM webapp and the history webapp. */ public interface AMParams { static final String RM_WEB = "rm.web"; static final String APP_ID = "app.id"; static final String JOB_ID = "job.id"; static final String TASK_ID = "task.id"; static final String TASK_TYPE = "task.type"; static final String TASK_STATE = "task.state"; static final String ATTEMPT_STATE = "attempt.state"; static final String COUNTER_GROUP = "counter.group"; static final String COUNTER_NAME = "counter.name"; }
1,369
38.142857
74
java
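AMParams above only declares the parameter keys; the controllers and views in the other records read the bound values through $(KEY), which returns an empty string when the parameter is absent. A small hypothetical block showing that lookup (ParamEchoBlock is invented; the call forms are the ones used by JobBlock and SingleCounterBlock):

package org.apache.hadoop.mapreduce.v2.app.webapp;

import org.apache.hadoop.yarn.webapp.view.HtmlBlock;

// Hypothetical block; only the $(...) lookup and the AMParams key are taken
// from the surrounding records.
public class ParamEchoBlock extends HtmlBlock {
  @Override
  protected void render(Block html) {
    String jid = $(AMParams.JOB_ID); // empty string when the route carried no job id
    html.p()._("Rendering for job ", jid.isEmpty() ? "(none)" : jid)._();
  }
}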
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/NavBlock.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.webapp; import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.RM_WEB; import java.util.List; import org.apache.hadoop.mapreduce.v2.api.records.AMInfo; import org.apache.hadoop.mapreduce.v2.util.MRApps; import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import com.google.inject.Inject; public class NavBlock extends HtmlBlock { final App app; @Inject NavBlock(App app) { this.app = app; } @Override protected void render(Block html) { String rmweb = $(RM_WEB); DIV<Hamlet> nav = html. div("#nav"). h3("Cluster"). ul(). li().a(url(rmweb, "cluster", "cluster"), "About")._(). li().a(url(rmweb, "cluster", "apps"), "Applications")._(). li().a(url(rmweb, "cluster", "scheduler"), "Scheduler")._()._(). h3("Application"). ul(). li().a(url("app/info"), "About")._(). li().a(url("app"), "Jobs")._()._(); if (app.getJob() != null) { String jobid = MRApps.toString(app.getJob().getID()); List<AMInfo> amInfos = app.getJob().getAMInfos(); AMInfo thisAmInfo = amInfos.get(amInfos.size()-1); String nodeHttpAddress = thisAmInfo.getNodeManagerHost() + ":" + thisAmInfo.getNodeManagerHttpPort(); nav. h3("Job"). ul(). li().a(url("job", jobid), "Overview")._(). li().a(url("jobcounters", jobid), "Counters")._(). li().a(url("conf", jobid), "Configuration")._(). li().a(url("tasks", jobid, "m"), "Map tasks")._(). li().a(url("tasks", jobid, "r"), "Reduce tasks")._(). li().a(".logslink", url(MRWebAppUtil.getYARNWebappScheme(), nodeHttpAddress, "node", "containerlogs", thisAmInfo.getContainerId().toString(), app.getJob().getUserName()), "AM Logs")._()._(); if (app.getTask() != null) { String taskid = MRApps.toString(app.getTask().getID()); nav. h3("Task"). ul(). li().a(url("task", taskid), "Task Overview")._(). li().a(url("taskcounters", taskid), "Counters")._()._(); } } nav. h3("Tools"). ul(). li().a("/conf", "Configuration")._(). li().a("/logs", "Local logs")._(). li().a("/stacks", "Server stacks")._(). li().a("/jmx?qry=Hadoop:*", "Server metrics")._()._()._(); } }
3,389
37.089888
74
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.webapp; import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.JOB_ID; import static org.apache.hadoop.yarn.webapp.view.JQueryUI._TH; import java.io.IOException; import org.apache.hadoop.fs.Path; import org.apache.hadoop.mapreduce.v2.api.records.JobId; import org.apache.hadoop.mapreduce.v2.app.AppContext; import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ConfEntryInfo; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ConfInfo; import org.apache.hadoop.mapreduce.v2.util.MRApps; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY; import org.apache.hadoop.yarn.webapp.hamlet.HamletSpec.InputType; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import com.google.inject.Inject; /** * Render the configuration for this job. */ public class ConfBlock extends HtmlBlock { final AppContext appContext; @Inject ConfBlock(AppContext appctx) { appContext = appctx; } /* * (non-Javadoc) * @see org.apache.hadoop.yarn.webapp.view.HtmlBlock#render(org.apache.hadoop.yarn.webapp.view.HtmlBlock.Block) */ @Override protected void render(Block html) { String jid = $(JOB_ID); if (jid.isEmpty()) { html. p()._("Sorry, can't do anything without a JobID.")._(); return; } JobId jobID = MRApps.toJobID(jid); Job job = appContext.getJob(jobID); if (job == null) { html. p()._("Sorry, ", jid, " not found.")._(); return; } Path confPath = job.getConfFile(); try { ConfInfo info = new ConfInfo(job); html.div().h3(confPath.toString())._(); TBODY<TABLE<Hamlet>> tbody = html. // Tasks table table("#conf"). thead(). tr(). th(_TH, "key"). th(_TH, "value"). th(_TH, "source chain"). _(). _(). tbody(); for (ConfEntryInfo entry : info.getProperties()) { StringBuffer buffer = new StringBuffer(); String[] sources = entry.getSource(); //Skip the last entry, because it is always the same HDFS file, and // output them in reverse order so most recent is output first boolean first = true; for(int i = (sources.length - 2); i >= 0; i--) { if(!first) { // \u2B05 is an arrow <-- buffer.append(" \u2B05 "); } first = false; buffer.append(sources[i]); } tbody. tr(). td(entry.getName()). td(entry.getValue()). td(buffer.toString()). _(); } tbody._(). tfoot(). tr(). th().input("search_init").$type(InputType.text).$name("key").$value("key")._()._(). th().input("search_init").$type(InputType.text).$name("value").$value("value")._()._(). th().input("search_init").$type(InputType.text).$name("source chain").$value("source chain")._()._(). _(). _(). 
_(); } catch(IOException e) { LOG.error("Error while reading "+confPath, e); html.p()._("Sorry got an error while reading conf file. ",confPath); } } }
4,111
32.983471
113
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobBlock.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.webapp; import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.JOB_ID; import static org.apache.hadoop.yarn.util.StringHelper.join; import static org.apache.hadoop.yarn.webapp.view.JQueryUI._EVEN; import static org.apache.hadoop.yarn.webapp.view.JQueryUI._INFO_WRAP; import static org.apache.hadoop.yarn.webapp.view.JQueryUI._ODD; import static org.apache.hadoop.yarn.webapp.view.JQueryUI._PROGRESSBAR; import static org.apache.hadoop.yarn.webapp.view.JQueryUI._PROGRESSBAR_VALUE; import static org.apache.hadoop.yarn.webapp.view.JQueryUI._TH; import java.util.Date; import java.util.List; import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.mapreduce.v2.api.records.AMInfo; import org.apache.hadoop.mapreduce.v2.api.records.JobId; import org.apache.hadoop.mapreduce.v2.app.AppContext; import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.AMAttemptInfo; import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobInfo; import org.apache.hadoop.mapreduce.v2.util.MRApps; import org.apache.hadoop.mapreduce.v2.util.MRApps.TaskAttemptStateUI; import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import org.apache.hadoop.yarn.webapp.view.InfoBlock; import com.google.inject.Inject; public class JobBlock extends HtmlBlock { final AppContext appContext; @Inject JobBlock(AppContext appctx) { appContext = appctx; } @Override protected void render(Block html) { String jid = $(JOB_ID); if (jid.isEmpty()) { html. p()._("Sorry, can't do anything without a JobID.")._(); return; } JobId jobID = MRApps.toJobID(jid); Job job = appContext.getJob(jobID); if (job == null) { html. p()._("Sorry, ", jid, " not found.")._(); return; } List<AMInfo> amInfos = job.getAMInfos(); String amString = amInfos.size() == 1 ? "ApplicationMaster" : "ApplicationMasters"; JobInfo jinfo = new JobInfo(job, true); info("Job Overview"). _("Job Name:", jinfo.getName()). _("User Name:", jinfo.getUserName()). _("Queue Name:", jinfo.getQueueName()). _("State:", jinfo.getState()). _("Uberized:", jinfo.isUberized()). _("Started:", new Date(jinfo.getStartTime())). _("Elapsed:", StringUtils.formatTime(jinfo.getElapsedTime())); DIV<Hamlet> div = html. _(InfoBlock.class). div(_INFO_WRAP); // MRAppMasters Table TABLE<DIV<Hamlet>> table = div.table("#job"); table. tr(). th(amString). _(). tr(). th(_TH, "Attempt Number"). th(_TH, "Start Time"). th(_TH, "Node"). th(_TH, "Logs"). 
_(); for (AMInfo amInfo : amInfos) { AMAttemptInfo attempt = new AMAttemptInfo(amInfo, jinfo.getId(), jinfo.getUserName()); table.tr(). td(String.valueOf(attempt.getAttemptId())). td(new Date(attempt.getStartTime()).toString()). td().a(".nodelink", url(MRWebAppUtil.getYARNWebappScheme(), attempt.getNodeHttpAddress()), attempt.getNodeHttpAddress())._(). td().a(".logslink", url(attempt.getLogsLink()), "logs")._(). _(); } table._(); div._(); html.div(_INFO_WRAP). // Tasks table table("#job"). tr(). th(_TH, "Task Type"). th(_TH, "Progress"). th(_TH, "Total"). th(_TH, "Pending"). th(_TH, "Running"). th(_TH, "Complete")._(). tr(_ODD). th("Map"). td(). div(_PROGRESSBAR). $title(join(jinfo.getMapProgressPercent(), '%')). // tooltip div(_PROGRESSBAR_VALUE). $style(join("width:", jinfo.getMapProgressPercent(), '%'))._()._()._(). td().a(url("tasks", jid, "m", "ALL"),String.valueOf(jinfo.getMapsTotal()))._(). td().a(url("tasks", jid, "m", "PENDING"),String.valueOf(jinfo.getMapsPending()))._(). td().a(url("tasks", jid, "m", "RUNNING"),String.valueOf(jinfo.getMapsRunning()))._(). td().a(url("tasks", jid, "m", "COMPLETED"),String.valueOf(jinfo.getMapsCompleted()))._()._(). tr(_EVEN). th("Reduce"). td(). div(_PROGRESSBAR). $title(join(jinfo.getReduceProgressPercent(), '%')). // tooltip div(_PROGRESSBAR_VALUE). $style(join("width:", jinfo.getReduceProgressPercent(), '%'))._()._()._(). td().a(url("tasks", jid, "r", "ALL"),String.valueOf(jinfo.getReducesTotal()))._(). td().a(url("tasks", jid, "r", "PENDING"),String.valueOf(jinfo.getReducesPending()))._(). td().a(url("tasks", jid, "r", "RUNNING"),String.valueOf(jinfo.getReducesRunning()))._(). td().a(url("tasks", jid, "r", "COMPLETED"),String.valueOf(jinfo.getReducesCompleted()))._()._() ._(). // Attempts table table("#job"). tr(). th(_TH, "Attempt Type"). th(_TH, "New"). th(_TH, "Running"). th(_TH, "Failed"). th(_TH, "Killed"). th(_TH, "Successful")._(). tr(_ODD). th("Maps"). td().a(url("attempts", jid, "m", TaskAttemptStateUI.NEW.toString()), String.valueOf(jinfo.getNewMapAttempts()))._(). td().a(url("attempts", jid, "m", TaskAttemptStateUI.RUNNING.toString()), String.valueOf(jinfo.getRunningMapAttempts()))._(). td().a(url("attempts", jid, "m", TaskAttemptStateUI.FAILED.toString()), String.valueOf(jinfo.getFailedMapAttempts()))._(). td().a(url("attempts", jid, "m", TaskAttemptStateUI.KILLED.toString()), String.valueOf(jinfo.getKilledMapAttempts()))._(). td().a(url("attempts", jid, "m", TaskAttemptStateUI.SUCCESSFUL.toString()), String.valueOf(jinfo.getSuccessfulMapAttempts()))._(). _(). tr(_EVEN). th("Reduces"). td().a(url("attempts", jid, "r", TaskAttemptStateUI.NEW.toString()), String.valueOf(jinfo.getNewReduceAttempts()))._(). td().a(url("attempts", jid, "r", TaskAttemptStateUI.RUNNING.toString()), String.valueOf(jinfo.getRunningReduceAttempts()))._(). td().a(url("attempts", jid, "r", TaskAttemptStateUI.FAILED.toString()), String.valueOf(jinfo.getFailedReduceAttempts()))._(). td().a(url("attempts", jid, "r", TaskAttemptStateUI.KILLED.toString()), String.valueOf(jinfo.getKilledReduceAttempts()))._(). td().a(url("attempts", jid, "r", TaskAttemptStateUI.SUCCESSFUL.toString()), String.valueOf(jinfo.getSuccessfulReduceAttempts()))._(). _(). _(). _(); } }
8,079
38.607843
107
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/BlacklistedNodesInfo.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.webapp.dao; import java.util.Set; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlRootElement; import org.apache.hadoop.mapreduce.v2.app.AppContext; @XmlRootElement(name = "blacklistednodesinfo") @XmlAccessorType(XmlAccessType.FIELD) public class BlacklistedNodesInfo { private Set<String> blacklistedNodes; public BlacklistedNodesInfo() { } public BlacklistedNodesInfo(AppContext appContext) { blacklistedNodes = appContext.getBlacklistedNodes(); } public Set<String> getBlacklistedNodes() { return blacklistedNodes; } }
1,491
33.697674
75
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/AMAttemptsInfo.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.webapp.dao; import java.util.ArrayList; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; @XmlRootElement(name = "jobAttempts") @XmlAccessorType(XmlAccessType.FIELD) public class AMAttemptsInfo { @XmlElement(name = "jobAttempt") protected ArrayList<AMAttemptInfo> attempt = new ArrayList<AMAttemptInfo>(); public AMAttemptsInfo() { } // JAXB needs this public void add(AMAttemptInfo info) { this.attempt.add(info); } public ArrayList<AMAttemptInfo> getAttempts() { return this.attempt; } }
1,514
31.934783
78
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptInfo.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.webapp.dao; import java.util.List; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlRootElement; import javax.xml.bind.annotation.XmlSeeAlso; import javax.xml.bind.annotation.XmlTransient; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState; import org.apache.hadoop.mapreduce.v2.api.records.TaskType; import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; import org.apache.hadoop.mapreduce.v2.util.MRApps; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.util.Times; @XmlRootElement(name = "taskAttempt") @XmlSeeAlso({ ReduceTaskAttemptInfo.class }) @XmlAccessorType(XmlAccessType.FIELD) public class TaskAttemptInfo { protected long startTime; protected long finishTime; protected long elapsedTime; protected float progress; protected String id; protected String rack; protected TaskAttemptState state; protected String status; protected String nodeHttpAddress; protected String diagnostics; protected String type; protected String assignedContainerId; @XmlTransient protected ContainerId assignedContainer; public TaskAttemptInfo() { } public TaskAttemptInfo(TaskAttempt ta, Boolean isRunning) { this(ta, TaskType.MAP, isRunning); } public TaskAttemptInfo(TaskAttempt ta, TaskType type, Boolean isRunning) { final TaskAttemptReport report = ta.getReport(); this.type = type.toString(); this.id = MRApps.toString(ta.getID()); this.nodeHttpAddress = ta.getNodeHttpAddress(); this.startTime = report.getStartTime(); this.finishTime = report.getFinishTime(); this.assignedContainerId = ConverterUtils.toString(report.getContainerId()); this.assignedContainer = report.getContainerId(); this.progress = report.getProgress() * 100; this.status = report.getStateString(); this.state = report.getTaskAttemptState(); this.elapsedTime = Times .elapsed(this.startTime, this.finishTime, isRunning); if (this.elapsedTime == -1) { this.elapsedTime = 0; } this.diagnostics = report.getDiagnosticInfo(); this.rack = ta.getNodeRackName(); } public String getAssignedContainerIdStr() { return this.assignedContainerId; } public ContainerId getAssignedContainerId() { return this.assignedContainer; } public String getState() { return this.state.toString(); } public String getStatus() { return status; } public String getId() { return this.id; } public long getStartTime() { return this.startTime; } public long getFinishTime() { return this.finishTime; } public float getProgress() { return this.progress; } public long getElapsedTime() { return this.elapsedTime; } public String 
getNode() { return this.nodeHttpAddress; } public String getRack() { return this.rack; } public String getNote() { return this.diagnostics; } }
3,961
28.348148
80
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskCounterInfo.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.webapp.dao; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlRootElement; @XmlRootElement(name = "counter") @XmlAccessorType(XmlAccessType.FIELD) public class TaskCounterInfo { protected String name; protected long value; public TaskCounterInfo() { } public TaskCounterInfo(String name, long value) { this.name = name; this.value = value; } public String getName() { return name; } public long getValue() { return value; } }
1,403
28.87234
75
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobTaskAttemptCounterInfo.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.webapp.dao; import java.util.ArrayList; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlRootElement; import javax.xml.bind.annotation.XmlTransient; import org.apache.hadoop.mapreduce.CounterGroup; import org.apache.hadoop.mapreduce.Counters; import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; import org.apache.hadoop.mapreduce.v2.util.MRApps; @XmlRootElement(name = "jobTaskAttemptCounters") @XmlAccessorType(XmlAccessType.FIELD) public class JobTaskAttemptCounterInfo { @XmlTransient protected Counters total = null; protected String id; protected ArrayList<TaskCounterGroupInfo> taskAttemptCounterGroup; public JobTaskAttemptCounterInfo() { } public JobTaskAttemptCounterInfo(TaskAttempt taskattempt) { this.id = MRApps.toString(taskattempt.getID()); total = taskattempt.getCounters(); taskAttemptCounterGroup = new ArrayList<TaskCounterGroupInfo>(); if (total != null) { for (CounterGroup g : total) { if (g != null) { TaskCounterGroupInfo cginfo = new TaskCounterGroupInfo(g.getName(), g); if (cginfo != null) { taskAttemptCounterGroup.add(cginfo); } } } } } }
2,126
32.761905
81
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/AppInfo.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.webapp.dao; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlRootElement; import org.apache.hadoop.mapreduce.v2.app.AppContext; import org.apache.hadoop.mapreduce.v2.app.webapp.App; import org.apache.hadoop.yarn.util.Times; @XmlRootElement(name = "info") @XmlAccessorType(XmlAccessType.FIELD) public class AppInfo { protected String appId; protected String name; protected String user; protected long startedOn; protected long elapsedTime; public AppInfo() { } public AppInfo(App app, AppContext context) { this.appId = context.getApplicationID().toString(); this.name = context.getApplicationName().toString(); this.user = context.getUser().toString(); this.startedOn = context.getStartTime(); this.elapsedTime = Times.elapsed(this.startedOn, 0); } public String getId() { return this.appId; } public String getName() { return this.name; } public String getUser() { return this.user; } public long getStartTime() { return this.startedOn; } public long getElapsedTime() { return this.elapsedTime; } }
2,026
27.957143
75
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/ReduceTaskAttemptInfo.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.webapp.dao; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlRootElement; import org.apache.hadoop.mapreduce.v2.api.records.TaskType; import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; import org.apache.hadoop.yarn.util.Times; @XmlRootElement(name = "taskAttempt") @XmlAccessorType(XmlAccessType.FIELD) public class ReduceTaskAttemptInfo extends TaskAttemptInfo { protected long shuffleFinishTime; protected long mergeFinishTime; protected long elapsedShuffleTime; protected long elapsedMergeTime; protected long elapsedReduceTime; public ReduceTaskAttemptInfo() { } public ReduceTaskAttemptInfo(TaskAttempt ta, TaskType type) { super(ta, type, false); this.shuffleFinishTime = ta.getShuffleFinishTime(); this.mergeFinishTime = ta.getSortFinishTime(); this.elapsedShuffleTime = Times.elapsed(this.startTime, this.shuffleFinishTime, false); if (this.elapsedShuffleTime == -1) { this.elapsedShuffleTime = 0; } this.elapsedMergeTime = Times.elapsed(this.shuffleFinishTime, this.mergeFinishTime, false); if (this.elapsedMergeTime == -1) { this.elapsedMergeTime = 0; } this.elapsedReduceTime = Times.elapsed(this.mergeFinishTime, this.finishTime, false); if (this.elapsedReduceTime == -1) { this.elapsedReduceTime = 0; } } public long getShuffleFinishTime() { return this.shuffleFinishTime; } public long getMergeFinishTime() { return this.mergeFinishTime; } public long getElapsedShuffleTime() { return this.elapsedShuffleTime; } public long getElapsedMergeTime() { return this.elapsedMergeTime; } public long getElapsedReduceTime() { return this.elapsedReduceTime; } }
2,670
30.797619
75
java
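ReduceTaskAttemptInfo above splits a completed reduce attempt into shuffle, merge and reduce phases; for finished attempts Times.elapsed(start, finish, false) reduces to finish minus start, clamped to 0 when a timestamp is missing. A worked example with invented timestamps:

// Invented timestamps (milliseconds) for one completed reduce attempt.
public class ReducePhaseTimes {
  public static void main(String[] args) {
    long start         = 1_000L;
    long shuffleFinish = 4_000L;
    long mergeFinish   = 5_500L;
    long finish        = 9_000L;

    // Same subtractions ReduceTaskAttemptInfo derives through Times.elapsed(..., false).
    System.out.println("shuffle: " + (shuffleFinish - start) + " ms");        // 3000 ms
    System.out.println("merge:   " + (mergeFinish - shuffleFinish) + " ms");  // 1500 ms
    System.out.println("reduce:  " + (finish - mergeFinish) + " ms");         // 3500 ms
  }
}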
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/ConfEntryInfo.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.webapp.dao; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlRootElement; @XmlRootElement @XmlAccessorType(XmlAccessType.FIELD) public class ConfEntryInfo { protected String name; protected String value; protected String[] source; public ConfEntryInfo() { } public ConfEntryInfo(String key, String value) { this(key, value, null); } public ConfEntryInfo(String key, String value, String[] source) { this.name = key; this.value = value; this.source = source; } public String getName() { return this.name; } public String getValue() { return this.value; } public String[] getSource() { return source; } }
1,609
27.245614
75
java
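The dao records in this section are JAXB beans that the AM web services (bound in AMWebApp) serialize to XML or JSON. As a standalone sketch of that serialization, ConfEntryInfo from the record above can be marshalled directly with the javax.xml.bind API; the property name, value and source below are invented sample data:

import javax.xml.bind.JAXBContext;
import javax.xml.bind.Marshaller;

import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ConfEntryInfo;

// Standalone sketch; the real webapp serializes these beans through the
// JAX-RS/JAXB machinery bound in AMWebApp rather than by hand.
public class ConfEntryXmlDemo {
  public static void main(String[] args) throws Exception {
    ConfEntryInfo entry = new ConfEntryInfo(
        "mapreduce.job.reduces", "2", new String[] { "job.xml" });
    Marshaller m = JAXBContext.newInstance(ConfEntryInfo.class).createMarshaller();
    m.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
    m.marshal(entry, System.out); // emits a <confEntryInfo> element with name, value and source
  }
}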
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobsInfo.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.webapp.dao; import java.util.ArrayList; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlRootElement; @XmlRootElement(name = "jobs") @XmlAccessorType(XmlAccessType.FIELD) public class JobsInfo { protected ArrayList<JobInfo> job = new ArrayList<JobInfo>(); public JobsInfo() { } // JAXB needs this public void add(JobInfo jobInfo) { job.add(jobInfo); } public ArrayList<JobInfo> getJobs() { return job; } }
1,371
30.181818
75
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/ConfInfo.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.webapp.dao; import java.io.IOException; import java.util.ArrayList; import java.util.Map; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlRootElement; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.Path; import org.apache.hadoop.mapreduce.v2.app.job.Job; @XmlRootElement(name = "conf") @XmlAccessorType(XmlAccessType.FIELD) public class ConfInfo { protected String path; protected ArrayList<ConfEntryInfo> property; public ConfInfo() { } public ConfInfo(Job job) throws IOException { this.property = new ArrayList<ConfEntryInfo>(); Configuration jobConf = job.loadConfFile(); this.path = job.getConfFile().toString(); for (Map.Entry<String, String> entry : jobConf) { this.property.add(new ConfEntryInfo(entry.getKey(), entry.getValue(), jobConf.getPropertySources(entry.getKey()))); } } public ArrayList<ConfEntryInfo> getProperties() { return this.property; } public String getPath() { return this.path; } }
1,990
30.109375
76
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobTaskAttemptState.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.webapp.dao; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlRootElement; /** * Job task attempt state. */ @XmlRootElement(name = "jobTaskAttemptState") @XmlAccessorType(XmlAccessType.FIELD) public class JobTaskAttemptState { private String state; public JobTaskAttemptState() { } public JobTaskAttemptState(String state) { this.state = state; } public void setState(String state) { this.state = state; } public String getState() { return this.state; } }
1,430
28.204082
75
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/AMAttemptInfo.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.webapp.dao; import static org.apache.hadoop.yarn.util.StringHelper.join; import static org.apache.hadoop.yarn.util.StringHelper.ujoin; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlRootElement; import org.apache.hadoop.mapreduce.v2.api.records.AMInfo; import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.NodeId; @XmlRootElement(name = "jobAttempt") @XmlAccessorType(XmlAccessType.FIELD) public class AMAttemptInfo { protected String nodeHttpAddress; protected String nodeId; protected int id; protected long startTime; protected String containerId; protected String logsLink; public AMAttemptInfo() { } public AMAttemptInfo(AMInfo amInfo, String jobId, String user) { this.nodeHttpAddress = ""; this.nodeId = ""; String nmHost = amInfo.getNodeManagerHost(); int nmHttpPort = amInfo.getNodeManagerHttpPort(); int nmPort = amInfo.getNodeManagerPort(); if (nmHost != null) { this.nodeHttpAddress = nmHost + ":" + nmHttpPort; NodeId nodeId = NodeId.newInstance(nmHost, nmPort); this.nodeId = nodeId.toString(); } this.id = amInfo.getAppAttemptId().getAttemptId(); this.startTime = amInfo.getStartTime(); this.containerId = ""; this.logsLink = ""; ContainerId containerId = amInfo.getContainerId(); if (containerId != null) { this.containerId = containerId.toString(); this.logsLink = join(MRWebAppUtil.getYARNWebappScheme() + nodeHttpAddress, ujoin("node", "containerlogs", this.containerId, user)); } } public String getNodeHttpAddress() { return this.nodeHttpAddress; } public String getNodeId() { return this.nodeId; } public int getAttemptId() { return this.id; } public long getStartTime() { return this.startTime; } public String getContainerId() { return this.containerId; } public String getLogsLink() { return this.logsLink; } }
2,954
29.78125
80
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TasksInfo.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by tasklicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.webapp.dao; import java.util.ArrayList; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlRootElement; @XmlRootElement(name = "tasks") @XmlAccessorType(XmlAccessType.FIELD) public class TasksInfo { protected ArrayList<TaskInfo> task = new ArrayList<TaskInfo>(); public TasksInfo() { } // JAXB needs this public void add(TaskInfo taskInfo) { task.add(taskInfo); } public ArrayList<TaskInfo> getTasks() { return task; } }
1,385
30.5
75
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskInfo.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.webapp.dao; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlRootElement; import javax.xml.bind.annotation.XmlTransient; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState; import org.apache.hadoop.mapreduce.v2.api.records.TaskReport; import org.apache.hadoop.mapreduce.v2.api.records.TaskState; import org.apache.hadoop.mapreduce.v2.api.records.TaskType; import org.apache.hadoop.mapreduce.v2.app.job.Task; import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; import org.apache.hadoop.mapreduce.v2.util.MRApps; import org.apache.hadoop.yarn.util.Times; @XmlRootElement(name = "task") @XmlAccessorType(XmlAccessType.FIELD) public class TaskInfo { protected long startTime; protected long finishTime; protected long elapsedTime; protected float progress; protected String id; protected TaskState state; protected String type; protected String successfulAttempt; protected String status; @XmlTransient int taskNum; @XmlTransient TaskAttempt successful; public TaskInfo() { } public TaskInfo(Task task) { TaskType ttype = task.getType(); this.type = ttype.toString(); TaskReport report = task.getReport(); this.startTime = report.getStartTime(); this.finishTime = report.getFinishTime(); this.state = report.getTaskState(); this.elapsedTime = Times.elapsed(this.startTime, this.finishTime, this.state == TaskState.RUNNING); if (this.elapsedTime == -1) { this.elapsedTime = 0; } this.progress = report.getProgress() * 100; this.status = report.getStatus(); this.id = MRApps.toString(task.getID()); this.taskNum = task.getID().getId(); this.successful = getSuccessfulAttempt(task); if (successful != null) { this.successfulAttempt = MRApps.toString(successful.getID()); } else { this.successfulAttempt = ""; } } public float getProgress() { return this.progress; } public String getState() { return this.state.toString(); } public String getId() { return this.id; } public int getTaskNum() { return this.taskNum; } public long getStartTime() { return this.startTime; } public long getFinishTime() { return this.finishTime; } public long getElapsedTime() { return this.elapsedTime; } public String getSuccessfulAttempt() { return this.successfulAttempt; } public TaskAttempt getSuccessful() { return this.successful; } private TaskAttempt getSuccessfulAttempt(Task task) { for (TaskAttempt attempt : task.getAttempts().values()) { if (attempt.getState() == TaskAttemptState.SUCCEEDED) { return attempt; } } return null; } public String getStatus() { return status; } }
3,682
27.330769
75
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by taskattemptlicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.webapp.dao; import java.util.ArrayList; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlRootElement; @XmlRootElement(name = "taskAttempts") @XmlAccessorType(XmlAccessType.FIELD) public class TaskAttemptsInfo { protected ArrayList<TaskAttemptInfo> taskAttempt = new ArrayList<TaskAttemptInfo>(); public TaskAttemptsInfo() { } // JAXB needs this public void add(TaskAttemptInfo taskattemptInfo) { taskAttempt.add(taskattemptInfo); } public ArrayList<TaskAttemptInfo> getTaskAttempts() { return taskAttempt; } }
1,483
32.727273
86
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/CounterGroupInfo.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.webapp.dao; import java.util.ArrayList; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; import org.apache.hadoop.mapreduce.Counter; import org.apache.hadoop.mapreduce.CounterGroup; @XmlRootElement(name = "counterGroup") @XmlAccessorType(XmlAccessType.FIELD) public class CounterGroupInfo { protected String counterGroupName; @XmlElement(name = "counter") protected ArrayList<CounterInfo> counter; public CounterGroupInfo() { } public CounterGroupInfo(String name, CounterGroup group, CounterGroup mg, CounterGroup rg) { this.counterGroupName = name; this.counter = new ArrayList<CounterInfo>(); for (Counter c : group) { Counter mc = mg == null ? null : mg.findCounter(c.getName()); Counter rc = rg == null ? null : rg.findCounter(c.getName()); CounterInfo cinfo = new CounterInfo(c, mc, rc); this.counter.add(cinfo); } } }
1,887
33.327273
75
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobInfo.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.webapp.dao; import java.util.ArrayList; import java.util.List; import java.util.Map; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlRootElement; import javax.xml.bind.annotation.XmlTransient; import org.apache.hadoop.mapreduce.JobACL; import org.apache.hadoop.mapreduce.v2.api.records.JobReport; import org.apache.hadoop.mapreduce.v2.api.records.JobState; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; import org.apache.hadoop.mapreduce.v2.api.records.TaskId; import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.mapreduce.v2.app.job.Task; import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; import org.apache.hadoop.mapreduce.v2.util.MRApps; import org.apache.hadoop.mapreduce.v2.util.MRApps.TaskAttemptStateUI; import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.util.Times; @XmlRootElement(name = "job") @XmlAccessorType(XmlAccessType.FIELD) public class JobInfo { // ok for any user to see protected long startTime; protected long finishTime; protected long elapsedTime; protected String id; protected String name; protected String user; protected String queue; protected JobState state; protected int mapsTotal; protected int mapsCompleted; protected int reducesTotal; protected int reducesCompleted; protected float mapProgress; protected float reduceProgress; @XmlTransient protected String mapProgressPercent; @XmlTransient protected String reduceProgressPercent; // these should only be seen if acls allow protected int mapsPending; protected int mapsRunning; protected int reducesPending; protected int reducesRunning; protected boolean uberized; protected String diagnostics; protected int newReduceAttempts = 0; protected int runningReduceAttempts = 0; protected int failedReduceAttempts = 0; protected int killedReduceAttempts = 0; protected int successfulReduceAttempts = 0; protected int newMapAttempts = 0; protected int runningMapAttempts = 0; protected int failedMapAttempts = 0; protected int killedMapAttempts = 0; protected int successfulMapAttempts = 0; protected ArrayList<ConfEntryInfo> acls; public JobInfo() { } public JobInfo(Job job, Boolean hasAccess) { this.id = MRApps.toString(job.getID()); JobReport report = job.getReport(); this.startTime = report.getStartTime(); this.finishTime = report.getFinishTime(); this.elapsedTime = Times.elapsed(this.startTime, this.finishTime); if (this.elapsedTime == -1) { this.elapsedTime = 0; } this.name = job.getName().toString(); this.user = job.getUserName(); this.queue = job.getQueueName(); this.state = job.getState(); this.mapsTotal 
= job.getTotalMaps(); this.mapsCompleted = job.getCompletedMaps(); this.mapProgress = report.getMapProgress() * 100; this.mapProgressPercent = StringUtils.formatPercent(report.getMapProgress(), 2); this.reducesTotal = job.getTotalReduces(); this.reducesCompleted = job.getCompletedReduces(); this.reduceProgress = report.getReduceProgress() * 100; this.reduceProgressPercent = StringUtils.formatPercent(report.getReduceProgress(), 2); this.acls = new ArrayList<ConfEntryInfo>(); if (hasAccess) { this.diagnostics = ""; countTasksAndAttempts(job); this.uberized = job.isUber(); List<String> diagnostics = job.getDiagnostics(); if (diagnostics != null && !diagnostics.isEmpty()) { StringBuffer b = new StringBuffer(); for (String diag : diagnostics) { b.append(diag); } this.diagnostics = b.toString(); } Map<JobACL, AccessControlList> allacls = job.getJobACLs(); if (allacls != null) { for (Map.Entry<JobACL, AccessControlList> entry : allacls.entrySet()) { this.acls.add(new ConfEntryInfo(entry.getKey().getAclName(), entry .getValue().getAclString())); } } } } public int getNewReduceAttempts() { return this.newReduceAttempts; } public int getKilledReduceAttempts() { return this.killedReduceAttempts; } public int getFailedReduceAttempts() { return this.failedReduceAttempts; } public int getRunningReduceAttempts() { return this.runningReduceAttempts; } public int getSuccessfulReduceAttempts() { return this.successfulReduceAttempts; } public int getNewMapAttempts() { return this.newMapAttempts; } public int getKilledMapAttempts() { return this.killedMapAttempts; } public ArrayList<ConfEntryInfo> getAcls() { return acls; } public int getFailedMapAttempts() { return this.failedMapAttempts; } public int getRunningMapAttempts() { return this.runningMapAttempts; } public int getSuccessfulMapAttempts() { return this.successfulMapAttempts; } public int getReducesCompleted() { return this.reducesCompleted; } public int getReducesTotal() { return this.reducesTotal; } public int getReducesPending() { return this.reducesPending; } public int getReducesRunning() { return this.reducesRunning; } public int getMapsCompleted() { return this.mapsCompleted; } public int getMapsTotal() { return this.mapsTotal; } public int getMapsPending() { return this.mapsPending; } public int getMapsRunning() { return this.mapsRunning; } public String getState() { return this.state.toString(); } public String getUserName() { return this.user; } public String getName() { return this.name; } public String getQueueName() { return this.queue; } public String getId() { return this.id; } public long getStartTime() { return this.startTime; } public long getElapsedTime() { return this.elapsedTime; } public long getFinishTime() { return this.finishTime; } public boolean isUberized() { return this.uberized; } public String getdiagnostics() { return this.diagnostics; } public float getMapProgress() { return this.mapProgress; } public String getMapProgressPercent() { return this.mapProgressPercent; } public float getReduceProgress() { return this.reduceProgress; } public String getReduceProgressPercent() { return this.reduceProgressPercent; } /** * Go through a job and update the member variables with counts for * information to output in the page. * * @param job * the job to get counts for. 
*/ private void countTasksAndAttempts(Job job) { final Map<TaskId, Task> tasks = job.getTasks(); if (tasks == null) { return; } for (Task task : tasks.values()) { switch (task.getType()) { case MAP: // Task counts switch (task.getState()) { case RUNNING: ++this.mapsRunning; break; case SCHEDULED: ++this.mapsPending; break; default: break; } break; case REDUCE: // Task counts switch (task.getState()) { case RUNNING: ++this.reducesRunning; break; case SCHEDULED: ++this.reducesPending; break; default: break; } break; default: throw new IllegalStateException( "Task type is neither map nor reduce: " + task.getType()); } // Attempts counts Map<TaskAttemptId, TaskAttempt> attempts = task.getAttempts(); int newAttempts, running, successful, failed, killed; for (TaskAttempt attempt : attempts.values()) { newAttempts = 0; running = 0; successful = 0; failed = 0; killed = 0; if (TaskAttemptStateUI.NEW.correspondsTo(attempt.getState())) { ++newAttempts; } else if (TaskAttemptStateUI.RUNNING.correspondsTo(attempt.getState())) { ++running; } else if (TaskAttemptStateUI.SUCCESSFUL.correspondsTo(attempt .getState())) { ++successful; } else if (TaskAttemptStateUI.FAILED.correspondsTo(attempt.getState())) { ++failed; } else if (TaskAttemptStateUI.KILLED.correspondsTo(attempt.getState())) { ++killed; } switch (task.getType()) { case MAP: this.newMapAttempts += newAttempts; this.runningMapAttempts += running; this.successfulMapAttempts += successful; this.failedMapAttempts += failed; this.killedMapAttempts += killed; break; case REDUCE: this.newReduceAttempts += newAttempts; this.runningReduceAttempts += running; this.successfulReduceAttempts += successful; this.failedReduceAttempts += failed; this.killedReduceAttempts += killed; break; default: throw new IllegalStateException("Task type neither map nor reduce: " + task.getType()); } } } } }
10,050
26.688705
82
java
hadoop
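Editorial note: JobInfo.countTasksAndAttempts() above walks every task and attempt once and accumulates per-state counts separately for the map and reduce buckets. Below is a minimal, hedged sketch of that tallying pattern, using a hypothetical enum in place of Hadoop's TaskAttemptStateUI; it is an illustration, not the Hadoop code.

import java.util.Arrays;
import java.util.EnumMap;
import java.util.List;
import java.util.Map;

public class AttemptTallyDemo {
  // Hypothetical stand-in for the UI-level attempt states grouped by JobInfo.
  enum UiState { NEW, RUNNING, SUCCESSFUL, FAILED, KILLED }

  // Single pass over attempt states, mirroring how countTasksAndAttempts()
  // accumulates the per-state counters for one task type.
  static Map<UiState, Integer> tally(List<UiState> attempts) {
    Map<UiState, Integer> counts = new EnumMap<>(UiState.class);
    for (UiState s : UiState.values()) {
      counts.put(s, 0);
    }
    for (UiState s : attempts) {
      counts.put(s, counts.get(s) + 1);
    }
    return counts;
  }

  public static void main(String[] args) {
    List<UiState> attempts = Arrays.asList(
        UiState.RUNNING, UiState.SUCCESSFUL, UiState.SUCCESSFUL, UiState.FAILED);
    // Prints {NEW=0, RUNNING=1, SUCCESSFUL=2, FAILED=1, KILLED=0}
    System.out.println(tally(attempts));
  }
}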
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/CounterInfo.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.webapp.dao; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlRootElement; import org.apache.hadoop.mapreduce.Counter; @XmlRootElement @XmlAccessorType(XmlAccessType.FIELD) public class CounterInfo { protected String name; protected long totalCounterValue; protected long mapCounterValue; protected long reduceCounterValue; public CounterInfo() { } public CounterInfo(Counter c, Counter mc, Counter rc) { this.name = c.getName(); this.totalCounterValue = c.getValue(); this.mapCounterValue = mc == null ? 0 : mc.getValue(); this.reduceCounterValue = rc == null ? 0 : rc.getValue(); } }
1,558
33.644444
75
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobTaskCounterInfo.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.webapp.dao; import java.util.ArrayList; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlRootElement; import javax.xml.bind.annotation.XmlTransient; import org.apache.hadoop.mapreduce.CounterGroup; import org.apache.hadoop.mapreduce.Counters; import org.apache.hadoop.mapreduce.v2.app.job.Task; import org.apache.hadoop.mapreduce.v2.util.MRApps; @XmlRootElement(name = "jobTaskCounters") @XmlAccessorType(XmlAccessType.FIELD) public class JobTaskCounterInfo { @XmlTransient protected Counters total = null; protected String id; protected ArrayList<TaskCounterGroupInfo> taskCounterGroup; public JobTaskCounterInfo() { } public JobTaskCounterInfo(Task task) { total = task.getCounters(); this.id = MRApps.toString(task.getID()); taskCounterGroup = new ArrayList<TaskCounterGroupInfo>(); if (total != null) { for (CounterGroup g : total) { if (g != null) { TaskCounterGroupInfo cginfo = new TaskCounterGroupInfo(g.getName(), g); taskCounterGroup.add(cginfo); } } } } }
1,995
32.266667
81
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskCounterGroupInfo.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.webapp.dao; import java.util.ArrayList; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlRootElement; import org.apache.hadoop.mapreduce.Counter; import org.apache.hadoop.mapreduce.CounterGroup; @XmlRootElement @XmlAccessorType(XmlAccessType.FIELD) public class TaskCounterGroupInfo { protected String counterGroupName; protected ArrayList<TaskCounterInfo> counter; public TaskCounterGroupInfo() { } public TaskCounterGroupInfo(String name, CounterGroup group) { this.counterGroupName = name; this.counter = new ArrayList<TaskCounterInfo>(); for (Counter c : group) { TaskCounterInfo cinfo = new TaskCounterInfo(c.getName(), c.getValue()); this.counter.add(cinfo); } } }
1,655
32.12
77
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobCounterInfo.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.webapp.dao; import java.util.ArrayList; import java.util.Map; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlRootElement; import javax.xml.bind.annotation.XmlTransient; import org.apache.hadoop.mapreduce.CounterGroup; import org.apache.hadoop.mapreduce.Counters; import org.apache.hadoop.mapreduce.v2.api.records.TaskId; import org.apache.hadoop.mapreduce.v2.app.AppContext; import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.mapreduce.v2.app.job.Task; import org.apache.hadoop.mapreduce.v2.util.MRApps; @XmlRootElement(name = "jobCounters") @XmlAccessorType(XmlAccessType.FIELD) public class JobCounterInfo { @XmlTransient protected Counters total = null; @XmlTransient protected Counters map = null; @XmlTransient protected Counters reduce = null; protected String id; protected ArrayList<CounterGroupInfo> counterGroup; public JobCounterInfo() { } public JobCounterInfo(AppContext ctx, Job job) { getCounters(ctx, job); counterGroup = new ArrayList<CounterGroupInfo>(); this.id = MRApps.toString(job.getID()); if (total != null) { for (CounterGroup g : total) { if (g != null) { CounterGroup mg = map == null ? null : map.getGroup(g.getName()); CounterGroup rg = reduce == null ? null : reduce .getGroup(g.getName()); CounterGroupInfo cginfo = new CounterGroupInfo(g.getName(), g, mg, rg); counterGroup.add(cginfo); } } } } private void getCounters(AppContext ctx, Job job) { if (job == null) { return; } total = job.getAllCounters(); boolean needTotalCounters = false; if (total == null) { total = new Counters(); needTotalCounters = true; } map = new Counters(); reduce = new Counters(); // Get all types of counters Map<TaskId, Task> tasks = job.getTasks(); for (Task t : tasks.values()) { Counters counters = t.getCounters(); if (counters == null) { continue; } switch (t.getType()) { case MAP: map.incrAllCounters(counters); break; case REDUCE: reduce.incrAllCounters(counters); break; } if (needTotalCounters) { total.incrAllCounters(counters); } } } }
3,244
29.327103
75
java
hadoop
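Editorial note: JobCounterInfo.getCounters() above folds each task's counters into map, reduce, and total buckets via Counters.incrAllCounters(). A minimal, hedged sketch of the same roll-up follows, with plain java.util maps standing in for Hadoop's Counters type; names and values here are illustrative only.

import java.util.HashMap;
import java.util.Map;

public class CounterRollupDemo {
  // Add every counter in 'delta' into 'target', creating entries as needed.
  static void incrAllCounters(Map<String, Long> target, Map<String, Long> delta) {
    for (Map.Entry<String, Long> e : delta.entrySet()) {
      target.merge(e.getKey(), e.getValue(), Long::sum);
    }
  }

  public static void main(String[] args) {
    Map<String, Long> total = new HashMap<>();
    Map<String, Long> map = new HashMap<>();
    Map<String, Long> reduce = new HashMap<>();

    // One hypothetical map task and one reduce task, each with its own counters.
    Map<String, Long> mapTask = new HashMap<>();
    mapTask.put("SPILLED_RECORDS", 10L);
    Map<String, Long> reduceTask = new HashMap<>();
    reduceTask.put("SPILLED_RECORDS", 4L);

    incrAllCounters(map, mapTask);
    incrAllCounters(total, mapTask);
    incrAllCounters(reduce, reduceTask);
    incrAllCounters(total, reduceTask);

    System.out.println("map=" + map + " reduce=" + reduce + " total=" + total);
  }
}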
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/ClientService.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.client; import java.net.InetSocketAddress; import org.apache.hadoop.service.Service; public interface ClientService extends Service { public abstract InetSocketAddress getBindAddress(); public abstract int getHttpPort(); }
1,083
33.967742
75
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ @InterfaceAudience.Private package org.apache.hadoop.mapreduce.v2.app.client; import org.apache.hadoop.classification.InterfaceAudience;
943
43.952381
75
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.client; import java.io.IOException; import java.net.InetSocketAddress; import java.util.Arrays; import java.util.Collection; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.http.HttpConfig.Policy; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.mapreduce.JobACL; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.TypeConverter; import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenResponse; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptResponse; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersResponse; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenResponse; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsResponse; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportResponse; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsResponse; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportResponse; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportResponse; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsResponse; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobResponse; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptResponse; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskRequest; import 
org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskResponse; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.RenewDelegationTokenRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.RenewDelegationTokenResponse; import org.apache.hadoop.mapreduce.v2.api.records.JobId; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; import org.apache.hadoop.mapreduce.v2.api.records.TaskId; import org.apache.hadoop.mapreduce.v2.api.records.TaskType; import org.apache.hadoop.mapreduce.v2.app.AppContext; import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.mapreduce.v2.app.job.Task; import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; import org.apache.hadoop.mapreduce.v2.app.job.event.JobDiagnosticsUpdateEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptDiagnosticsUpdateEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType; import org.apache.hadoop.mapreduce.v2.app.security.authorize.MRAMPolicyProvider; import org.apache.hadoop.mapreduce.v2.app.webapp.AMWebApp; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.PolicyProvider; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.ipc.YarnRPC; import org.apache.hadoop.yarn.webapp.WebApp; import org.apache.hadoop.yarn.webapp.WebApps; /** * This module is responsible for talking to the * jobclient (user facing). * */ public class MRClientService extends AbstractService implements ClientService { static final Log LOG = LogFactory.getLog(MRClientService.class); private MRClientProtocol protocolHandler; private Server server; private WebApp webApp; private InetSocketAddress bindAddress; private AppContext appContext; public MRClientService(AppContext appContext) { super(MRClientService.class.getName()); this.appContext = appContext; this.protocolHandler = new MRClientProtocolHandler(); } protected void serviceStart() throws Exception { Configuration conf = getConfig(); YarnRPC rpc = YarnRPC.create(conf); InetSocketAddress address = new InetSocketAddress(0); server = rpc.getServer(MRClientProtocol.class, protocolHandler, address, conf, appContext.getClientToAMTokenSecretManager(), conf.getInt(MRJobConfig.MR_AM_JOB_CLIENT_THREAD_COUNT, MRJobConfig.DEFAULT_MR_AM_JOB_CLIENT_THREAD_COUNT), MRJobConfig.MR_AM_JOB_CLIENT_PORT_RANGE); // Enable service authorization? if (conf.getBoolean( CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, false)) { refreshServiceAcls(conf, new MRAMPolicyProvider()); } server.start(); this.bindAddress = NetUtils.createSocketAddrForHost(appContext.getNMHostname(), server.getListenerAddress().getPort()); LOG.info("Instantiated MRClientService at " + this.bindAddress); try { // Explicitly disabling SSL for map reduce task as we can't allow MR users // to gain access to keystore file for opening SSL listener. 
We can trust // RM/NM to issue SSL certificates but definitely not MR-AM as it is // running in user-land. webApp = WebApps.$for("mapreduce", AppContext.class, appContext, "ws") .withHttpPolicy(conf, Policy.HTTP_ONLY).start(new AMWebApp()); } catch (Exception e) { LOG.error("Webapps failed to start. Ignoring for now:", e); } super.serviceStart(); } void refreshServiceAcls(Configuration configuration, PolicyProvider policyProvider) { this.server.refreshServiceAcl(configuration, policyProvider); } @Override protected void serviceStop() throws Exception { if (server != null) { server.stop(); } if (webApp != null) { webApp.stop(); } super.serviceStop(); } @Override public InetSocketAddress getBindAddress() { return bindAddress; } @Override public int getHttpPort() { return webApp.port(); } class MRClientProtocolHandler implements MRClientProtocol { private RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null); @Override public InetSocketAddress getConnectAddress() { return getBindAddress(); } private Job verifyAndGetJob(JobId jobID, JobACL accessType, boolean exceptionThrow) throws IOException { Job job = appContext.getJob(jobID); if (job == null && exceptionThrow) { throw new IOException("Unknown Job " + jobID); } UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); if (job != null && !job.checkAccess(ugi, accessType)) { throw new AccessControlException("User " + ugi.getShortUserName() + " cannot perform operation " + accessType.name() + " on " + jobID); } return job; } private Task verifyAndGetTask(TaskId taskID, JobACL accessType) throws IOException { Task task = verifyAndGetJob(taskID.getJobId(), accessType, true).getTask(taskID); if (task == null) { throw new IOException("Unknown Task " + taskID); } return task; } private TaskAttempt verifyAndGetAttempt(TaskAttemptId attemptID, JobACL accessType) throws IOException { TaskAttempt attempt = verifyAndGetTask(attemptID.getTaskId(), accessType).getAttempt(attemptID); if (attempt == null) { throw new IOException("Unknown TaskAttempt " + attemptID); } return attempt; } @Override public GetCountersResponse getCounters(GetCountersRequest request) throws IOException { JobId jobId = request.getJobId(); Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB, true); GetCountersResponse response = recordFactory.newRecordInstance(GetCountersResponse.class); response.setCounters(TypeConverter.toYarn(job.getAllCounters())); return response; } @Override public GetJobReportResponse getJobReport(GetJobReportRequest request) throws IOException { JobId jobId = request.getJobId(); // false is for retain compatibility Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB, false); GetJobReportResponse response = recordFactory.newRecordInstance(GetJobReportResponse.class); if (job != null) { response.setJobReport(job.getReport()); } else { response.setJobReport(null); } return response; } @Override public GetTaskAttemptReportResponse getTaskAttemptReport( GetTaskAttemptReportRequest request) throws IOException { TaskAttemptId taskAttemptId = request.getTaskAttemptId(); GetTaskAttemptReportResponse response = recordFactory.newRecordInstance(GetTaskAttemptReportResponse.class); response.setTaskAttemptReport( verifyAndGetAttempt(taskAttemptId, JobACL.VIEW_JOB).getReport()); return response; } @Override public GetTaskReportResponse getTaskReport(GetTaskReportRequest request) throws IOException { TaskId taskId = request.getTaskId(); GetTaskReportResponse response = recordFactory.newRecordInstance(GetTaskReportResponse.class); 
response.setTaskReport( verifyAndGetTask(taskId, JobACL.VIEW_JOB).getReport()); return response; } @Override public GetTaskAttemptCompletionEventsResponse getTaskAttemptCompletionEvents( GetTaskAttemptCompletionEventsRequest request) throws IOException { JobId jobId = request.getJobId(); int fromEventId = request.getFromEventId(); int maxEvents = request.getMaxEvents(); Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB, true); GetTaskAttemptCompletionEventsResponse response = recordFactory.newRecordInstance(GetTaskAttemptCompletionEventsResponse.class); response.addAllCompletionEvents(Arrays.asList( job.getTaskAttemptCompletionEvents(fromEventId, maxEvents))); return response; } @SuppressWarnings("unchecked") @Override public KillJobResponse killJob(KillJobRequest request) throws IOException { JobId jobId = request.getJobId(); UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser(); String message = "Kill job " + jobId + " received from " + callerUGI + " at " + Server.getRemoteAddress(); LOG.info(message); verifyAndGetJob(jobId, JobACL.MODIFY_JOB, false); appContext.getEventHandler().handle( new JobDiagnosticsUpdateEvent(jobId, message)); appContext.getEventHandler().handle( new JobEvent(jobId, JobEventType.JOB_KILL)); KillJobResponse response = recordFactory.newRecordInstance(KillJobResponse.class); return response; } @SuppressWarnings("unchecked") @Override public KillTaskResponse killTask(KillTaskRequest request) throws IOException { TaskId taskId = request.getTaskId(); UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser(); String message = "Kill task " + taskId + " received from " + callerUGI + " at " + Server.getRemoteAddress(); LOG.info(message); verifyAndGetTask(taskId, JobACL.MODIFY_JOB); appContext.getEventHandler().handle( new TaskEvent(taskId, TaskEventType.T_KILL)); KillTaskResponse response = recordFactory.newRecordInstance(KillTaskResponse.class); return response; } @SuppressWarnings("unchecked") @Override public KillTaskAttemptResponse killTaskAttempt( KillTaskAttemptRequest request) throws IOException { TaskAttemptId taskAttemptId = request.getTaskAttemptId(); UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser(); String message = "Kill task attempt " + taskAttemptId + " received from " + callerUGI + " at " + Server.getRemoteAddress(); LOG.info(message); verifyAndGetAttempt(taskAttemptId, JobACL.MODIFY_JOB); appContext.getEventHandler().handle( new TaskAttemptDiagnosticsUpdateEvent(taskAttemptId, message)); appContext.getEventHandler().handle( new TaskAttemptEvent(taskAttemptId, TaskAttemptEventType.TA_KILL)); KillTaskAttemptResponse response = recordFactory.newRecordInstance(KillTaskAttemptResponse.class); return response; } @Override public GetDiagnosticsResponse getDiagnostics( GetDiagnosticsRequest request) throws IOException { TaskAttemptId taskAttemptId = request.getTaskAttemptId(); GetDiagnosticsResponse response = recordFactory.newRecordInstance(GetDiagnosticsResponse.class); response.addAllDiagnostics(verifyAndGetAttempt(taskAttemptId, JobACL.VIEW_JOB).getDiagnostics()); return response; } @SuppressWarnings("unchecked") @Override public FailTaskAttemptResponse failTaskAttempt( FailTaskAttemptRequest request) throws IOException { TaskAttemptId taskAttemptId = request.getTaskAttemptId(); UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser(); String message = "Fail task attempt " + taskAttemptId + " received from " + callerUGI + " at " + Server.getRemoteAddress(); LOG.info(message); 
verifyAndGetAttempt(taskAttemptId, JobACL.MODIFY_JOB); appContext.getEventHandler().handle( new TaskAttemptDiagnosticsUpdateEvent(taskAttemptId, message)); appContext.getEventHandler().handle( new TaskAttemptEvent(taskAttemptId, TaskAttemptEventType.TA_FAILMSG_BY_CLIENT)); FailTaskAttemptResponse response = recordFactory. newRecordInstance(FailTaskAttemptResponse.class); return response; } private final Object getTaskReportsLock = new Object(); @Override public GetTaskReportsResponse getTaskReports( GetTaskReportsRequest request) throws IOException { JobId jobId = request.getJobId(); TaskType taskType = request.getTaskType(); GetTaskReportsResponse response = recordFactory.newRecordInstance(GetTaskReportsResponse.class); Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB, true); Collection<Task> tasks = job.getTasks(taskType).values(); LOG.info("Getting task report for " + taskType + " " + jobId + ". Report-size will be " + tasks.size()); // Take lock to allow only one call, otherwise heap will blow up because // of counters in the report when there are multiple callers. synchronized (getTaskReportsLock) { for (Task task : tasks) { response.addTaskReport(task.getReport()); } } return response; } @Override public GetDelegationTokenResponse getDelegationToken( GetDelegationTokenRequest request) throws IOException { throw new IOException("MR AM not authorized to issue delegation" + " token"); } @Override public RenewDelegationTokenResponse renewDelegationToken( RenewDelegationTokenRequest request) throws IOException { throw new IOException("MR AM not authorized to renew delegation" + " token"); } @Override public CancelDelegationTokenResponse cancelDelegationToken( CancelDelegationTokenRequest request) throws IOException { throw new IOException("MR AM not authorized to cancel delegation" + " token"); } } public KillTaskAttemptResponse forceKillTaskAttempt( KillTaskAttemptRequest request) throws YarnException, IOException { return protocolHandler.killTaskAttempt(request); } public WebApp getWebApp() { return webApp; } }
18,008
40.210526
97
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.rm; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Set; import java.util.concurrent.BlockingQueue; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.JobCounter; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent; import org.apache.hadoop.mapreduce.jobhistory.NormalizedResourceEvent; import org.apache.hadoop.mapreduce.v2.api.records.JobId; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; import org.apache.hadoop.mapreduce.v2.api.records.TaskType; import org.apache.hadoop.mapreduce.v2.app.AppContext; import org.apache.hadoop.mapreduce.v2.app.client.ClientService; import org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.JobDiagnosticsUpdateEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType; import org.apache.hadoop.mapreduce.v2.app.job.event.JobUpdatedNodesEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptDiagnosticsUpdateEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptKillEvent; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.StringInterner; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerExitStatus; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.NMToken; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeReport; import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; import 
org.apache.hadoop.yarn.api.records.Token; import org.apache.hadoop.yarn.client.ClientRMProxy; import org.apache.hadoop.yarn.client.api.NMTokenCache; import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException; import org.apache.hadoop.yarn.exceptions.ApplicationMasterNotRegisteredException; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; import org.apache.hadoop.yarn.util.Clock; import org.apache.hadoop.yarn.util.RackResolver; import org.apache.hadoop.yarn.util.resource.Resources; import com.google.common.annotations.VisibleForTesting; /** * Allocates the container from the ResourceManager scheduler. */ public class RMContainerAllocator extends RMContainerRequestor implements ContainerAllocator { static final Log LOG = LogFactory.getLog(RMContainerAllocator.class); public static final float DEFAULT_COMPLETED_MAPS_PERCENT_FOR_REDUCE_SLOWSTART = 0.05f; static final Priority PRIORITY_FAST_FAIL_MAP; static final Priority PRIORITY_REDUCE; static final Priority PRIORITY_MAP; @VisibleForTesting public static final String RAMPDOWN_DIAGNOSTIC = "Reducer preempted " + "to make room for pending map attempts"; private Thread eventHandlingThread; private final AtomicBoolean stopped; static { PRIORITY_FAST_FAIL_MAP = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(Priority.class); PRIORITY_FAST_FAIL_MAP.setPriority(5); PRIORITY_REDUCE = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(Priority.class); PRIORITY_REDUCE.setPriority(10); PRIORITY_MAP = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(Priority.class); PRIORITY_MAP.setPriority(20); } /* Vocabulary Used: pending -> requests which are NOT yet sent to RM scheduled -> requests which are sent to RM but not yet assigned assigned -> requests which are assigned to a container completed -> request corresponding to which container has completed Lifecycle of map scheduled->assigned->completed Lifecycle of reduce pending->scheduled->assigned->completed Maps are scheduled as soon as their requests are received. Reduces are added to the pending and are ramped up (added to scheduled) based on completed maps and current availability in the cluster. */ //reduces which are not yet scheduled private final LinkedList<ContainerRequest> pendingReduces = new LinkedList<ContainerRequest>(); //holds information about the assigned containers to task attempts private final AssignedRequests assignedRequests = new AssignedRequests(); //holds scheduled requests to be fulfilled by RM private final ScheduledRequests scheduledRequests = new ScheduledRequests(); private int containersAllocated = 0; private int containersReleased = 0; private int hostLocalAssigned = 0; private int rackLocalAssigned = 0; private int lastCompletedTasks = 0; private boolean recalculateReduceSchedule = false; private Resource mapResourceRequest = Resources.none(); private Resource reduceResourceRequest = Resources.none(); private boolean reduceStarted = false; private float maxReduceRampupLimit = 0; private float maxReducePreemptionLimit = 0; /** * after this threshold, if the container request is not allocated, it is * considered delayed. 
*/ private long allocationDelayThresholdMs = 0; private float reduceSlowStart = 0; private int maxRunningMaps = 0; private int maxRunningReduces = 0; private long retryInterval; private long retrystartTime; private Clock clock; @VisibleForTesting protected BlockingQueue<ContainerAllocatorEvent> eventQueue = new LinkedBlockingQueue<ContainerAllocatorEvent>(); private ScheduleStats scheduleStats = new ScheduleStats(); private String mapNodeLabelExpression; private String reduceNodeLabelExpression; public RMContainerAllocator(ClientService clientService, AppContext context) { super(clientService, context); this.stopped = new AtomicBoolean(false); this.clock = context.getClock(); } @Override protected void serviceInit(Configuration conf) throws Exception { super.serviceInit(conf); reduceSlowStart = conf.getFloat( MRJobConfig.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART, DEFAULT_COMPLETED_MAPS_PERCENT_FOR_REDUCE_SLOWSTART); maxReduceRampupLimit = conf.getFloat( MRJobConfig.MR_AM_JOB_REDUCE_RAMPUP_UP_LIMIT, MRJobConfig.DEFAULT_MR_AM_JOB_REDUCE_RAMP_UP_LIMIT); maxReducePreemptionLimit = conf.getFloat( MRJobConfig.MR_AM_JOB_REDUCE_PREEMPTION_LIMIT, MRJobConfig.DEFAULT_MR_AM_JOB_REDUCE_PREEMPTION_LIMIT); allocationDelayThresholdMs = conf.getInt( MRJobConfig.MR_JOB_REDUCER_PREEMPT_DELAY_SEC, MRJobConfig.DEFAULT_MR_JOB_REDUCER_PREEMPT_DELAY_SEC) * 1000;//sec -> ms maxRunningMaps = conf.getInt(MRJobConfig.JOB_RUNNING_MAP_LIMIT, MRJobConfig.DEFAULT_JOB_RUNNING_MAP_LIMIT); maxRunningReduces = conf.getInt(MRJobConfig.JOB_RUNNING_REDUCE_LIMIT, MRJobConfig.DEFAULT_JOB_RUNNING_REDUCE_LIMIT); RackResolver.init(conf); retryInterval = getConfig().getLong(MRJobConfig.MR_AM_TO_RM_WAIT_INTERVAL_MS, MRJobConfig.DEFAULT_MR_AM_TO_RM_WAIT_INTERVAL_MS); mapNodeLabelExpression = conf.get(MRJobConfig.MAP_NODE_LABEL_EXP); reduceNodeLabelExpression = conf.get(MRJobConfig.REDUCE_NODE_LABEL_EXP); // Init startTime to current time. If all goes well, it will be reset after // first attempt to contact RM. 
retrystartTime = System.currentTimeMillis(); } @Override protected void serviceStart() throws Exception { this.eventHandlingThread = new Thread() { @SuppressWarnings("unchecked") @Override public void run() { ContainerAllocatorEvent event; while (!stopped.get() && !Thread.currentThread().isInterrupted()) { try { event = RMContainerAllocator.this.eventQueue.take(); } catch (InterruptedException e) { if (!stopped.get()) { LOG.error("Returning, interrupted : " + e); } return; } try { handleEvent(event); } catch (Throwable t) { LOG.error("Error in handling event type " + event.getType() + " to the ContainreAllocator", t); // Kill the AM eventHandler.handle(new JobEvent(getJob().getID(), JobEventType.INTERNAL_ERROR)); return; } } } }; this.eventHandlingThread.start(); super.serviceStart(); } @Override protected synchronized void heartbeat() throws Exception { scheduleStats.updateAndLogIfChanged("Before Scheduling: "); List<Container> allocatedContainers = getResources(); if (allocatedContainers != null && allocatedContainers.size() > 0) { scheduledRequests.assign(allocatedContainers); } int completedMaps = getJob().getCompletedMaps(); int completedTasks = completedMaps + getJob().getCompletedReduces(); if ((lastCompletedTasks != completedTasks) || (scheduledRequests.maps.size() > 0)) { lastCompletedTasks = completedTasks; recalculateReduceSchedule = true; } if (recalculateReduceSchedule) { preemptReducesIfNeeded(); scheduleReduces( getJob().getTotalMaps(), completedMaps, scheduledRequests.maps.size(), scheduledRequests.reduces.size(), assignedRequests.maps.size(), assignedRequests.reduces.size(), mapResourceRequest, reduceResourceRequest, pendingReduces.size(), maxReduceRampupLimit, reduceSlowStart); recalculateReduceSchedule = false; } scheduleStats.updateAndLogIfChanged("After Scheduling: "); } @Override protected void serviceStop() throws Exception { if (stopped.getAndSet(true)) { // return if already stopped return; } if (eventHandlingThread != null) { eventHandlingThread.interrupt(); } super.serviceStop(); scheduleStats.log("Final Stats: "); } @Private @VisibleForTesting AssignedRequests getAssignedRequests() { return assignedRequests; } @Private @VisibleForTesting ScheduledRequests getScheduledRequests() { return scheduledRequests; } public boolean getIsReduceStarted() { return reduceStarted; } public void setIsReduceStarted(boolean reduceStarted) { this.reduceStarted = reduceStarted; } @Override public void handle(ContainerAllocatorEvent event) { int qSize = eventQueue.size(); if (qSize != 0 && qSize % 1000 == 0) { LOG.info("Size of event-queue in RMContainerAllocator is " + qSize); } int remCapacity = eventQueue.remainingCapacity(); if (remCapacity < 1000) { LOG.warn("Very low remaining capacity in the event-queue " + "of RMContainerAllocator: " + remCapacity); } try { eventQueue.put(event); } catch (InterruptedException e) { throw new YarnRuntimeException(e); } } @SuppressWarnings({ "unchecked" }) protected synchronized void handleEvent(ContainerAllocatorEvent event) { recalculateReduceSchedule = true; if (event.getType() == ContainerAllocator.EventType.CONTAINER_REQ) { ContainerRequestEvent reqEvent = (ContainerRequestEvent) event; JobId jobId = getJob().getID(); Resource supportedMaxContainerCapability = getMaxContainerCapability(); if (reqEvent.getAttemptID().getTaskId().getTaskType().equals(TaskType.MAP)) { if (mapResourceRequest.equals(Resources.none())) { mapResourceRequest = reqEvent.getCapability(); eventHandler.handle(new JobHistoryEvent(jobId, new NormalizedResourceEvent( 
org.apache.hadoop.mapreduce.TaskType.MAP, mapResourceRequest .getMemory()))); LOG.info("mapResourceRequest:" + mapResourceRequest); if (mapResourceRequest.getMemory() > supportedMaxContainerCapability .getMemory() || mapResourceRequest.getVirtualCores() > supportedMaxContainerCapability .getVirtualCores()) { String diagMsg = "MAP capability required is more than the supported " + "max container capability in the cluster. Killing the Job. mapResourceRequest: " + mapResourceRequest + " maxContainerCapability:" + supportedMaxContainerCapability; LOG.info(diagMsg); eventHandler.handle(new JobDiagnosticsUpdateEvent(jobId, diagMsg)); eventHandler.handle(new JobEvent(jobId, JobEventType.JOB_KILL)); } } // set the resources reqEvent.getCapability().setMemory(mapResourceRequest.getMemory()); reqEvent.getCapability().setVirtualCores( mapResourceRequest.getVirtualCores()); scheduledRequests.addMap(reqEvent);//maps are immediately scheduled } else { if (reduceResourceRequest.equals(Resources.none())) { reduceResourceRequest = reqEvent.getCapability(); eventHandler.handle(new JobHistoryEvent(jobId, new NormalizedResourceEvent( org.apache.hadoop.mapreduce.TaskType.REDUCE, reduceResourceRequest.getMemory()))); LOG.info("reduceResourceRequest:" + reduceResourceRequest); if (reduceResourceRequest.getMemory() > supportedMaxContainerCapability .getMemory() || reduceResourceRequest.getVirtualCores() > supportedMaxContainerCapability .getVirtualCores()) { String diagMsg = "REDUCE capability required is more than the " + "supported max container capability in the cluster. Killing the " + "Job. reduceResourceRequest: " + reduceResourceRequest + " maxContainerCapability:" + supportedMaxContainerCapability; LOG.info(diagMsg); eventHandler.handle(new JobDiagnosticsUpdateEvent(jobId, diagMsg)); eventHandler.handle(new JobEvent(jobId, JobEventType.JOB_KILL)); } } // set the resources reqEvent.getCapability().setMemory(reduceResourceRequest.getMemory()); reqEvent.getCapability().setVirtualCores( reduceResourceRequest.getVirtualCores()); if (reqEvent.getEarlierAttemptFailed()) { //add to the front of queue for fail fast pendingReduces.addFirst(new ContainerRequest(reqEvent, PRIORITY_REDUCE, reduceNodeLabelExpression)); } else { pendingReduces.add(new ContainerRequest(reqEvent, PRIORITY_REDUCE, reduceNodeLabelExpression)); //reduces are added to pending and are slowly ramped up } } } else if ( event.getType() == ContainerAllocator.EventType.CONTAINER_DEALLOCATE) { LOG.info("Processing the event " + event.toString()); TaskAttemptId aId = event.getAttemptID(); boolean removed = scheduledRequests.remove(aId); if (!removed) { ContainerId containerId = assignedRequests.get(aId); if (containerId != null) { removed = true; assignedRequests.remove(aId); containersReleased++; pendingRelease.add(containerId); release(containerId); } } if (!removed) { LOG.error("Could not deallocate container for task attemptId " + aId); } } else if ( event.getType() == ContainerAllocator.EventType.CONTAINER_FAILED) { ContainerFailedEvent fEv = (ContainerFailedEvent) event; String host = getHost(fEv.getContMgrAddress()); containerFailedOnHost(host); } } private static String getHost(String contMgrAddress) { String host = contMgrAddress; String[] hostport = host.split(":"); if (hostport.length == 2) { host = hostport[0]; } return host; } @Private @VisibleForTesting synchronized void setReduceResourceRequest(Resource res) { this.reduceResourceRequest = res; } @Private @VisibleForTesting synchronized void setMapResourceRequest(Resource res) { 
this.mapResourceRequest = res; } @Private @VisibleForTesting void preemptReducesIfNeeded() { if (reduceResourceRequest.equals(Resources.none())) { return; // no reduces } //check if reduces have taken over the whole cluster and there are //unassigned maps if (scheduledRequests.maps.size() > 0) { Resource resourceLimit = getResourceLimit(); Resource availableResourceForMap = Resources.subtract( resourceLimit, Resources.multiply(reduceResourceRequest, assignedRequests.reduces.size() - assignedRequests.preemptionWaitingReduces.size())); // availableMemForMap must be sufficient to run at least 1 map if (ResourceCalculatorUtils.computeAvailableContainers(availableResourceForMap, mapResourceRequest, getSchedulerResourceTypes()) <= 0) { // to make sure new containers are given to maps and not reduces // ramp down all scheduled reduces if any // (since reduces are scheduled at higher priority than maps) LOG.info("Ramping down all scheduled reduces:" + scheduledRequests.reduces.size()); for (ContainerRequest req : scheduledRequests.reduces.values()) { pendingReduces.add(req); } scheduledRequests.reduces.clear(); //do further checking to find the number of map requests that were //hanging around for a while int hangingMapRequests = getNumOfHangingRequests(scheduledRequests.maps); if (hangingMapRequests > 0) { // preempt for making space for at least one map int preemptionReduceNumForOneMap = ResourceCalculatorUtils.divideAndCeilContainers(mapResourceRequest, reduceResourceRequest, getSchedulerResourceTypes()); int preemptionReduceNumForPreemptionLimit = ResourceCalculatorUtils.divideAndCeilContainers( Resources.multiply(resourceLimit, maxReducePreemptionLimit), reduceResourceRequest, getSchedulerResourceTypes()); int preemptionReduceNumForAllMaps = ResourceCalculatorUtils.divideAndCeilContainers( Resources.multiply(mapResourceRequest, hangingMapRequests), reduceResourceRequest, getSchedulerResourceTypes()); int toPreempt = Math.min(Math.max(preemptionReduceNumForOneMap, preemptionReduceNumForPreemptionLimit), preemptionReduceNumForAllMaps); LOG.info("Going to preempt " + toPreempt + " due to lack of space for maps"); assignedRequests.preemptReduce(toPreempt); } } } } private int getNumOfHangingRequests(Map<TaskAttemptId, ContainerRequest> requestMap) { if (allocationDelayThresholdMs <= 0) return requestMap.size(); int hangingRequests = 0; long currTime = clock.getTime(); for (ContainerRequest request: requestMap.values()) { long delay = currTime - request.requestTimeMs; if (delay > allocationDelayThresholdMs) hangingRequests++; } return hangingRequests; } @Private public void scheduleReduces( int totalMaps, int completedMaps, int scheduledMaps, int scheduledReduces, int assignedMaps, int assignedReduces, Resource mapResourceReqt, Resource reduceResourceReqt, int numPendingReduces, float maxReduceRampupLimit, float reduceSlowStart) { if (numPendingReduces == 0) { return; } // get available resources for this job Resource headRoom = getAvailableResources(); if (headRoom == null) { headRoom = Resources.none(); } LOG.info("Recalculating schedule, headroom=" + headRoom); //check for slow start if (!getIsReduceStarted()) {//not set yet int completedMapsForReduceSlowstart = (int)Math.ceil(reduceSlowStart * totalMaps); if(completedMaps < completedMapsForReduceSlowstart) { LOG.info("Reduce slow start threshold not met. " + "completedMapsForReduceSlowstart " + completedMapsForReduceSlowstart); return; } else { LOG.info("Reduce slow start threshold reached. 
Scheduling reduces."); setIsReduceStarted(true); } } //if all maps are assigned, then ramp up all reduces irrespective of the //headroom if (scheduledMaps == 0 && numPendingReduces > 0) { LOG.info("All maps assigned. " + "Ramping up all remaining reduces:" + numPendingReduces); scheduleAllReduces(); return; } float completedMapPercent = 0f; if (totalMaps != 0) {//support for 0 maps completedMapPercent = (float)completedMaps/totalMaps; } else { completedMapPercent = 1; } Resource netScheduledMapResource = Resources.multiply(mapResourceReqt, (scheduledMaps + assignedMaps)); Resource netScheduledReduceResource = Resources.multiply(reduceResourceReqt, (scheduledReduces + assignedReduces)); Resource finalMapResourceLimit; Resource finalReduceResourceLimit; // ramp up the reduces based on completed map percentage Resource totalResourceLimit = getResourceLimit(); Resource idealReduceResourceLimit = Resources.multiply(totalResourceLimit, Math.min(completedMapPercent, maxReduceRampupLimit)); Resource ideaMapResourceLimit = Resources.subtract(totalResourceLimit, idealReduceResourceLimit); // check if there aren't enough maps scheduled, give the free map capacity // to reduce. // Even when container number equals, there may be unused resources in one // dimension if (ResourceCalculatorUtils.computeAvailableContainers(ideaMapResourceLimit, mapResourceReqt, getSchedulerResourceTypes()) >= (scheduledMaps + assignedMaps)) { // enough resource given to maps, given the remaining to reduces Resource unusedMapResourceLimit = Resources.subtract(ideaMapResourceLimit, netScheduledMapResource); finalReduceResourceLimit = Resources.add(idealReduceResourceLimit, unusedMapResourceLimit); finalMapResourceLimit = Resources.subtract(totalResourceLimit, finalReduceResourceLimit); } else { finalMapResourceLimit = ideaMapResourceLimit; finalReduceResourceLimit = idealReduceResourceLimit; } LOG.info("completedMapPercent " + completedMapPercent + " totalResourceLimit:" + totalResourceLimit + " finalMapResourceLimit:" + finalMapResourceLimit + " finalReduceResourceLimit:" + finalReduceResourceLimit + " netScheduledMapResource:" + netScheduledMapResource + " netScheduledReduceResource:" + netScheduledReduceResource); int rampUp = ResourceCalculatorUtils.computeAvailableContainers(Resources.subtract( finalReduceResourceLimit, netScheduledReduceResource), reduceResourceReqt, getSchedulerResourceTypes()); if (rampUp > 0) { rampUp = Math.min(rampUp, numPendingReduces); LOG.info("Ramping up " + rampUp); rampUpReduces(rampUp); } else if (rampUp < 0) { int rampDown = -1 * rampUp; rampDown = Math.min(rampDown, scheduledReduces); LOG.info("Ramping down " + rampDown); rampDownReduces(rampDown); } } @Private public void scheduleAllReduces() { for (ContainerRequest req : pendingReduces) { scheduledRequests.addReduce(req); } pendingReduces.clear(); } @Private public void rampUpReduces(int rampUp) { //more reduce to be scheduled for (int i = 0; i < rampUp; i++) { ContainerRequest request = pendingReduces.removeFirst(); scheduledRequests.addReduce(request); } } @Private public void rampDownReduces(int rampDown) { //remove from the scheduled and move back to pending for (int i = 0; i < rampDown; i++) { ContainerRequest request = scheduledRequests.removeReduce(); pendingReduces.add(request); } } @SuppressWarnings("unchecked") private List<Container> getResources() throws Exception { applyConcurrentTaskLimits(); // will be null the first time Resource headRoom = getAvailableResources() == null ? 
Resources.none() : Resources.clone(getAvailableResources()); AllocateResponse response; /* * If contact with RM is lost, the AM will wait MR_AM_TO_RM_WAIT_INTERVAL_MS * milliseconds before aborting. During this interval, AM will still try * to contact the RM. */ try { response = makeRemoteRequest(); // Reset retry count if no exception occurred. retrystartTime = System.currentTimeMillis(); } catch (ApplicationAttemptNotFoundException e ) { // This can happen if the RM has been restarted. If it is in that state, // this application must clean itself up. eventHandler.handle(new JobEvent(this.getJob().getID(), JobEventType.JOB_AM_REBOOT)); throw new YarnRuntimeException( "Resource Manager doesn't recognize AttemptId: " + this.getContext().getApplicationAttemptId(), e); } catch (ApplicationMasterNotRegisteredException e) { LOG.info("ApplicationMaster is out of sync with ResourceManager," + " hence resync and send outstanding requests."); // RM may have restarted, re-register with RM. lastResponseID = 0; register(); addOutstandingRequestOnResync(); return null; } catch (Exception e) { // This can happen when the connection to the RM has gone down. Keep // re-trying until the retryInterval has expired. if (System.currentTimeMillis() - retrystartTime >= retryInterval) { LOG.error("Could not contact RM after " + retryInterval + " milliseconds."); eventHandler.handle(new JobEvent(this.getJob().getID(), JobEventType.JOB_AM_REBOOT)); throw new YarnRuntimeException("Could not contact RM after " + retryInterval + " milliseconds."); } // Throw this up to the caller, which may decide to ignore it and // continue to attempt to contact the RM. throw e; } Resource newHeadRoom = getAvailableResources() == null ? Resources.none() : getAvailableResources(); List<Container> newContainers = response.getAllocatedContainers(); // Setting NMTokens if (response.getNMTokens() != null) { for (NMToken nmToken : response.getNMTokens()) { NMTokenCache.setNMToken(nmToken.getNodeId().toString(), nmToken.getToken()); } } // Setting AMRMToken if (response.getAMRMToken() != null) { updateAMRMToken(response.getAMRMToken()); } List<ContainerStatus> finishedContainers = response.getCompletedContainersStatuses(); if (newContainers.size() + finishedContainers.size() > 0 || !headRoom.equals(newHeadRoom)) { //something changed recalculateReduceSchedule = true; if (LOG.isDebugEnabled() && !headRoom.equals(newHeadRoom)) { LOG.debug("headroom=" + newHeadRoom); } } if (LOG.isDebugEnabled()) { for (Container cont : newContainers) { LOG.debug("Received new Container :" + cont); } } //Called on each allocation. Will know about newly blacklisted/added hosts. 
computeIgnoreBlacklisting(); handleUpdatedNodes(response); for (ContainerStatus cont : finishedContainers) { LOG.info("Received completed container " + cont.getContainerId()); TaskAttemptId attemptID = assignedRequests.get(cont.getContainerId()); if (attemptID == null) { LOG.error("Container complete event for unknown container id " + cont.getContainerId()); } else { pendingRelease.remove(cont.getContainerId()); assignedRequests.remove(attemptID); // send the container completed event to Task attempt eventHandler.handle(createContainerFinishedEvent(cont, attemptID)); // Send the diagnostics String diagnostics = StringInterner.weakIntern(cont.getDiagnostics()); eventHandler.handle(new TaskAttemptDiagnosticsUpdateEvent(attemptID, diagnostics)); } } return newContainers; } private void applyConcurrentTaskLimits() { int numScheduledMaps = scheduledRequests.maps.size(); if (maxRunningMaps > 0 && numScheduledMaps > 0) { int maxRequestedMaps = Math.max(0, maxRunningMaps - assignedRequests.maps.size()); int numScheduledFailMaps = scheduledRequests.earlierFailedMaps.size(); int failedMapRequestLimit = Math.min(maxRequestedMaps, numScheduledFailMaps); int normalMapRequestLimit = Math.min( maxRequestedMaps - failedMapRequestLimit, numScheduledMaps - numScheduledFailMaps); setRequestLimit(PRIORITY_FAST_FAIL_MAP, mapResourceRequest, failedMapRequestLimit); setRequestLimit(PRIORITY_MAP, mapResourceRequest, normalMapRequestLimit); } int numScheduledReduces = scheduledRequests.reduces.size(); if (maxRunningReduces > 0 && numScheduledReduces > 0) { int maxRequestedReduces = Math.max(0, maxRunningReduces - assignedRequests.reduces.size()); int reduceRequestLimit = Math.min(maxRequestedReduces, numScheduledReduces); setRequestLimit(PRIORITY_REDUCE, reduceResourceRequest, reduceRequestLimit); } } private boolean canAssignMaps() { return (maxRunningMaps <= 0 || assignedRequests.maps.size() < maxRunningMaps); } private boolean canAssignReduces() { return (maxRunningReduces <= 0 || assignedRequests.reduces.size() < maxRunningReduces); } private void updateAMRMToken(Token token) throws IOException { org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> amrmToken = new org.apache.hadoop.security.token.Token<AMRMTokenIdentifier>(token .getIdentifier().array(), token.getPassword().array(), new Text( token.getKind()), new Text(token.getService())); UserGroupInformation currentUGI = UserGroupInformation.getCurrentUser(); currentUGI.addToken(amrmToken); amrmToken.setService(ClientRMProxy.getAMRMTokenService(getConfig())); } @VisibleForTesting public TaskAttemptEvent createContainerFinishedEvent(ContainerStatus cont, TaskAttemptId attemptID) { if (cont.getExitStatus() == ContainerExitStatus.ABORTED || cont.getExitStatus() == ContainerExitStatus.PREEMPTED) { // killed by framework return new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_KILL); } else { return new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_CONTAINER_COMPLETED); } } @SuppressWarnings("unchecked") private void handleUpdatedNodes(AllocateResponse response) { // send event to the job about on updated nodes List<NodeReport> updatedNodes = response.getUpdatedNodes(); if (!updatedNodes.isEmpty()) { // send event to the job to act upon completed tasks eventHandler.handle(new JobUpdatedNodesEvent(getJob().getID(), updatedNodes)); // act upon running tasks HashSet<NodeId> unusableNodes = new HashSet<NodeId>(); for (NodeReport nr : updatedNodes) { NodeState nodeState = nr.getNodeState(); if (nodeState.isUnusable()) { 
unusableNodes.add(nr.getNodeId()); } } for (int i = 0; i < 2; ++i) { HashMap<TaskAttemptId, Container> taskSet = i == 0 ? assignedRequests.maps : assignedRequests.reduces; // kill running containers for (Map.Entry<TaskAttemptId, Container> entry : taskSet.entrySet()) { TaskAttemptId tid = entry.getKey(); NodeId taskAttemptNodeId = entry.getValue().getNodeId(); if (unusableNodes.contains(taskAttemptNodeId)) { LOG.info("Killing taskAttempt:" + tid + " because it is running on unusable node:" + taskAttemptNodeId); eventHandler.handle(new TaskAttemptKillEvent(tid, "TaskAttempt killed because it ran on unusable node" + taskAttemptNodeId)); } } } } } @Private public Resource getResourceLimit() { Resource headRoom = getAvailableResources(); if (headRoom == null) { headRoom = Resources.none(); } Resource assignedMapResource = Resources.multiply(mapResourceRequest, assignedRequests.maps.size()); Resource assignedReduceResource = Resources.multiply(reduceResourceRequest, assignedRequests.reduces.size()); return Resources.add(headRoom, Resources.add(assignedMapResource, assignedReduceResource)); } @Private @VisibleForTesting class ScheduledRequests { private final LinkedList<TaskAttemptId> earlierFailedMaps = new LinkedList<TaskAttemptId>(); /** Maps from a host to a list of Map tasks with data on the host */ private final Map<String, LinkedList<TaskAttemptId>> mapsHostMapping = new HashMap<String, LinkedList<TaskAttemptId>>(); private final Map<String, LinkedList<TaskAttemptId>> mapsRackMapping = new HashMap<String, LinkedList<TaskAttemptId>>(); @VisibleForTesting final Map<TaskAttemptId, ContainerRequest> maps = new LinkedHashMap<TaskAttemptId, ContainerRequest>(); private final LinkedHashMap<TaskAttemptId, ContainerRequest> reduces = new LinkedHashMap<TaskAttemptId, ContainerRequest>(); boolean remove(TaskAttemptId tId) { ContainerRequest req = null; if (tId.getTaskId().getTaskType().equals(TaskType.MAP)) { req = maps.remove(tId); } else { req = reduces.remove(tId); } if (req == null) { return false; } else { decContainerReq(req); return true; } } ContainerRequest removeReduce() { Iterator<Entry<TaskAttemptId, ContainerRequest>> it = reduces.entrySet().iterator(); if (it.hasNext()) { Entry<TaskAttemptId, ContainerRequest> entry = it.next(); it.remove(); decContainerReq(entry.getValue()); return entry.getValue(); } return null; } void addMap(ContainerRequestEvent event) { ContainerRequest request = null; if (event.getEarlierAttemptFailed()) { earlierFailedMaps.add(event.getAttemptID()); request = new ContainerRequest(event, PRIORITY_FAST_FAIL_MAP, mapNodeLabelExpression); LOG.info("Added "+event.getAttemptID()+" to list of failed maps"); } else { for (String host : event.getHosts()) { LinkedList<TaskAttemptId> list = mapsHostMapping.get(host); if (list == null) { list = new LinkedList<TaskAttemptId>(); mapsHostMapping.put(host, list); } list.add(event.getAttemptID()); if (LOG.isDebugEnabled()) { LOG.debug("Added attempt req to host " + host); } } for (String rack: event.getRacks()) { LinkedList<TaskAttemptId> list = mapsRackMapping.get(rack); if (list == null) { list = new LinkedList<TaskAttemptId>(); mapsRackMapping.put(rack, list); } list.add(event.getAttemptID()); if (LOG.isDebugEnabled()) { LOG.debug("Added attempt req to rack " + rack); } } request = new ContainerRequest(event, PRIORITY_MAP, mapNodeLabelExpression); } maps.put(event.getAttemptID(), request); addContainerReq(request); } void addReduce(ContainerRequest req) { reduces.put(req.attemptID, req); addContainerReq(req); } // this 
method will change the list of allocatedContainers. private void assign(List<Container> allocatedContainers) { Iterator<Container> it = allocatedContainers.iterator(); LOG.info("Got allocated containers " + allocatedContainers.size()); containersAllocated += allocatedContainers.size(); while (it.hasNext()) { Container allocated = it.next(); if (LOG.isDebugEnabled()) { LOG.debug("Assigning container " + allocated.getId() + " with priority " + allocated.getPriority() + " to NM " + allocated.getNodeId()); } // check if allocated container meets memory requirements // and whether we have any scheduled tasks that need // a container to be assigned boolean isAssignable = true; Priority priority = allocated.getPriority(); Resource allocatedResource = allocated.getResource(); if (PRIORITY_FAST_FAIL_MAP.equals(priority) || PRIORITY_MAP.equals(priority)) { if (ResourceCalculatorUtils.computeAvailableContainers(allocatedResource, mapResourceRequest, getSchedulerResourceTypes()) <= 0 || maps.isEmpty()) { LOG.info("Cannot assign container " + allocated + " for a map as either " + " container memory less than required " + mapResourceRequest + " or no pending map tasks - maps.isEmpty=" + maps.isEmpty()); isAssignable = false; } } else if (PRIORITY_REDUCE.equals(priority)) { if (ResourceCalculatorUtils.computeAvailableContainers(allocatedResource, reduceResourceRequest, getSchedulerResourceTypes()) <= 0 || reduces.isEmpty()) { LOG.info("Cannot assign container " + allocated + " for a reduce as either " + " container memory less than required " + reduceResourceRequest + " or no pending reduce tasks - reduces.isEmpty=" + reduces.isEmpty()); isAssignable = false; } } else { LOG.warn("Container allocated at unwanted priority: " + priority + ". Returning to RM..."); isAssignable = false; } if(!isAssignable) { // release container if we could not assign it containerNotAssigned(allocated); it.remove(); continue; } // do not assign if allocated container is on a // blacklisted host String allocatedHost = allocated.getNodeId().getHost(); if (isNodeBlacklisted(allocatedHost)) { // we need to request for a new container // and release the current one LOG.info("Got allocated container on a blacklisted " + " host "+allocatedHost +". Releasing container " + allocated); // find the request matching this allocated container // and replace it with a new one ContainerRequest toBeReplacedReq = getContainerReqToReplace(allocated); if (toBeReplacedReq != null) { LOG.info("Placing a new container request for task attempt " + toBeReplacedReq.attemptID); ContainerRequest newReq = getFilteredContainerRequest(toBeReplacedReq); decContainerReq(toBeReplacedReq); if (toBeReplacedReq.attemptID.getTaskId().getTaskType() == TaskType.MAP) { maps.put(newReq.attemptID, newReq); } else { reduces.put(newReq.attemptID, newReq); } addContainerReq(newReq); } else { LOG.info("Could not map allocated container to a valid request." 
+ " Releasing allocated container " + allocated); } // release container if we could not assign it containerNotAssigned(allocated); it.remove(); continue; } } assignContainers(allocatedContainers); // release container if we could not assign it it = allocatedContainers.iterator(); while (it.hasNext()) { Container allocated = it.next(); LOG.info("Releasing unassigned container " + allocated); containerNotAssigned(allocated); } } @SuppressWarnings("unchecked") private void containerAssigned(Container allocated, ContainerRequest assigned) { // Update resource requests decContainerReq(assigned); // send the container-assigned event to task attempt eventHandler.handle(new TaskAttemptContainerAssignedEvent( assigned.attemptID, allocated, applicationACLs)); assignedRequests.add(allocated, assigned.attemptID); if (LOG.isDebugEnabled()) { LOG.info("Assigned container (" + allocated + ") " + " to task " + assigned.attemptID + " on node " + allocated.getNodeId().toString()); } } private void containerNotAssigned(Container allocated) { containersReleased++; pendingRelease.add(allocated.getId()); release(allocated.getId()); } private ContainerRequest assignWithoutLocality(Container allocated) { ContainerRequest assigned = null; Priority priority = allocated.getPriority(); if (PRIORITY_FAST_FAIL_MAP.equals(priority)) { LOG.info("Assigning container " + allocated + " to fast fail map"); assigned = assignToFailedMap(allocated); } else if (PRIORITY_REDUCE.equals(priority)) { if (LOG.isDebugEnabled()) { LOG.debug("Assigning container " + allocated + " to reduce"); } assigned = assignToReduce(allocated); } return assigned; } private void assignContainers(List<Container> allocatedContainers) { Iterator<Container> it = allocatedContainers.iterator(); while (it.hasNext()) { Container allocated = it.next(); ContainerRequest assigned = assignWithoutLocality(allocated); if (assigned != null) { containerAssigned(allocated, assigned); it.remove(); } } assignMapsWithLocality(allocatedContainers); } private ContainerRequest getContainerReqToReplace(Container allocated) { LOG.info("Finding containerReq for allocated container: " + allocated); Priority priority = allocated.getPriority(); ContainerRequest toBeReplaced = null; if (PRIORITY_FAST_FAIL_MAP.equals(priority)) { LOG.info("Replacing FAST_FAIL_MAP container " + allocated.getId()); Iterator<TaskAttemptId> iter = earlierFailedMaps.iterator(); while (toBeReplaced == null && iter.hasNext()) { toBeReplaced = maps.get(iter.next()); } LOG.info("Found replacement: " + toBeReplaced); return toBeReplaced; } else if (PRIORITY_MAP.equals(priority)) { LOG.info("Replacing MAP container " + allocated.getId()); // allocated container was for a map String host = allocated.getNodeId().getHost(); LinkedList<TaskAttemptId> list = mapsHostMapping.get(host); if (list != null && list.size() > 0) { TaskAttemptId tId = list.removeLast(); if (maps.containsKey(tId)) { toBeReplaced = maps.remove(tId); } } else { TaskAttemptId tId = maps.keySet().iterator().next(); toBeReplaced = maps.remove(tId); } } else if (PRIORITY_REDUCE.equals(priority)) { TaskAttemptId tId = reduces.keySet().iterator().next(); toBeReplaced = reduces.remove(tId); } LOG.info("Found replacement: " + toBeReplaced); return toBeReplaced; } @SuppressWarnings("unchecked") private ContainerRequest assignToFailedMap(Container allocated) { //try to assign to earlierFailedMaps if present ContainerRequest assigned = null; while (assigned == null && earlierFailedMaps.size() > 0 && canAssignMaps()) { TaskAttemptId tId = 
earlierFailedMaps.removeFirst(); if (maps.containsKey(tId)) { assigned = maps.remove(tId); JobCounterUpdateEvent jce = new JobCounterUpdateEvent(assigned.attemptID.getTaskId().getJobId()); jce.addCounterUpdate(JobCounter.OTHER_LOCAL_MAPS, 1); eventHandler.handle(jce); LOG.info("Assigned from earlierFailedMaps"); break; } } return assigned; } private ContainerRequest assignToReduce(Container allocated) { ContainerRequest assigned = null; //try to assign to reduces if present if (assigned == null && reduces.size() > 0 && canAssignReduces()) { TaskAttemptId tId = reduces.keySet().iterator().next(); assigned = reduces.remove(tId); LOG.info("Assigned to reduce"); } return assigned; } @SuppressWarnings("unchecked") private void assignMapsWithLocality(List<Container> allocatedContainers) { // try to assign to all nodes first to match node local Iterator<Container> it = allocatedContainers.iterator(); while(it.hasNext() && maps.size() > 0 && canAssignMaps()){ Container allocated = it.next(); Priority priority = allocated.getPriority(); assert PRIORITY_MAP.equals(priority); // "if (maps.containsKey(tId))" below should be almost always true. // hence this while loop would almost always have O(1) complexity String host = allocated.getNodeId().getHost(); LinkedList<TaskAttemptId> list = mapsHostMapping.get(host); while (list != null && list.size() > 0) { if (LOG.isDebugEnabled()) { LOG.debug("Host matched to the request list " + host); } TaskAttemptId tId = list.removeFirst(); if (maps.containsKey(tId)) { ContainerRequest assigned = maps.remove(tId); containerAssigned(allocated, assigned); it.remove(); JobCounterUpdateEvent jce = new JobCounterUpdateEvent(assigned.attemptID.getTaskId().getJobId()); jce.addCounterUpdate(JobCounter.DATA_LOCAL_MAPS, 1); eventHandler.handle(jce); hostLocalAssigned++; if (LOG.isDebugEnabled()) { LOG.debug("Assigned based on host match " + host); } break; } } } // try to match all rack local it = allocatedContainers.iterator(); while(it.hasNext() && maps.size() > 0 && canAssignMaps()){ Container allocated = it.next(); Priority priority = allocated.getPriority(); assert PRIORITY_MAP.equals(priority); // "if (maps.containsKey(tId))" below should be almost always true. 
// hence this while loop would almost always have O(1) complexity String host = allocated.getNodeId().getHost(); String rack = RackResolver.resolve(host).getNetworkLocation(); LinkedList<TaskAttemptId> list = mapsRackMapping.get(rack); while (list != null && list.size() > 0) { TaskAttemptId tId = list.removeFirst(); if (maps.containsKey(tId)) { ContainerRequest assigned = maps.remove(tId); containerAssigned(allocated, assigned); it.remove(); JobCounterUpdateEvent jce = new JobCounterUpdateEvent(assigned.attemptID.getTaskId().getJobId()); jce.addCounterUpdate(JobCounter.RACK_LOCAL_MAPS, 1); eventHandler.handle(jce); rackLocalAssigned++; if (LOG.isDebugEnabled()) { LOG.debug("Assigned based on rack match " + rack); } break; } } } // assign remaining it = allocatedContainers.iterator(); while(it.hasNext() && maps.size() > 0 && canAssignMaps()){ Container allocated = it.next(); Priority priority = allocated.getPriority(); assert PRIORITY_MAP.equals(priority); TaskAttemptId tId = maps.keySet().iterator().next(); ContainerRequest assigned = maps.remove(tId); containerAssigned(allocated, assigned); it.remove(); JobCounterUpdateEvent jce = new JobCounterUpdateEvent(assigned.attemptID.getTaskId().getJobId()); jce.addCounterUpdate(JobCounter.OTHER_LOCAL_MAPS, 1); eventHandler.handle(jce); if (LOG.isDebugEnabled()) { LOG.debug("Assigned based on * match"); } } } } @Private @VisibleForTesting class AssignedRequests { private final Map<ContainerId, TaskAttemptId> containerToAttemptMap = new HashMap<ContainerId, TaskAttemptId>(); private final LinkedHashMap<TaskAttemptId, Container> maps = new LinkedHashMap<TaskAttemptId, Container>(); @VisibleForTesting final LinkedHashMap<TaskAttemptId, Container> reduces = new LinkedHashMap<TaskAttemptId, Container>(); @VisibleForTesting final Set<TaskAttemptId> preemptionWaitingReduces = new HashSet<TaskAttemptId>(); void add(Container container, TaskAttemptId tId) { LOG.info("Assigned container " + container.getId().toString() + " to " + tId); containerToAttemptMap.put(container.getId(), tId); if (tId.getTaskId().getTaskType().equals(TaskType.MAP)) { maps.put(tId, container); } else { reduces.put(tId, container); } } @SuppressWarnings("unchecked") void preemptReduce(int toPreempt) { List<TaskAttemptId> reduceList = new ArrayList<TaskAttemptId> (reduces.keySet()); //sort reduces on progress Collections.sort(reduceList, new Comparator<TaskAttemptId>() { @Override public int compare(TaskAttemptId o1, TaskAttemptId o2) { return Float.compare( getJob().getTask(o1.getTaskId()).getAttempt(o1).getProgress(), getJob().getTask(o2.getTaskId()).getAttempt(o2).getProgress()); } }); for (int i = 0; i < toPreempt && reduceList.size() > 0; i++) { TaskAttemptId id = reduceList.remove(0);//remove the one on top LOG.info("Preempting " + id); preemptionWaitingReduces.add(id); eventHandler.handle(new TaskAttemptKillEvent(id, RAMPDOWN_DIAGNOSTIC)); } } boolean remove(TaskAttemptId tId) { ContainerId containerId = null; if (tId.getTaskId().getTaskType().equals(TaskType.MAP)) { containerId = maps.remove(tId).getId(); } else { containerId = reduces.remove(tId).getId(); if (containerId != null) { boolean preempted = preemptionWaitingReduces.remove(tId); if (preempted) { LOG.info("Reduce preemption successful " + tId); } } } if (containerId != null) { containerToAttemptMap.remove(containerId); return true; } return false; } TaskAttemptId get(ContainerId cId) { return containerToAttemptMap.get(cId); } ContainerId get(TaskAttemptId tId) { Container taskContainer; if 
(tId.getTaskId().getTaskType().equals(TaskType.MAP)) { taskContainer = maps.get(tId); } else { taskContainer = reduces.get(tId); } if (taskContainer == null) { return null; } else { return taskContainer.getId(); } } } private class ScheduleStats { int numPendingReduces; int numScheduledMaps; int numScheduledReduces; int numAssignedMaps; int numAssignedReduces; int numCompletedMaps; int numCompletedReduces; int numContainersAllocated; int numContainersReleased; public void updateAndLogIfChanged(String msgPrefix) { boolean changed = false; // synchronized to fix findbug warnings synchronized (RMContainerAllocator.this) { changed |= (numPendingReduces != pendingReduces.size()); numPendingReduces = pendingReduces.size(); changed |= (numScheduledMaps != scheduledRequests.maps.size()); numScheduledMaps = scheduledRequests.maps.size(); changed |= (numScheduledReduces != scheduledRequests.reduces.size()); numScheduledReduces = scheduledRequests.reduces.size(); changed |= (numAssignedMaps != assignedRequests.maps.size()); numAssignedMaps = assignedRequests.maps.size(); changed |= (numAssignedReduces != assignedRequests.reduces.size()); numAssignedReduces = assignedRequests.reduces.size(); changed |= (numCompletedMaps != getJob().getCompletedMaps()); numCompletedMaps = getJob().getCompletedMaps(); changed |= (numCompletedReduces != getJob().getCompletedReduces()); numCompletedReduces = getJob().getCompletedReduces(); changed |= (numContainersAllocated != containersAllocated); numContainersAllocated = containersAllocated; changed |= (numContainersReleased != containersReleased); numContainersReleased = containersReleased; } if (changed) { log(msgPrefix); } } public void log(String msgPrefix) { LOG.info(msgPrefix + "PendingReds:" + numPendingReduces + " ScheduledMaps:" + numScheduledMaps + " ScheduledReds:" + numScheduledReduces + " AssignedMaps:" + numAssignedMaps + " AssignedReds:" + numAssignedReduces + " CompletedMaps:" + numCompletedMaps + " CompletedReds:" + numCompletedReduces + " ContAlloc:" + numContainersAllocated + " ContRel:" + numContainersReleased + " HostLocal:" + hostLocalAssigned + " RackLocal:" + rackLocalAssigned); } } }
56113
37.860111
108
java
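For orientation, the following is a minimal, self-contained sketch of the reduce slow-start check that scheduleReduces() in RMContainerAllocator performs above. The class and method names are illustrative; only the threshold arithmetic mirrors the allocator code, with reduceSlowStart defaulting to DEFAULT_COMPLETED_MAPS_PERCENT_FOR_REDUCE_SLOWSTART (5% of the maps).

public class SlowStartSketch {
  // Reduces stay pending until at least ceil(reduceSlowStart * totalMaps)
  // maps have completed; at that point the real allocator calls
  // setIsReduceStarted(true) and begins ramping reduces up.
  static boolean reduceSlowStartReached(int totalMaps, int completedMaps,
      float reduceSlowStart) {
    int completedMapsForReduceSlowstart =
        (int) Math.ceil(reduceSlowStart * totalMaps);
    return completedMaps >= completedMapsForReduceSlowstart;
  }

  public static void main(String[] args) {
    // With 100 maps and the default 5% slow start, reduces may be scheduled
    // once 5 maps have finished.
    System.out.println(reduceSlowStartReached(100, 4, 0.05f)); // false
    System.out.println(reduceSlowStartReached(100, 5, 0.05f)); // true
  }
}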
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/ContainerAllocatorEvent.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.rm; import org.apache.hadoop.yarn.event.AbstractEvent; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; public class ContainerAllocatorEvent extends AbstractEvent<ContainerAllocator.EventType> { private TaskAttemptId attemptID; public ContainerAllocatorEvent(TaskAttemptId attemptID, ContainerAllocator.EventType type) { super(type); this.attemptID = attemptID; } public TaskAttemptId getAttemptID() { return attemptID; } }
1314
33.605263
74
java
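A hedged usage sketch for the event class above: a caller wraps a TaskAttemptId and an EventType in a ContainerAllocatorEvent and hands it to RMContainerAllocator.handle(), which queues it for the event-handling thread. The application id, the MRBuilderUtils helpers, and the numeric ids below are illustrative scaffolding and are not taken from this file.

import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.yarn.api.records.ApplicationId;

class DeallocateEventSketch {
  // Builds a CONTAINER_DEALLOCATE event for map task 0, attempt 0 of job 1.
  // RMContainerAllocator.handleEvent() responds by removing the scheduled
  // request or, if the attempt already holds a container, releasing it.
  static ContainerAllocatorEvent deallocateEventFor(ApplicationId appId) {
    TaskId taskId = MRBuilderUtils.newTaskId(
        MRBuilderUtils.newJobId(appId, 1), 0, TaskType.MAP);
    TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
    return new ContainerAllocatorEvent(attemptId,
        ContainerAllocator.EventType.CONTAINER_DEALLOCATE);
  }
}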
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ @InterfaceAudience.Private package org.apache.hadoop.mapreduce.v2.app.rm; import org.apache.hadoop.classification.InterfaceAudience;
939
43.761905
75
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/ContainerRequestEvent.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.rm; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; import org.apache.hadoop.yarn.api.records.Resource; public class ContainerRequestEvent extends ContainerAllocatorEvent { private final Resource capability; private final String[] hosts; private final String[] racks; private boolean earlierAttemptFailed = false; public ContainerRequestEvent(TaskAttemptId attemptID, Resource capability, String[] hosts, String[] racks) { super(attemptID, ContainerAllocator.EventType.CONTAINER_REQ); this.capability = capability; this.hosts = hosts; this.racks = racks; } ContainerRequestEvent(TaskAttemptId attemptID, Resource capability) { this(attemptID, capability, new String[0], new String[0]); this.earlierAttemptFailed = true; } public static ContainerRequestEvent createContainerRequestEventForFailedContainer( TaskAttemptId attemptID, Resource capability) { //ContainerRequest for failed events does not consider rack / node locality? return new ContainerRequestEvent(attemptID, capability); } public Resource getCapability() { return capability; } public String[] getHosts() { return hosts; } public String[] getRacks() { return racks; } public boolean getEarlierAttemptFailed() { return earlierAttemptFailed; } }
2192
31.25
84
java
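The two construction paths above differ only in locality: the public constructor carries the split's host and rack hints, while createContainerRequestEventForFailedContainer() deliberately drops them and marks the earlier failure, which the allocator uses to prioritise the retry (fast-fail priority for maps, front of the pending queue for reduces). A small hedged sketch follows; the wrapper class and method names are illustrative.

import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.app.rm.ContainerRequestEvent;
import org.apache.hadoop.yarn.api.records.Resource;

class ContainerRequestEventSketch {
  // First attempt: keep the preferred hosts/racks so the allocator can ask
  // the RM for node-local and rack-local containers.
  static ContainerRequestEvent firstAttempt(TaskAttemptId attemptId,
      Resource capability, String[] hosts, String[] racks) {
    return new ContainerRequestEvent(attemptId, capability, hosts, racks);
  }

  // Retry after a failure: locality hints are dropped and
  // getEarlierAttemptFailed() reports true for this event.
  static ContainerRequestEvent retryAfterFailure(TaskAttemptId attemptId,
      Resource capability) {
    return ContainerRequestEvent
        .createContainerRequestEventForFailedContainer(attemptId, capability);
  }
}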
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/ContainerFailedEvent.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.rm; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; public class ContainerFailedEvent extends ContainerAllocatorEvent { private final String contMgrAddress; public ContainerFailedEvent(TaskAttemptId attemptID, String contMgrAddr) { super(attemptID, ContainerAllocator.EventType.CONTAINER_FAILED); this.contMgrAddress = contMgrAddr; } public String getContMgrAddress() { return contMgrAddress; } }
1295
34.027027
76
java
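ContainerFailedEvent carries the container manager address ("host:port") of the node where the attempt failed; RMContainerAllocator strips the port before charging the failure to that host (see its private getHost()). Below is a self-contained sketch of that parsing, using an illustrative address.

public class FailedHostSketch {
  // Mirrors the host extraction the allocator applies to
  // ContainerFailedEvent.getContMgrAddress() before calling
  // containerFailedOnHost(host).
  static String host(String contMgrAddress) {
    String[] hostPort = contMgrAddress.split(":");
    return hostPort.length == 2 ? hostPort[0] : contMgrAddress;
  }

  public static void main(String[] args) {
    System.out.println(host("node42.example.com:8041")); // node42.example.com
    System.out.println(host("node42.example.com"));      // node42.example.com
  }
}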
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.rm; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.Map; import java.util.Set; import java.util.TreeMap; import java.util.TreeSet; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; import org.apache.hadoop.mapreduce.v2.app.AppContext; import org.apache.hadoop.mapreduce.v2.app.client.ClientService; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest; import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.api.records.ResourceRequest.ResourceRequestComparator; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import com.google.common.annotations.VisibleForTesting; /** * Keeps the data structures to send container requests to RM. 
*/ public abstract class RMContainerRequestor extends RMCommunicator { private static final Log LOG = LogFactory.getLog(RMContainerRequestor.class); private static final ResourceRequestComparator RESOURCE_REQUEST_COMPARATOR = new ResourceRequestComparator(); protected int lastResponseID; private Resource availableResources; private final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null); //Key -> Priority //Value -> Map //Key->ResourceName (e.g., hostname, rackname, *) //Value->Map //Key->Resource Capability //Value->ResourceRequest private final Map<Priority, Map<String, Map<Resource, ResourceRequest>>> remoteRequestsTable = new TreeMap<Priority, Map<String, Map<Resource, ResourceRequest>>>(); // use custom comparator to make sure ResourceRequest objects differing only in // numContainers dont end up as duplicates private final Set<ResourceRequest> ask = new TreeSet<ResourceRequest>( RESOURCE_REQUEST_COMPARATOR); private final Set<ContainerId> release = new TreeSet<ContainerId>(); // pendingRelease holds history or release requests.request is removed only if // RM sends completedContainer. // How it different from release? --> release is for per allocate() request. protected Set<ContainerId> pendingRelease = new TreeSet<ContainerId>(); private final Map<ResourceRequest,ResourceRequest> requestLimits = new TreeMap<ResourceRequest,ResourceRequest>(RESOURCE_REQUEST_COMPARATOR); private final Set<ResourceRequest> requestLimitsToUpdate = new TreeSet<ResourceRequest>(RESOURCE_REQUEST_COMPARATOR); private boolean nodeBlacklistingEnabled; private int blacklistDisablePercent; private AtomicBoolean ignoreBlacklisting = new AtomicBoolean(false); private int blacklistedNodeCount = 0; private int lastClusterNmCount = 0; private int clusterNmCount = 0; private int maxTaskFailuresPerNode; private final Map<String, Integer> nodeFailures = new HashMap<String, Integer>(); private final Set<String> blacklistedNodes = Collections .newSetFromMap(new ConcurrentHashMap<String, Boolean>()); private final Set<String> blacklistAdditions = Collections .newSetFromMap(new ConcurrentHashMap<String, Boolean>()); private final Set<String> blacklistRemovals = Collections .newSetFromMap(new ConcurrentHashMap<String, Boolean>()); public RMContainerRequestor(ClientService clientService, AppContext context) { super(clientService, context); } @Private @VisibleForTesting static class ContainerRequest { final TaskAttemptId attemptID; final Resource capability; final String[] hosts; final String[] racks; //final boolean earlierAttemptFailed; final Priority priority; final String nodeLabelExpression; /** * the time when this request object was formed; can be used to avoid * aggressive preemption for recently placed requests */ final long requestTimeMs; public ContainerRequest(ContainerRequestEvent event, Priority priority, String nodeLabelExpression) { this(event.getAttemptID(), event.getCapability(), event.getHosts(), event.getRacks(), priority, nodeLabelExpression); } public ContainerRequest(ContainerRequestEvent event, Priority priority, long requestTimeMs) { this(event.getAttemptID(), event.getCapability(), event.getHosts(), event.getRacks(), priority, requestTimeMs,null); } public ContainerRequest(TaskAttemptId attemptID, Resource capability, String[] hosts, String[] racks, Priority priority, String nodeLabelExpression) { this(attemptID, capability, hosts, racks, priority, System.currentTimeMillis(), nodeLabelExpression); } public ContainerRequest(TaskAttemptId attemptID, Resource capability, 
String[] hosts, String[] racks, Priority priority, long requestTimeMs,String nodeLabelExpression) { this.attemptID = attemptID; this.capability = capability; this.hosts = hosts; this.racks = racks; this.priority = priority; this.requestTimeMs = requestTimeMs; this.nodeLabelExpression = nodeLabelExpression; } public String toString() { StringBuilder sb = new StringBuilder(); sb.append("AttemptId[").append(attemptID).append("]"); sb.append("Capability[").append(capability).append("]"); sb.append("Priority[").append(priority).append("]"); return sb.toString(); } } @Override protected void serviceInit(Configuration conf) throws Exception { super.serviceInit(conf); nodeBlacklistingEnabled = conf.getBoolean(MRJobConfig.MR_AM_JOB_NODE_BLACKLISTING_ENABLE, true); LOG.info("nodeBlacklistingEnabled:" + nodeBlacklistingEnabled); maxTaskFailuresPerNode = conf.getInt(MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER, 3); blacklistDisablePercent = conf.getInt( MRJobConfig.MR_AM_IGNORE_BLACKLISTING_BLACKLISTED_NODE_PERECENT, MRJobConfig.DEFAULT_MR_AM_IGNORE_BLACKLISTING_BLACKLISTED_NODE_PERCENT); LOG.info("maxTaskFailuresPerNode is " + maxTaskFailuresPerNode); if (blacklistDisablePercent < -1 || blacklistDisablePercent > 100) { throw new YarnRuntimeException("Invalid blacklistDisablePercent: " + blacklistDisablePercent + ". Should be an integer between 0 and 100 or -1 to disabled"); } LOG.info("blacklistDisablePercent is " + blacklistDisablePercent); } protected AllocateResponse makeRemoteRequest() throws YarnException, IOException { applyRequestLimits(); ResourceBlacklistRequest blacklistRequest = ResourceBlacklistRequest.newInstance(new ArrayList<String>(blacklistAdditions), new ArrayList<String>(blacklistRemovals)); AllocateRequest allocateRequest = AllocateRequest.newInstance(lastResponseID, super.getApplicationProgress(), new ArrayList<ResourceRequest>(ask), new ArrayList<ContainerId>(release), blacklistRequest); AllocateResponse allocateResponse = scheduler.allocate(allocateRequest); lastResponseID = allocateResponse.getResponseId(); availableResources = allocateResponse.getAvailableResources(); lastClusterNmCount = clusterNmCount; clusterNmCount = allocateResponse.getNumClusterNodes(); int numCompletedContainers = allocateResponse.getCompletedContainersStatuses().size(); if (ask.size() > 0 || release.size() > 0) { LOG.info("getResources() for " + applicationId + ":" + " ask=" + ask.size() + " release= " + release.size() + " newContainers=" + allocateResponse.getAllocatedContainers().size() + " finishedContainers=" + numCompletedContainers + " resourcelimit=" + availableResources + " knownNMs=" + clusterNmCount); } ask.clear(); release.clear(); if (numCompletedContainers > 0) { // re-send limited requests when a container completes to trigger asking // for more containers requestLimitsToUpdate.addAll(requestLimits.keySet()); } if (blacklistAdditions.size() > 0 || blacklistRemovals.size() > 0) { LOG.info("Update the blacklist for " + applicationId + ": blacklistAdditions=" + blacklistAdditions.size() + " blacklistRemovals=" + blacklistRemovals.size()); } blacklistAdditions.clear(); blacklistRemovals.clear(); return allocateResponse; } private void applyRequestLimits() { Iterator<ResourceRequest> iter = requestLimits.values().iterator(); while (iter.hasNext()) { ResourceRequest reqLimit = iter.next(); int limit = reqLimit.getNumContainers(); Map<String, Map<Resource, ResourceRequest>> remoteRequests = remoteRequestsTable.get(reqLimit.getPriority()); Map<Resource, ResourceRequest> reqMap = (remoteRequests != 
null) ? remoteRequests.get(ResourceRequest.ANY) : null; ResourceRequest req = (reqMap != null) ? reqMap.get(reqLimit.getCapability()) : null; if (req == null) { continue; } // update an existing ask or send a new one if updating if (ask.remove(req) || requestLimitsToUpdate.contains(req)) { ResourceRequest newReq = req.getNumContainers() > limit ? reqLimit : req; ask.add(newReq); LOG.info("Applying ask limit of " + newReq.getNumContainers() + " for priority:" + reqLimit.getPriority() + " and capability:" + reqLimit.getCapability()); } if (limit == Integer.MAX_VALUE) { iter.remove(); } } requestLimitsToUpdate.clear(); } protected void addOutstandingRequestOnResync() { for (Map<String, Map<Resource, ResourceRequest>> rr : remoteRequestsTable .values()) { for (Map<Resource, ResourceRequest> capabalities : rr.values()) { for (ResourceRequest request : capabalities.values()) { addResourceRequestToAsk(request); } } } if (!ignoreBlacklisting.get()) { blacklistAdditions.addAll(blacklistedNodes); } if (!pendingRelease.isEmpty()) { release.addAll(pendingRelease); } requestLimitsToUpdate.addAll(requestLimits.keySet()); } // May be incorrect if there's multiple NodeManagers running on a single host. // knownNodeCount is based on node managers, not hosts. blacklisting is // currently based on hosts. protected void computeIgnoreBlacklisting() { if (!nodeBlacklistingEnabled) { return; } if (blacklistDisablePercent != -1 && (blacklistedNodeCount != blacklistedNodes.size() || clusterNmCount != lastClusterNmCount)) { blacklistedNodeCount = blacklistedNodes.size(); if (clusterNmCount == 0) { LOG.info("KnownNode Count at 0. Not computing ignoreBlacklisting"); return; } int val = (int) ((float) blacklistedNodes.size() / clusterNmCount * 100); if (val >= blacklistDisablePercent) { if (ignoreBlacklisting.compareAndSet(false, true)) { LOG.info("Ignore blacklisting set to true. Known: " + clusterNmCount + ", Blacklisted: " + blacklistedNodeCount + ", " + val + "%"); // notify RM to ignore all the blacklisted nodes blacklistAdditions.clear(); blacklistRemovals.addAll(blacklistedNodes); } } else { if (ignoreBlacklisting.compareAndSet(true, false)) { LOG.info("Ignore blacklisting set to false. Known: " + clusterNmCount + ", Blacklisted: " + blacklistedNodeCount + ", " + val + "%"); // notify RM of all the blacklisted nodes blacklistAdditions.addAll(blacklistedNodes); blacklistRemovals.clear(); } } } } protected void containerFailedOnHost(String hostName) { if (!nodeBlacklistingEnabled) { return; } if (blacklistedNodes.contains(hostName)) { if (LOG.isDebugEnabled()) { LOG.debug("Host " + hostName + " is already blacklisted."); } return; //already blacklisted } Integer failures = nodeFailures.remove(hostName); failures = failures == null ? Integer.valueOf(0) : failures; failures++; LOG.info(failures + " failures on node " + hostName); if (failures >= maxTaskFailuresPerNode) { blacklistedNodes.add(hostName); if (!ignoreBlacklisting.get()) { blacklistAdditions.add(hostName); } //Even if blacklisting is ignored, continue to remove the host from // the request table. The RM may have additional nodes it can allocate on. 
LOG.info("Blacklisted host " + hostName); //remove all the requests corresponding to this hostname for (Map<String, Map<Resource, ResourceRequest>> remoteRequests : remoteRequestsTable.values()){ //remove from host if no pending allocations boolean foundAll = true; Map<Resource, ResourceRequest> reqMap = remoteRequests.get(hostName); if (reqMap != null) { for (ResourceRequest req : reqMap.values()) { if (!ask.remove(req)) { foundAll = false; // if ask already sent to RM, we can try and overwrite it if possible. // send a new ask to RM with numContainers // specified for the blacklisted host to be 0. ResourceRequest zeroedRequest = ResourceRequest.newInstance(req.getPriority(), req.getResourceName(), req.getCapability(), req.getNumContainers(), req.getRelaxLocality()); zeroedRequest.setNumContainers(0); // to be sent to RM on next heartbeat addResourceRequestToAsk(zeroedRequest); } } // if all requests were still in ask queue // we can remove this request if (foundAll) { remoteRequests.remove(hostName); } } // TODO handling of rack blacklisting // Removing from rack should be dependent on no. of failures within the rack // Blacklisting a rack on the basis of a single node's blacklisting // may be overly aggressive. // Node failures could be co-related with other failures on the same rack // but we probably need a better approach at trying to decide how and when // to blacklist a rack } } else { nodeFailures.put(hostName, failures); } } protected Resource getAvailableResources() { return availableResources; } protected void addContainerReq(ContainerRequest req) { // Create resource requests for (String host : req.hosts) { // Data-local if (!isNodeBlacklisted(host)) { addResourceRequest(req.priority, host, req.capability, null); } } // Nothing Rack-local for now for (String rack : req.racks) { addResourceRequest(req.priority, rack, req.capability, null); } // Off-switch addResourceRequest(req.priority, ResourceRequest.ANY, req.capability, req.nodeLabelExpression); } protected void decContainerReq(ContainerRequest req) { // Update resource requests for (String hostName : req.hosts) { decResourceRequest(req.priority, hostName, req.capability); } for (String rack : req.racks) { decResourceRequest(req.priority, rack, req.capability); } decResourceRequest(req.priority, ResourceRequest.ANY, req.capability); } private void addResourceRequest(Priority priority, String resourceName, Resource capability, String nodeLabelExpression) { Map<String, Map<Resource, ResourceRequest>> remoteRequests = this.remoteRequestsTable.get(priority); if (remoteRequests == null) { remoteRequests = new HashMap<String, Map<Resource, ResourceRequest>>(); this.remoteRequestsTable.put(priority, remoteRequests); if (LOG.isDebugEnabled()) { LOG.debug("Added priority=" + priority); } } Map<Resource, ResourceRequest> reqMap = remoteRequests.get(resourceName); if (reqMap == null) { reqMap = new HashMap<Resource, ResourceRequest>(); remoteRequests.put(resourceName, reqMap); } ResourceRequest remoteRequest = reqMap.get(capability); if (remoteRequest == null) { remoteRequest = recordFactory.newRecordInstance(ResourceRequest.class); remoteRequest.setPriority(priority); remoteRequest.setResourceName(resourceName); remoteRequest.setCapability(capability); remoteRequest.setNumContainers(0); remoteRequest.setNodeLabelExpression(nodeLabelExpression); reqMap.put(capability, remoteRequest); } remoteRequest.setNumContainers(remoteRequest.getNumContainers() + 1); // Note this down for next interaction with ResourceManager 
addResourceRequestToAsk(remoteRequest); if (LOG.isDebugEnabled()) { LOG.debug("addResourceRequest:" + " applicationId=" + applicationId.getId() + " priority=" + priority.getPriority() + " resourceName=" + resourceName + " numContainers=" + remoteRequest.getNumContainers() + " #asks=" + ask.size()); } } private void decResourceRequest(Priority priority, String resourceName, Resource capability) { Map<String, Map<Resource, ResourceRequest>> remoteRequests = this.remoteRequestsTable.get(priority); Map<Resource, ResourceRequest> reqMap = remoteRequests.get(resourceName); if (reqMap == null) { // as we modify the resource requests by filtering out blacklisted hosts // when they are added, this value may be null when being // decremented if (LOG.isDebugEnabled()) { LOG.debug("Not decrementing resource as " + resourceName + " is not present in request table"); } return; } ResourceRequest remoteRequest = reqMap.get(capability); if (LOG.isDebugEnabled()) { LOG.debug("BEFORE decResourceRequest:" + " applicationId=" + applicationId.getId() + " priority=" + priority.getPriority() + " resourceName=" + resourceName + " numContainers=" + remoteRequest.getNumContainers() + " #asks=" + ask.size()); } if(remoteRequest.getNumContainers() > 0) { // based on blacklisting comments above we can end up decrementing more // than requested. so guard for that. remoteRequest.setNumContainers(remoteRequest.getNumContainers() -1); } if (remoteRequest.getNumContainers() == 0) { reqMap.remove(capability); if (reqMap.size() == 0) { remoteRequests.remove(resourceName); } if (remoteRequests.size() == 0) { remoteRequestsTable.remove(priority); } } // send the updated resource request to RM // send 0 container count requests also to cancel previous requests addResourceRequestToAsk(remoteRequest); if (LOG.isDebugEnabled()) { LOG.info("AFTER decResourceRequest:" + " applicationId=" + applicationId.getId() + " priority=" + priority.getPriority() + " resourceName=" + resourceName + " numContainers=" + remoteRequest.getNumContainers() + " #asks=" + ask.size()); } } private void addResourceRequestToAsk(ResourceRequest remoteRequest) { // because objects inside the resource map can be deleted ask can end up // containing an object that matches new resource object but with different // numContainers. 
So existing values must be replaced explicitly ask.remove(remoteRequest); ask.add(remoteRequest); } protected void release(ContainerId containerId) { release.add(containerId); } protected boolean isNodeBlacklisted(String hostname) { if (!nodeBlacklistingEnabled || ignoreBlacklisting.get()) { return false; } return blacklistedNodes.contains(hostname); } protected ContainerRequest getFilteredContainerRequest(ContainerRequest orig) { ArrayList<String> newHosts = new ArrayList<String>(); for (String host : orig.hosts) { if (!isNodeBlacklisted(host)) { newHosts.add(host); } } String[] hosts = newHosts.toArray(new String[newHosts.size()]); ContainerRequest newReq = new ContainerRequest(orig.attemptID, orig.capability, hosts, orig.racks, orig.priority, orig.nodeLabelExpression); return newReq; } protected void setRequestLimit(Priority priority, Resource capability, int limit) { if (limit < 0) { limit = Integer.MAX_VALUE; } ResourceRequest newReqLimit = ResourceRequest.newInstance(priority, ResourceRequest.ANY, capability, limit); ResourceRequest oldReqLimit = requestLimits.put(newReqLimit, newReqLimit); if (oldReqLimit == null || oldReqLimit.getNumContainers() < limit) { requestLimitsToUpdate.add(newReqLimit); } } public Set<String> getBlacklistedNodes() { return blacklistedNodes; } }
22,608
39.015929
87
java
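The computeIgnoreBlacklisting() logic in the RMContainerRequestor code above stops reporting blacklisted hosts once blacklisted node managers reach a configured share of the cluster. The following is a minimal, self-contained sketch of that percentage check; the class and method names are illustrative and not part of Hadoop.

public class BlacklistThresholdSketch {
  // Mirrors the check in computeIgnoreBlacklisting(): ignore blacklisting once
  // the share of blacklisted node managers reaches blacklistDisablePercent.
  static boolean shouldIgnoreBlacklisting(int blacklistedNodeCount,
      int clusterNmCount, int blacklistDisablePercent) {
    if (blacklistDisablePercent == -1 || clusterNmCount == 0) {
      return false; // threshold disabled, or no known node managers yet
    }
    int percent = (int) ((float) blacklistedNodeCount / clusterNmCount * 100);
    return percent >= blacklistDisablePercent;
  }

  public static void main(String[] args) {
    // 4 blacklisted out of 12 node managers is 33%; with a threshold of 33
    // the requestor would stop sending its blacklist to the RM.
    System.out.println(shouldIgnoreBlacklisting(4, 12, 33)); // prints true
  }
}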
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMHeartbeatHandler.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.rm; public interface RMHeartbeatHandler { long getLastHeartbeatTime(); void runOnNextHeartbeat(Runnable callback); }
974
36.5
75
java
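A hedged usage sketch of the RMHeartbeatHandler contract above: callers hand a Runnable to runOnNextHeartbeat() and it runs after the allocator's next successful RM heartbeat. The wrapper class and helper method below are hypothetical; only the two interface methods shown above are assumed.

import org.apache.hadoop.mapreduce.v2.app.rm.RMHeartbeatHandler;

public class HeartbeatCallbackExample {
  // Defer an action until the AM has heartbeated to the RM at least once more.
  static void runAfterNextHeartbeat(final RMHeartbeatHandler handler,
      final Runnable action) {
    handler.runOnNextHeartbeat(new Runnable() {
      @Override
      public void run() {
        action.run();
        System.out.println("Last heartbeat at " + handler.getLastHeartbeatTime());
      }
    });
  }
}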
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.rm; import java.io.IOException; import java.net.InetSocketAddress; import java.nio.ByteBuffer; import java.util.EnumSet; import java.util.Map; import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.JobID; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.TypeConverter; import org.apache.hadoop.mapreduce.v2.api.records.JobId; import org.apache.hadoop.mapreduce.v2.app.AppContext; import org.apache.hadoop.mapreduce.v2.app.MRAppMaster.RunningAppContext; import org.apache.hadoop.mapreduce.v2.app.client.ClientService; import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.mapreduce.v2.app.job.JobStateInternal; import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl; import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse; import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest; import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse; import org.apache.hadoop.yarn.api.records.ApplicationAccessType; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.client.ClientRMProxy; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.exceptions.ApplicationMasterNotRegisteredException; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.yarn.proto.YarnServiceProtos.SchedulerResourceTypes; /** * Registers/unregisters to RM and sends heartbeats to RM. 
*/ public abstract class RMCommunicator extends AbstractService implements RMHeartbeatHandler { private static final Log LOG = LogFactory.getLog(RMCommunicator.class); private int rmPollInterval;//millis protected ApplicationId applicationId; private final AtomicBoolean stopped; protected Thread allocatorThread; @SuppressWarnings("rawtypes") protected EventHandler eventHandler; protected ApplicationMasterProtocol scheduler; private final ClientService clientService; private Resource maxContainerCapability; protected Map<ApplicationAccessType, String> applicationACLs; private volatile long lastHeartbeatTime; private ConcurrentLinkedQueue<Runnable> heartbeatCallbacks; private final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null); private final AppContext context; private Job job; // Has a signal (SIGTERM etc) been issued? protected volatile boolean isSignalled = false; private volatile boolean shouldUnregister = true; private boolean isApplicationMasterRegistered = false; private EnumSet<SchedulerResourceTypes> schedulerResourceTypes; public RMCommunicator(ClientService clientService, AppContext context) { super("RMCommunicator"); this.clientService = clientService; this.context = context; this.eventHandler = context.getEventHandler(); this.applicationId = context.getApplicationID(); this.stopped = new AtomicBoolean(false); this.heartbeatCallbacks = new ConcurrentLinkedQueue<Runnable>(); this.schedulerResourceTypes = EnumSet.of(SchedulerResourceTypes.MEMORY); } @Override protected void serviceInit(Configuration conf) throws Exception { super.serviceInit(conf); rmPollInterval = conf.getInt(MRJobConfig.MR_AM_TO_RM_HEARTBEAT_INTERVAL_MS, MRJobConfig.DEFAULT_MR_AM_TO_RM_HEARTBEAT_INTERVAL_MS); } @Override protected void serviceStart() throws Exception { scheduler= createSchedulerProxy(); JobID id = TypeConverter.fromYarn(this.applicationId); JobId jobId = TypeConverter.toYarn(id); job = context.getJob(jobId); register(); startAllocatorThread(); super.serviceStart(); } protected AppContext getContext() { return context; } protected Job getJob() { return job; } /** * Get the appProgress. Can be used only after this component is started. * @return the appProgress. */ protected float getApplicationProgress() { // For now just a single job. In future when we have a DAG, we need an // aggregate progress. 
return this.job.getProgress(); } protected void register() { //Register InetSocketAddress serviceAddr = null; if (clientService != null ) { serviceAddr = clientService.getBindAddress(); } try { RegisterApplicationMasterRequest request = recordFactory.newRecordInstance(RegisterApplicationMasterRequest.class); if (serviceAddr != null) { request.setHost(serviceAddr.getHostName()); request.setRpcPort(serviceAddr.getPort()); request.setTrackingUrl(MRWebAppUtil .getAMWebappScheme(getConfig()) + serviceAddr.getHostName() + ":" + clientService.getHttpPort()); } RegisterApplicationMasterResponse response = scheduler.registerApplicationMaster(request); isApplicationMasterRegistered = true; maxContainerCapability = response.getMaximumResourceCapability(); this.context.getClusterInfo().setMaxContainerCapability( maxContainerCapability); if (UserGroupInformation.isSecurityEnabled()) { setClientToAMToken(response.getClientToAMTokenMasterKey()); } this.applicationACLs = response.getApplicationACLs(); LOG.info("maxContainerCapability: " + maxContainerCapability); String queue = response.getQueue(); LOG.info("queue: " + queue); job.setQueueName(queue); this.schedulerResourceTypes.addAll(response.getSchedulerResourceTypes()); } catch (Exception are) { LOG.error("Exception while registering", are); throw new YarnRuntimeException(are); } } private void setClientToAMToken(ByteBuffer clientToAMTokenMasterKey) { byte[] key = clientToAMTokenMasterKey.array(); context.getClientToAMTokenSecretManager().setMasterKey(key); } protected void unregister() { try { doUnregistration(); } catch(Exception are) { LOG.error("Exception while unregistering ", are); // if unregistration failed, isLastAMRetry needs to be recalculated // to see whether AM really has the chance to retry RunningAppContext raContext = (RunningAppContext) context; raContext.resetIsLastAMRetry(); } } @VisibleForTesting protected void doUnregistration() throws YarnException, IOException, InterruptedException { FinalApplicationStatus finishState = FinalApplicationStatus.UNDEFINED; JobImpl jobImpl = (JobImpl)job; if (jobImpl.getInternalState() == JobStateInternal.SUCCEEDED) { finishState = FinalApplicationStatus.SUCCEEDED; } else if (jobImpl.getInternalState() == JobStateInternal.KILLED || (jobImpl.getInternalState() == JobStateInternal.RUNNING && isSignalled)) { finishState = FinalApplicationStatus.KILLED; } else if (jobImpl.getInternalState() == JobStateInternal.FAILED || jobImpl.getInternalState() == JobStateInternal.ERROR) { finishState = FinalApplicationStatus.FAILED; } StringBuffer sb = new StringBuffer(); for (String s : job.getDiagnostics()) { sb.append(s).append("\n"); } LOG.info("Setting job diagnostics to " + sb.toString()); String historyUrl = MRWebAppUtil.getApplicationWebURLOnJHSWithScheme(getConfig(), context.getApplicationID()); LOG.info("History url is " + historyUrl); FinishApplicationMasterRequest request = FinishApplicationMasterRequest.newInstance(finishState, sb.toString(), historyUrl); try { while (true) { FinishApplicationMasterResponse response = scheduler.finishApplicationMaster(request); if (response.getIsUnregistered()) { // When excepting ClientService, other services are already stopped, // it is safe to let clients know the final states. ClientService // should wait for some time so clients have enough time to know the // final states. 
RunningAppContext raContext = (RunningAppContext) context; raContext.markSuccessfulUnregistration(); break; } LOG.info("Waiting for application to be successfully unregistered."); Thread.sleep(rmPollInterval); } } catch (ApplicationMasterNotRegisteredException e) { // RM might have restarted or failed over and so lost the fact that AM had // registered before. register(); doUnregistration(); } } protected Resource getMaxContainerCapability() { return maxContainerCapability; } @Override protected void serviceStop() throws Exception { if (stopped.getAndSet(true)) { // return if already stopped return; } if (allocatorThread != null) { allocatorThread.interrupt(); try { allocatorThread.join(); } catch (InterruptedException ie) { LOG.warn("InterruptedException while stopping", ie); } } if (isApplicationMasterRegistered && shouldUnregister) { unregister(); } super.serviceStop(); } protected void startAllocatorThread() { allocatorThread = new Thread(new Runnable() { @Override public void run() { while (!stopped.get() && !Thread.currentThread().isInterrupted()) { try { Thread.sleep(rmPollInterval); try { heartbeat(); } catch (YarnRuntimeException e) { LOG.error("Error communicating with RM: " + e.getMessage() , e); return; } catch (Exception e) { LOG.error("ERROR IN CONTACTING RM. ", e); continue; // TODO: for other exceptions } lastHeartbeatTime = context.getClock().getTime(); executeHeartbeatCallbacks(); } catch (InterruptedException e) { if (!stopped.get()) { LOG.warn("Allocated thread interrupted. Returning."); } return; } } } }); allocatorThread.setName("RMCommunicator Allocator"); allocatorThread.start(); } protected ApplicationMasterProtocol createSchedulerProxy() { final Configuration conf = getConfig(); try { return ClientRMProxy.createRMProxy(conf, ApplicationMasterProtocol.class); } catch (IOException e) { throw new YarnRuntimeException(e); } } protected abstract void heartbeat() throws Exception; private void executeHeartbeatCallbacks() { Runnable callback = null; while ((callback = heartbeatCallbacks.poll()) != null) { callback.run(); } } @Override public long getLastHeartbeatTime() { return lastHeartbeatTime; } @Override public void runOnNextHeartbeat(Runnable callback) { heartbeatCallbacks.add(callback); } public void setShouldUnregister(boolean shouldUnregister) { this.shouldUnregister = shouldUnregister; LOG.info("RMCommunicator notified that shouldUnregistered is: " + shouldUnregister); } public void setSignalled(boolean isSignalled) { this.isSignalled = isSignalled; LOG.info("RMCommunicator notified that isSignalled is: " + isSignalled); } @VisibleForTesting protected boolean isApplicationMasterRegistered() { return isApplicationMasterRegistered; } public EnumSet<SchedulerResourceTypes> getSchedulerResourceTypes() { return schedulerResourceTypes; } }
13,108
35.823034
85
java
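The allocator thread in the RMCommunicator code above follows a simple poll, heartbeat, run-callbacks cycle. Below is a simplified, self-contained sketch of that loop; all names are illustrative, and error handling and service lifecycle are omitted.

import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicBoolean;

public class HeartbeatLoopSketch {
  private final AtomicBoolean stopped = new AtomicBoolean(false);
  private final ConcurrentLinkedQueue<Runnable> heartbeatCallbacks =
      new ConcurrentLinkedQueue<Runnable>();
  private volatile long lastHeartbeatTime;

  public void runOnNextHeartbeat(Runnable callback) {
    heartbeatCallbacks.add(callback);
  }

  void allocatorLoop(long pollIntervalMs) throws InterruptedException {
    while (!stopped.get() && !Thread.currentThread().isInterrupted()) {
      Thread.sleep(pollIntervalMs);
      heartbeat();                               // stand-in for the RM allocate call
      lastHeartbeatTime = System.currentTimeMillis();
      Runnable callback;
      while ((callback = heartbeatCallbacks.poll()) != null) {
        callback.run();                          // drain callbacks registered so far
      }
    }
  }

  void heartbeat() { /* placeholder: a real implementation contacts the RM here */ }
}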
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/ResourceCalculatorUtils.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.rm; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.proto.YarnServiceProtos.SchedulerResourceTypes; import org.apache.hadoop.yarn.util.Records; import java.util.EnumSet; public class ResourceCalculatorUtils { public static int divideAndCeil(int a, int b) { if (b == 0) { return 0; } return (a + (b - 1)) / b; } public static int computeAvailableContainers(Resource available, Resource required, EnumSet<SchedulerResourceTypes> resourceTypes) { if (resourceTypes.contains(SchedulerResourceTypes.CPU)) { return Math.min( calculateRatioOrMaxValue(available.getMemory(), required.getMemory()), calculateRatioOrMaxValue(available.getVirtualCores(), required .getVirtualCores())); } return calculateRatioOrMaxValue( available.getMemory(), required.getMemory()); } public static int divideAndCeilContainers(Resource required, Resource factor, EnumSet<SchedulerResourceTypes> resourceTypes) { if (resourceTypes.contains(SchedulerResourceTypes.CPU)) { return Math.max(divideAndCeil(required.getMemory(), factor.getMemory()), divideAndCeil(required.getVirtualCores(), factor.getVirtualCores())); } return divideAndCeil(required.getMemory(), factor.getMemory()); } private static int calculateRatioOrMaxValue(int numerator, int denominator) { if (denominator == 0) { return Integer.MAX_VALUE; } return numerator / denominator; } }
2,353
36.365079
79
java
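A small worked example of the ceiling-division helper in ResourceCalculatorUtils above. The formula is reproduced locally so the snippet runs without any YARN types; the class name is illustrative.

public class DivideAndCeilExample {
  // Same formula as ResourceCalculatorUtils.divideAndCeil(a, b).
  static int divideAndCeil(int a, int b) {
    if (b == 0) {
      return 0;
    }
    return (a + (b - 1)) / b;
  }

  public static void main(String[] args) {
    // Packing a 10240 MB requirement into 3072 MB containers needs 4 of them.
    System.out.println(divideAndCeil(10240, 3072)); // 4
    // Exact multiples do not round up.
    System.out.println(divideAndCeil(8192, 2048));  // 4
  }
}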
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/ContainerAllocator.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.rm; import org.apache.hadoop.yarn.event.EventHandler; public interface ContainerAllocator extends EventHandler<ContainerAllocatorEvent>{ enum EventType { CONTAINER_REQ, CONTAINER_DEALLOCATE, CONTAINER_FAILED } }
1,068
31.393939
82
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/metrics/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ @InterfaceAudience.Private package org.apache.hadoop.mapreduce.v2.app.metrics; import org.apache.hadoop.classification.InterfaceAudience;
944
44
75
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/metrics/MRAppMetrics.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.metrics; import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.mapreduce.v2.app.job.Task; import org.apache.hadoop.metrics2.MetricsSystem; import org.apache.hadoop.metrics2.annotation.Metric; import org.apache.hadoop.metrics2.annotation.Metrics; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.lib.MutableCounterInt; import org.apache.hadoop.metrics2.lib.MutableGaugeInt; import org.apache.hadoop.metrics2.source.JvmMetrics; @Metrics(about="MR App Metrics", context="mapred") public class MRAppMetrics { @Metric MutableCounterInt jobsSubmitted; @Metric MutableCounterInt jobsCompleted; @Metric MutableCounterInt jobsFailed; @Metric MutableCounterInt jobsKilled; @Metric MutableGaugeInt jobsPreparing; @Metric MutableGaugeInt jobsRunning; @Metric MutableCounterInt mapsLaunched; @Metric MutableCounterInt mapsCompleted; @Metric MutableCounterInt mapsFailed; @Metric MutableCounterInt mapsKilled; @Metric MutableGaugeInt mapsRunning; @Metric MutableGaugeInt mapsWaiting; @Metric MutableCounterInt reducesLaunched; @Metric MutableCounterInt reducesCompleted; @Metric MutableCounterInt reducesFailed; @Metric MutableCounterInt reducesKilled; @Metric MutableGaugeInt reducesRunning; @Metric MutableGaugeInt reducesWaiting; public static MRAppMetrics create() { return create(DefaultMetricsSystem.instance()); } public static MRAppMetrics create(MetricsSystem ms) { JvmMetrics.initSingleton("MRAppMaster", null); return ms.register(new MRAppMetrics()); } // potential instrumentation interface methods public void submittedJob(Job job) { jobsSubmitted.incr(); } public void completedJob(Job job) { jobsCompleted.incr(); } public void failedJob(Job job) { jobsFailed.incr(); } public void killedJob(Job job) { jobsKilled.incr(); } public void preparingJob(Job job) { jobsPreparing.incr(); } public void endPreparingJob(Job job) { jobsPreparing.decr(); } public void runningJob(Job job) { jobsRunning.incr(); } public void endRunningJob(Job job) { jobsRunning.decr(); } public void launchedTask(Task task) { switch (task.getType()) { case MAP: mapsLaunched.incr(); break; case REDUCE: reducesLaunched.incr(); break; } endWaitingTask(task); } public void completedTask(Task task) { switch (task.getType()) { case MAP: mapsCompleted.incr(); break; case REDUCE: reducesCompleted.incr(); break; } } public void failedTask(Task task) { switch (task.getType()) { case MAP: mapsFailed.incr(); break; case REDUCE: reducesFailed.incr(); break; } } public void killedTask(Task task) { switch (task.getType()) { case MAP: mapsKilled.incr(); break; case REDUCE: reducesKilled.incr(); break; } } public void runningTask(Task task) { switch (task.getType()) { case MAP: 
mapsRunning.incr(); break; case REDUCE: reducesRunning.incr(); break; } } public void endRunningTask(Task task) { switch (task.getType()) { case MAP: mapsRunning.decr(); break; case REDUCE: reducesRunning.decr(); break; } } public void waitingTask(Task task) { switch (task.getType()) { case MAP: mapsWaiting.incr(); break; case REDUCE: reducesWaiting.incr(); break; } } public void endWaitingTask(Task task) { switch (task.getType()) { case MAP: mapsWaiting.decr(); break; case REDUCE: reducesWaiting.decr(); break; } } }
4,671
24.530055
75
java
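MRAppMetrics above keeps one counter or gauge per task type and bumps it in a switch on Task.getType(). The sketch below illustrates that pattern with plain ints in place of the metrics2 mutable types; it is purely illustrative and not the Hadoop class.

public class TaskTypeCountersSketch {
  enum TaskType { MAP, REDUCE }

  private int mapsWaiting;
  private int reducesWaiting;
  private int mapsLaunched;
  private int reducesLaunched;

  void waitingTask(TaskType type) {
    switch (type) {
      case MAP:    mapsWaiting++;    break;
      case REDUCE: reducesWaiting++; break;
    }
  }

  void launchedTask(TaskType type) {
    switch (type) {
      case MAP:    mapsLaunched++;    break;
      case REDUCE: reducesLaunched++; break;
    }
    endWaitingTask(type); // a launched task is no longer waiting, as above
  }

  void endWaitingTask(TaskType type) {
    switch (type) {
      case MAP:    mapsWaiting--;    break;
      case REDUCE: reducesWaiting--; break;
    }
  }
}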
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ @InterfaceAudience.Private package org.apache.hadoop.mapreduce.v2.app.launcher; import org.apache.hadoop.classification.InterfaceAudience;
945
44.047619
75
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.launcher; import java.io.IOException; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Set; import java.util.concurrent.BlockingQueue; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadFactory; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapred.ShuffleHandler; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; import org.apache.hadoop.mapreduce.v2.app.AppContext; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerLaunchedEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptDiagnosticsUpdateEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest; import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest; import org.apache.hadoop.yarn.api.protocolrecords.StartContainersResponse; import org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest; import org.apache.hadoop.yarn.api.protocolrecords.StopContainersResponse; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.client.api.impl.ContainerManagementProtocolProxy; import org.apache.hadoop.yarn.client.api.impl.ContainerManagementProtocolProxy.ContainerManagementProtocolProxyData; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import com.google.common.util.concurrent.ThreadFactoryBuilder; /** * This class is responsible for launching of containers. 
*/ public class ContainerLauncherImpl extends AbstractService implements ContainerLauncher { static final Log LOG = LogFactory.getLog(ContainerLauncherImpl.class); private ConcurrentHashMap<ContainerId, Container> containers = new ConcurrentHashMap<ContainerId, Container>(); private final AppContext context; protected ThreadPoolExecutor launcherPool; protected int initialPoolSize; private int limitOnPoolSize; private Thread eventHandlingThread; protected BlockingQueue<ContainerLauncherEvent> eventQueue = new LinkedBlockingQueue<ContainerLauncherEvent>(); private final AtomicBoolean stopped; private ContainerManagementProtocolProxy cmProxy; private Container getContainer(ContainerLauncherEvent event) { ContainerId id = event.getContainerID(); Container c = containers.get(id); if(c == null) { c = new Container(event.getTaskAttemptID(), event.getContainerID(), event.getContainerMgrAddress()); Container old = containers.putIfAbsent(id, c); if(old != null) { c = old; } } return c; } private void removeContainerIfDone(ContainerId id) { Container c = containers.get(id); if(c != null && c.isCompletelyDone()) { containers.remove(id); } } private static enum ContainerState { PREP, FAILED, RUNNING, DONE, KILLED_BEFORE_LAUNCH } private class Container { private ContainerState state; // store enough information to be able to cleanup the container private TaskAttemptId taskAttemptID; private ContainerId containerID; final private String containerMgrAddress; public Container(TaskAttemptId taId, ContainerId containerID, String containerMgrAddress) { this.state = ContainerState.PREP; this.taskAttemptID = taId; this.containerMgrAddress = containerMgrAddress; this.containerID = containerID; } public synchronized boolean isCompletelyDone() { return state == ContainerState.DONE || state == ContainerState.FAILED; } public synchronized void done() { state = ContainerState.DONE; } @SuppressWarnings("unchecked") public synchronized void launch(ContainerRemoteLaunchEvent event) { LOG.info("Launching " + taskAttemptID); if(this.state == ContainerState.KILLED_BEFORE_LAUNCH) { state = ContainerState.DONE; sendContainerLaunchFailedMsg(taskAttemptID, "Container was killed before it was launched"); return; } ContainerManagementProtocolProxyData proxy = null; try { proxy = getCMProxy(containerMgrAddress, containerID); // Construct the actual Container ContainerLaunchContext containerLaunchContext = event.getContainerLaunchContext(); // Now launch the actual container StartContainerRequest startRequest = StartContainerRequest.newInstance(containerLaunchContext, event.getContainerToken()); List<StartContainerRequest> list = new ArrayList<StartContainerRequest>(); list.add(startRequest); StartContainersRequest requestList = StartContainersRequest.newInstance(list); StartContainersResponse response = proxy.getContainerManagementProtocol().startContainers(requestList); if (response.getFailedRequests() != null && response.getFailedRequests().containsKey(containerID)) { throw response.getFailedRequests().get(containerID).deSerialize(); } ByteBuffer portInfo = response.getAllServicesMetaData().get( ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID); int port = -1; if(portInfo != null) { port = ShuffleHandler.deserializeMetaData(portInfo); } LOG.info("Shuffle port returned by ContainerManager for " + taskAttemptID + " : " + port); if(port < 0) { this.state = ContainerState.FAILED; throw new IllegalStateException("Invalid shuffle port number " + port + " returned for " + taskAttemptID); } // after launching, send launched event to task 
attempt to move // it from ASSIGNED to RUNNING state context.getEventHandler().handle( new TaskAttemptContainerLaunchedEvent(taskAttemptID, port)); this.state = ContainerState.RUNNING; } catch (Throwable t) { String message = "Container launch failed for " + containerID + " : " + StringUtils.stringifyException(t); this.state = ContainerState.FAILED; sendContainerLaunchFailedMsg(taskAttemptID, message); } finally { if (proxy != null) { cmProxy.mayBeCloseProxy(proxy); } } } @SuppressWarnings("unchecked") public synchronized void kill() { if(this.state == ContainerState.PREP) { this.state = ContainerState.KILLED_BEFORE_LAUNCH; } else if (!isCompletelyDone()) { LOG.info("KILLING " + taskAttemptID); ContainerManagementProtocolProxyData proxy = null; try { proxy = getCMProxy(this.containerMgrAddress, this.containerID); // kill the remote container if already launched List<ContainerId> ids = new ArrayList<ContainerId>(); ids.add(this.containerID); StopContainersRequest request = StopContainersRequest.newInstance(ids); StopContainersResponse response = proxy.getContainerManagementProtocol().stopContainers(request); if (response.getFailedRequests() != null && response.getFailedRequests().containsKey(this.containerID)) { throw response.getFailedRequests().get(this.containerID) .deSerialize(); } } catch (Throwable t) { // ignore the cleanup failure String message = "cleanup failed for container " + this.containerID + " : " + StringUtils.stringifyException(t); context.getEventHandler() .handle( new TaskAttemptDiagnosticsUpdateEvent(this.taskAttemptID, message)); LOG.warn(message); } finally { if (proxy != null) { cmProxy.mayBeCloseProxy(proxy); } } this.state = ContainerState.DONE; } // after killing, send killed event to task attempt context.getEventHandler().handle( new TaskAttemptEvent(this.taskAttemptID, TaskAttemptEventType.TA_CONTAINER_CLEANED)); } } public ContainerLauncherImpl(AppContext context) { super(ContainerLauncherImpl.class.getName()); this.context = context; this.stopped = new AtomicBoolean(false); } @Override protected void serviceInit(Configuration conf) throws Exception { this.limitOnPoolSize = conf.getInt( MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT, MRJobConfig.DEFAULT_MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT); LOG.info("Upper limit on the thread pool size is " + this.limitOnPoolSize); this.initialPoolSize = conf.getInt( MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREADPOOL_INITIAL_SIZE, MRJobConfig.DEFAULT_MR_AM_CONTAINERLAUNCHER_THREADPOOL_INITIAL_SIZE); LOG.info("The thread pool initial size is " + this.initialPoolSize); super.serviceInit(conf); cmProxy = new ContainerManagementProtocolProxy(conf); } protected void serviceStart() throws Exception { ThreadFactory tf = new ThreadFactoryBuilder().setNameFormat( "ContainerLauncher #%d").setDaemon(true).build(); // Start with a default core-pool size of 10 and change it dynamically. 
launcherPool = new ThreadPoolExecutor(initialPoolSize, Integer.MAX_VALUE, 1, TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>(), tf); eventHandlingThread = new Thread() { @Override public void run() { ContainerLauncherEvent event = null; Set<String> allNodes = new HashSet<String>(); while (!stopped.get() && !Thread.currentThread().isInterrupted()) { try { event = eventQueue.take(); } catch (InterruptedException e) { if (!stopped.get()) { LOG.error("Returning, interrupted : " + e); } return; } allNodes.add(event.getContainerMgrAddress()); int poolSize = launcherPool.getCorePoolSize(); // See if we need up the pool size only if haven't reached the // maximum limit yet. if (poolSize != limitOnPoolSize) { // nodes where containers will run at *this* point of time. This is // *not* the cluster size and doesn't need to be. int numNodes = allNodes.size(); int idealPoolSize = Math.min(limitOnPoolSize, numNodes); if (poolSize < idealPoolSize) { // Bump up the pool size to idealPoolSize+initialPoolSize, the // later is just a buffer so we are not always increasing the // pool-size int newPoolSize = Math.min(limitOnPoolSize, idealPoolSize + initialPoolSize); LOG.info("Setting ContainerLauncher pool size to " + newPoolSize + " as number-of-nodes to talk to is " + numNodes); launcherPool.setCorePoolSize(newPoolSize); } } // the events from the queue are handled in parallel // using a thread pool launcherPool.execute(createEventProcessor(event)); // TODO: Group launching of multiple containers to a single // NodeManager into a single connection } } }; eventHandlingThread.setName("ContainerLauncher Event Handler"); eventHandlingThread.start(); super.serviceStart(); } private void shutdownAllContainers() { for (Container ct : this.containers.values()) { if (ct != null) { ct.kill(); } } } protected void serviceStop() throws Exception { if (stopped.getAndSet(true)) { // return if already stopped return; } // shutdown any containers that might be left running shutdownAllContainers(); if (eventHandlingThread != null) { eventHandlingThread.interrupt(); } if (launcherPool != null) { launcherPool.shutdownNow(); } super.serviceStop(); } protected EventProcessor createEventProcessor(ContainerLauncherEvent event) { return new EventProcessor(event); } /** * Setup and start the container on remote nodemanager. */ class EventProcessor implements Runnable { private ContainerLauncherEvent event; EventProcessor(ContainerLauncherEvent event) { this.event = event; } @Override public void run() { LOG.info("Processing the event " + event.toString()); // Load ContainerManager tokens before creating a connection. // TODO: Do it only once per NodeManager. 
ContainerId containerID = event.getContainerID(); Container c = getContainer(event); switch(event.getType()) { case CONTAINER_REMOTE_LAUNCH: ContainerRemoteLaunchEvent launchEvent = (ContainerRemoteLaunchEvent) event; c.launch(launchEvent); break; case CONTAINER_REMOTE_CLEANUP: c.kill(); break; case CONTAINER_COMPLETED: c.done(); break; } removeContainerIfDone(containerID); } } @SuppressWarnings("unchecked") void sendContainerLaunchFailedMsg(TaskAttemptId taskAttemptID, String message) { LOG.error(message); context.getEventHandler().handle( new TaskAttemptDiagnosticsUpdateEvent(taskAttemptID, message)); context.getEventHandler().handle( new TaskAttemptEvent(taskAttemptID, TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED)); } @Override public void handle(ContainerLauncherEvent event) { try { eventQueue.put(event); } catch (InterruptedException e) { throw new YarnRuntimeException(e); } } public ContainerManagementProtocolProxy.ContainerManagementProtocolProxyData getCMProxy(String containerMgrBindAddr, ContainerId containerId) throws IOException { return cmProxy.getProxy(containerMgrBindAddr, containerId); } }
15,390
35.558195
116
java
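The event-handling thread in ContainerLauncherImpl above grows the launcher thread pool as containers land on more distinct NodeManagers, up to a configured limit. A minimal standalone sketch of that sizing rule follows; names are illustrative.

public class LauncherPoolSizeSketch {
  // Grow the core pool toward the number of nodes seen so far, plus a small
  // buffer, never exceeding the configured limit.
  static int nextCorePoolSize(int currentPoolSize, int numNodesSeen,
      int initialPoolSize, int limitOnPoolSize) {
    int idealPoolSize = Math.min(limitOnPoolSize, numNodesSeen);
    if (currentPoolSize < idealPoolSize) {
      return Math.min(limitOnPoolSize, idealPoolSize + initialPoolSize);
    }
    return currentPoolSize; // already large enough for the nodes seen so far
  }

  public static void main(String[] args) {
    // With an initial size of 10 and a limit of 500, seeing containers on 37
    // distinct nodes would raise the core pool to 47 threads.
    System.out.println(nextCorePoolSize(10, 37, 10, 500)); // 47
  }
}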
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerRemoteLaunchEvent.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.launcher; import org.apache.hadoop.mapred.Task; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; import org.apache.hadoop.util.StringInterner; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; public class ContainerRemoteLaunchEvent extends ContainerLauncherEvent { private final Container allocatedContainer; private final ContainerLaunchContext containerLaunchContext; private final Task task; public ContainerRemoteLaunchEvent(TaskAttemptId taskAttemptID, ContainerLaunchContext containerLaunchContext, Container allocatedContainer, Task remoteTask) { super(taskAttemptID, allocatedContainer.getId(), StringInterner .weakIntern(allocatedContainer.getNodeId().toString()), allocatedContainer.getContainerToken(), ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH); this.allocatedContainer = allocatedContainer; this.containerLaunchContext = containerLaunchContext; this.task = remoteTask; } public ContainerLaunchContext getContainerLaunchContext() { return this.containerLaunchContext; } public Container getAllocatedContainer() { return this.allocatedContainer; } public Task getRemoteTask() { return this.task; } @Override public int hashCode() { return super.hashCode(); } @Override public boolean equals(Object obj) { return super.equals(obj); } }
2,288
33.164179
74
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherEvent.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.launcher; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.Token; import org.apache.hadoop.yarn.event.AbstractEvent; public class ContainerLauncherEvent extends AbstractEvent<ContainerLauncher.EventType> { private TaskAttemptId taskAttemptID; private ContainerId containerID; private String containerMgrAddress; private Token containerToken; public ContainerLauncherEvent(TaskAttemptId taskAttemptID, ContainerId containerID, String containerMgrAddress, Token containerToken, ContainerLauncher.EventType type) { super(type); this.taskAttemptID = taskAttemptID; this.containerID = containerID; this.containerMgrAddress = containerMgrAddress; this.containerToken = containerToken; } public TaskAttemptId getTaskAttemptID() { return this.taskAttemptID; } public ContainerId getContainerID() { return containerID; } public String getContainerMgrAddress() { return containerMgrAddress; } public Token getContainerToken() { return containerToken; } @Override public String toString() { return super.toString() + " for container " + containerID + " taskAttempt " + taskAttemptID; } @Override public int hashCode() { final int prime = 31; int result = 1; result = prime * result + ((containerID == null) ? 0 : containerID.hashCode()); result = prime * result + ((containerMgrAddress == null) ? 0 : containerMgrAddress.hashCode()); result = prime * result + ((containerToken == null) ? 0 : containerToken.hashCode()); result = prime * result + ((taskAttemptID == null) ? 0 : taskAttemptID.hashCode()); return result; } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (getClass() != obj.getClass()) return false; ContainerLauncherEvent other = (ContainerLauncherEvent) obj; if (containerID == null) { if (other.containerID != null) return false; } else if (!containerID.equals(other.containerID)) return false; if (containerMgrAddress == null) { if (other.containerMgrAddress != null) return false; } else if (!containerMgrAddress.equals(other.containerMgrAddress)) return false; if (containerToken == null) { if (other.containerToken != null) return false; } else if (!containerToken.equals(other.containerToken)) return false; if (taskAttemptID == null) { if (other.taskAttemptID != null) return false; } else if (!taskAttemptID.equals(other.taskAttemptID)) return false; return true; } }
3,637
30.362069
79
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncher.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.launcher; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.yarn.event.EventHandler; public interface ContainerLauncher extends EventHandler<ContainerLauncherEvent> { enum EventType { CONTAINER_REMOTE_LAUNCH, CONTAINER_REMOTE_CLEANUP, // When TaskAttempt receives TA_CONTAINER_COMPLETED, // it will notify ContainerLauncher so that the container can be removed // from ContainerLauncher's launched containers list // Otherwise, ContainerLauncher will try to stop the containers as part of // serviceStop. CONTAINER_COMPLETED } }
1,433
34.85
78
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ @InterfaceAudience.Private package org.apache.hadoop.mapreduce.v2.app.local; import org.apache.hadoop.classification.InterfaceAudience;
942
43.904762
75
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.local; import java.io.IOException; import java.util.ArrayList; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.JobCounter; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.v2.api.records.TaskType; import org.apache.hadoop.mapreduce.v2.app.AppContext; import org.apache.hadoop.mapreduce.v2.app.client.ClientService; import org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent; import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator; import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent; import org.apache.hadoop.mapreduce.v2.app.rm.RMCommunicator; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.api.records.Token; import org.apache.hadoop.yarn.client.ClientRMProxy; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException; import org.apache.hadoop.yarn.exceptions.ApplicationMasterNotRegisteredException; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; /** * Allocates containers locally. Doesn't allocate a real container; * instead sends an allocated event for all requests. 
*/ public class LocalContainerAllocator extends RMCommunicator implements ContainerAllocator { private static final Log LOG = LogFactory.getLog(LocalContainerAllocator.class); @SuppressWarnings("rawtypes") private final EventHandler eventHandler; private long retryInterval; private long retrystartTime; private String nmHost; private int nmPort; private int nmHttpPort; private ContainerId containerId; protected int lastResponseID; private final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null); public LocalContainerAllocator(ClientService clientService, AppContext context, String nmHost, int nmPort, int nmHttpPort , ContainerId cId) { super(clientService, context); this.eventHandler = context.getEventHandler(); this.nmHost = nmHost; this.nmPort = nmPort; this.nmHttpPort = nmHttpPort; this.containerId = cId; } @Override protected void serviceInit(Configuration conf) throws Exception { super.serviceInit(conf); retryInterval = getConfig().getLong(MRJobConfig.MR_AM_TO_RM_WAIT_INTERVAL_MS, MRJobConfig.DEFAULT_MR_AM_TO_RM_WAIT_INTERVAL_MS); // Init startTime to current time. If all goes well, it will be reset after // first attempt to contact RM. retrystartTime = System.currentTimeMillis(); } @SuppressWarnings("unchecked") @Override protected synchronized void heartbeat() throws Exception { AllocateRequest allocateRequest = AllocateRequest.newInstance(this.lastResponseID, super.getApplicationProgress(), new ArrayList<ResourceRequest>(), new ArrayList<ContainerId>(), null); AllocateResponse allocateResponse = null; try { allocateResponse = scheduler.allocate(allocateRequest); // Reset retry count if no exception occurred. retrystartTime = System.currentTimeMillis(); } catch (ApplicationAttemptNotFoundException e) { LOG.info("Event from RM: shutting down Application Master"); // This can happen if the RM has been restarted. If it is in that state, // this application must clean itself up. eventHandler.handle(new JobEvent(this.getJob().getID(), JobEventType.JOB_AM_REBOOT)); throw new YarnRuntimeException( "Resource Manager doesn't recognize AttemptId: " + this.getContext().getApplicationID(), e); } catch (ApplicationMasterNotRegisteredException e) { LOG.info("ApplicationMaster is out of sync with ResourceManager," + " hence resync and send outstanding requests."); this.lastResponseID = 0; register(); } catch (Exception e) { // This can happen when the connection to the RM has gone down. Keep // re-trying until the retryInterval has expired. if (System.currentTimeMillis() - retrystartTime >= retryInterval) { LOG.error("Could not contact RM after " + retryInterval + " milliseconds."); eventHandler.handle(new JobEvent(this.getJob().getID(), JobEventType.INTERNAL_ERROR)); throw new YarnRuntimeException("Could not contact RM after " + retryInterval + " milliseconds."); } // Throw this up to the caller, which may decide to ignore it and // continue to attempt to contact the RM. 
throw e; } if (allocateResponse != null) { this.lastResponseID = allocateResponse.getResponseId(); Token token = allocateResponse.getAMRMToken(); if (token != null) { updateAMRMToken(token); } } } private void updateAMRMToken(Token token) throws IOException { org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> amrmToken = new org.apache.hadoop.security.token.Token<AMRMTokenIdentifier>(token .getIdentifier().array(), token.getPassword().array(), new Text( token.getKind()), new Text(token.getService())); UserGroupInformation currentUGI = UserGroupInformation.getCurrentUser(); currentUGI.addToken(amrmToken); amrmToken.setService(ClientRMProxy.getAMRMTokenService(getConfig())); } @SuppressWarnings("unchecked") @Override public void handle(ContainerAllocatorEvent event) { if (event.getType() == ContainerAllocator.EventType.CONTAINER_REQ) { LOG.info("Processing the event " + event.toString()); // Assign the same container ID as the AM ContainerId cID = ContainerId.newContainerId(getContext().getApplicationAttemptId(), this.containerId.getContainerId()); Container container = recordFactory.newRecordInstance(Container.class); container.setId(cID); NodeId nodeId = NodeId.newInstance(this.nmHost, this.nmPort); container.setNodeId(nodeId); container.setContainerToken(null); container.setNodeHttpAddress(this.nmHost + ":" + this.nmHttpPort); // send the container-assigned event to task attempt if (event.getAttemptID().getTaskId().getTaskType() == TaskType.MAP) { JobCounterUpdateEvent jce = new JobCounterUpdateEvent(event.getAttemptID().getTaskId() .getJobId()); // TODO Setting OTHER_LOCAL_MAP for now. jce.addCounterUpdate(JobCounter.OTHER_LOCAL_MAPS, 1); eventHandler.handle(jce); } eventHandler.handle(new TaskAttemptContainerAssignedEvent( event.getAttemptID(), container, applicationACLs)); } } }
8,392
42.487047
86
java
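The heartbeat() method in LocalContainerAllocator above keeps tolerating transient RM failures until MR_AM_TO_RM_WAIT_INTERVAL_MS has elapsed since the last successful allocate() call, then raises an INTERNAL_ERROR job event. Below is a minimal, self-contained sketch of just that retry-window bookkeeping; the class and method names are illustrative, not part of the Hadoop API.

public class RetryWindowSketch {
  private final long retryIntervalMs;   // analogous to MR_AM_TO_RM_WAIT_INTERVAL_MS
  private long retryStartTime = System.currentTimeMillis();

  public RetryWindowSketch(long retryIntervalMs) {
    this.retryIntervalMs = retryIntervalMs;
  }

  // A successful allocate() round trip resets the window.
  public void onSuccess() {
    retryStartTime = System.currentTimeMillis();
  }

  // A transient failure is tolerated only while the window is still open.
  public boolean shouldKeepRetrying() {
    return System.currentTimeMillis() - retryStartTime < retryIntervalMs;
  }
}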
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ @InterfaceAudience.Private package org.apache.hadoop.mapreduce.v2.app.speculate; import org.apache.hadoop.classification.InterfaceAudience;
946
44.095238
75
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/TaskRuntimeEstimator.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.speculate; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; import org.apache.hadoop.mapreduce.v2.api.records.TaskId; import org.apache.hadoop.mapreduce.v2.app.AppContext; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus; public interface TaskRuntimeEstimator { public void enrollAttempt(TaskAttemptStatus reportedStatus, long timestamp); public long attemptEnrolledTime(TaskAttemptId attemptID); public void updateAttempt(TaskAttemptStatus reportedStatus, long timestamp); public void contextualize(Configuration conf, AppContext context); /** * * Find a maximum reasonable execution wallclock time. Includes the time * already elapsed. If the projected total execution time for this task * ever exceeds its reasonable execution time, we may speculate it. * * @param id the {@link TaskId} of the task we are asking about * @return the task's maximum reasonable runtime, or MAX_VALUE if * we don't have enough information to rule out any runtime, * however long. * */ public long thresholdRuntime(TaskId id); /** * * Estimate a task attempt's total runtime. Includes the time already * elapsed. * * @param id the {@link TaskAttemptId} of the attempt we are asking about * @return our best estimate of the attempt's runtime, or {@code -1} if * we don't have enough information yet to produce an estimate. * */ public long estimatedRuntime(TaskAttemptId id); /** * * Estimates how long a new attempt on this task will take if we start * one now. * * @param id the {@link TaskId} of the task we are asking about * @return our best estimate of a new attempt's runtime, or {@code -1} if * we don't have enough information yet to produce an estimate. * */ public long estimatedNewAttemptRuntime(TaskId id); /** * * Computes the width of the error band of our estimate of the task * runtime as returned by {@link #estimatedRuntime(TaskAttemptId)} * * @param id the {@link TaskAttemptId} of the attempt we are asking about * @return our best estimate of the variance of the attempt's runtime, or {@code -1} if * we don't have enough information yet to produce an estimate. * */ public long runtimeEstimateVariance(TaskAttemptId id); }
3,343
35.747253
99
java
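Implementations of this interface are pluggable: the speculator loads the estimator class named by MRJobConfig.MR_AM_TASK_ESTIMATOR through a no-argument constructor and then calls contextualize(conf, context) (see DefaultSpeculator#getEstimator later in this package). A hedged configuration sketch follows; the choice of LegacyTaskRuntimeEstimator here is only an example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.v2.app.speculate.LegacyTaskRuntimeEstimator;
import org.apache.hadoop.mapreduce.v2.app.speculate.TaskRuntimeEstimator;

public class EstimatorConfigSketch {
  public static Configuration withEstimator() {
    Configuration conf = new Configuration();
    // The speculator reads this key with conf.getClass(...) and instantiates
    // the named class reflectively, so it needs a public no-arg constructor.
    conf.setClass(MRJobConfig.MR_AM_TASK_ESTIMATOR,
        LegacyTaskRuntimeEstimator.class, TaskRuntimeEstimator.class);
    return conf;
  }
}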
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/ExponentiallySmoothedTaskRuntimeEstimator.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.speculate; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicReference; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; import org.apache.hadoop.mapreduce.v2.app.AppContext; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus; /** * This estimator exponentially smooths the rate of progress versus wallclock * time. Conceivably we could write an estimator that smooths time per * unit progress, and get different results. */ public class ExponentiallySmoothedTaskRuntimeEstimator extends StartEndTimesBase { private final ConcurrentMap<TaskAttemptId, AtomicReference<EstimateVector>> estimates = new ConcurrentHashMap<TaskAttemptId, AtomicReference<EstimateVector>>(); private SmoothedValue smoothedValue; private long lambda; public enum SmoothedValue { RATE, TIME_PER_UNIT_PROGRESS } ExponentiallySmoothedTaskRuntimeEstimator (long lambda, SmoothedValue smoothedValue) { super(); this.smoothedValue = smoothedValue; this.lambda = lambda; } public ExponentiallySmoothedTaskRuntimeEstimator() { super(); } // immutable private class EstimateVector { final double value; final float basedOnProgress; final long atTime; EstimateVector(double value, float basedOnProgress, long atTime) { this.value = value; this.basedOnProgress = basedOnProgress; this.atTime = atTime; } EstimateVector incorporate(float newProgress, long newAtTime) { if (newAtTime <= atTime || newProgress < basedOnProgress) { return this; } double oldWeighting = value < 0.0 ? 0.0 : Math.exp(((double) (newAtTime - atTime)) / lambda); double newRead = (newProgress - basedOnProgress) / (newAtTime - atTime); if (smoothedValue == SmoothedValue.TIME_PER_UNIT_PROGRESS) { newRead = 1.0 / newRead; } return new EstimateVector (value * oldWeighting + newRead * (1.0 - oldWeighting), newProgress, newAtTime); } } private void incorporateReading (TaskAttemptId attemptID, float newProgress, long newTime) { //TODO: Refactor this method, it seems more complicated than necessary. 
AtomicReference<EstimateVector> vectorRef = estimates.get(attemptID); if (vectorRef == null) { estimates.putIfAbsent(attemptID, new AtomicReference<EstimateVector>(null)); incorporateReading(attemptID, newProgress, newTime); return; } EstimateVector oldVector = vectorRef.get(); if (oldVector == null) { if (vectorRef.compareAndSet(null, new EstimateVector(-1.0, 0.0F, Long.MIN_VALUE))) { return; } incorporateReading(attemptID, newProgress, newTime); return; } while (!vectorRef.compareAndSet (oldVector, oldVector.incorporate(newProgress, newTime))) { oldVector = vectorRef.get(); } } private EstimateVector getEstimateVector(TaskAttemptId attemptID) { AtomicReference<EstimateVector> vectorRef = estimates.get(attemptID); if (vectorRef == null) { return null; } return vectorRef.get(); } @Override public void contextualize(Configuration conf, AppContext context) { super.contextualize(conf, context); lambda = conf.getLong(MRJobConfig.MR_AM_TASK_ESTIMATOR_SMOOTH_LAMBDA_MS, MRJobConfig.DEFAULT_MR_AM_TASK_ESTIMATOR_SMOOTH_LAMBDA_MS); smoothedValue = conf.getBoolean(MRJobConfig.MR_AM_TASK_ESTIMATOR_EXPONENTIAL_RATE_ENABLE, true) ? SmoothedValue.RATE : SmoothedValue.TIME_PER_UNIT_PROGRESS; } @Override public long estimatedRuntime(TaskAttemptId id) { Long startTime = startTimes.get(id); if (startTime == null) { return -1L; } EstimateVector vector = getEstimateVector(id); if (vector == null) { return -1L; } long sunkTime = vector.atTime - startTime; double value = vector.value; float progress = vector.basedOnProgress; if (value == 0) { return -1L; } double rate = smoothedValue == SmoothedValue.RATE ? value : 1.0 / value; if (rate == 0.0) { return -1L; } double remainingTime = (1.0 - progress) / rate; return sunkTime + (long)remainingTime; } @Override public long runtimeEstimateVariance(TaskAttemptId id) { return -1L; } @Override public void updateAttempt(TaskAttemptStatus status, long timestamp) { super.updateAttempt(status, timestamp); TaskAttemptId attemptID = status.id; float progress = status.progress; incorporateReading(attemptID, progress, timestamp); } }
5,683
28.450777
99
java
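estimatedRuntime(id) in ExponentiallySmoothedTaskRuntimeEstimator projects the total runtime as the time already sunk plus the remaining progress divided by the smoothed progress rate. A self-contained arithmetic sketch with made-up numbers (the smoothing of the rate itself is omitted):

public class SmoothedEstimateSketch {
  public static void main(String[] args) {
    long startTime = 0L;               // enrollment time of the attempt, ms
    long atTime = 100_000L;            // timestamp of the latest estimate vector, ms
    float progress = 0.5f;             // progress recorded in that vector
    double rate = 0.5 / 100_000.0;     // smoothed progress per millisecond

    long sunkTime = atTime - startTime;              // 100,000 ms already spent
    double remainingTime = (1.0 - progress) / rate;  // 100,000 ms still projected
    long estimate = sunkTime + (long) remainingTime; // 200,000 ms total
    System.out.println("estimated total runtime (ms): " + estimate);
  }
}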
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/NullTaskRuntimesEngine.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.speculate; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; import org.apache.hadoop.mapreduce.v2.api.records.TaskId; import org.apache.hadoop.mapreduce.v2.app.AppContext; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus; /* * This class is provided solely as an example of the values that mean * that nothing needs to be computed. It's not currently used. */ public class NullTaskRuntimesEngine implements TaskRuntimeEstimator { @Override public void enrollAttempt(TaskAttemptStatus status, long timestamp) { // no code } @Override public long attemptEnrolledTime(TaskAttemptId attemptID) { return Long.MAX_VALUE; } @Override public void updateAttempt(TaskAttemptStatus status, long timestamp) { // no code } @Override public void contextualize(Configuration conf, AppContext context) { // no code } @Override public long thresholdRuntime(TaskId id) { return Long.MAX_VALUE; } @Override public long estimatedRuntime(TaskAttemptId id) { return -1L; } @Override public long estimatedNewAttemptRuntime(TaskId id) { return -1L; } @Override public long runtimeEstimateVariance(TaskAttemptId id) { return -1L; } }
2,151
28.479452
99
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.speculate; import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; import java.util.HashSet; import java.util.Map; import java.util.Set; import java.util.concurrent.BlockingQueue; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.v2.api.records.JobId; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState; import org.apache.hadoop.mapreduce.v2.api.records.TaskId; import org.apache.hadoop.mapreduce.v2.api.records.TaskType; import org.apache.hadoop.mapreduce.v2.app.AppContext; import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.mapreduce.v2.app.job.Task; import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.util.Clock; import com.google.common.annotations.VisibleForTesting; public class DefaultSpeculator extends AbstractService implements Speculator { private static final long ON_SCHEDULE = Long.MIN_VALUE; private static final long ALREADY_SPECULATING = Long.MIN_VALUE + 1; private static final long TOO_NEW = Long.MIN_VALUE + 2; private static final long PROGRESS_IS_GOOD = Long.MIN_VALUE + 3; private static final long NOT_RUNNING = Long.MIN_VALUE + 4; private static final long TOO_LATE_TO_SPECULATE = Long.MIN_VALUE + 5; private long soonestRetryAfterNoSpeculate; private long soonestRetryAfterSpeculate; private double proportionRunningTasksSpeculatable; private double proportionTotalTasksSpeculatable; private int minimumAllowedSpeculativeTasks; private static final Log LOG = LogFactory.getLog(DefaultSpeculator.class); private final ConcurrentMap<TaskId, Boolean> runningTasks = new ConcurrentHashMap<TaskId, Boolean>(); // Used to track any TaskAttempts that aren't heart-beating for a while, so // that we can aggressively speculate instead of waiting for task-timeout. 
private final ConcurrentMap<TaskAttemptId, TaskAttemptHistoryStatistics> runningTaskAttemptStatistics = new ConcurrentHashMap<TaskAttemptId, TaskAttemptHistoryStatistics>(); // Regular heartbeat from tasks is every 3 secs. So if we don't get a // heartbeat in 9 secs (3 heartbeats), we simulate a heartbeat with no change // in progress. private static final long MAX_WAITTING_TIME_FOR_HEARTBEAT = 9 * 1000; // These are the current needs, not the initial needs. For each job, these // record the number of attempts that exist and that are actively // waiting for a container [as opposed to running or finished] private final ConcurrentMap<JobId, AtomicInteger> mapContainerNeeds = new ConcurrentHashMap<JobId, AtomicInteger>(); private final ConcurrentMap<JobId, AtomicInteger> reduceContainerNeeds = new ConcurrentHashMap<JobId, AtomicInteger>(); private final Set<TaskId> mayHaveSpeculated = new HashSet<TaskId>(); private final Configuration conf; private AppContext context; private Thread speculationBackgroundThread = null; private volatile boolean stopped = false; private BlockingQueue<SpeculatorEvent> eventQueue = new LinkedBlockingQueue<SpeculatorEvent>(); private TaskRuntimeEstimator estimator; private BlockingQueue<Object> scanControl = new LinkedBlockingQueue<Object>(); private final Clock clock; private final EventHandler<TaskEvent> eventHandler; public DefaultSpeculator(Configuration conf, AppContext context) { this(conf, context, context.getClock()); } public DefaultSpeculator(Configuration conf, AppContext context, Clock clock) { this(conf, context, getEstimator(conf, context), clock); } static private TaskRuntimeEstimator getEstimator (Configuration conf, AppContext context) { TaskRuntimeEstimator estimator; try { // "yarn.mapreduce.job.task.runtime.estimator.class" Class<? extends TaskRuntimeEstimator> estimatorClass = conf.getClass(MRJobConfig.MR_AM_TASK_ESTIMATOR, LegacyTaskRuntimeEstimator.class, TaskRuntimeEstimator.class); Constructor<? extends TaskRuntimeEstimator> estimatorConstructor = estimatorClass.getConstructor(); estimator = estimatorConstructor.newInstance(); estimator.contextualize(conf, context); } catch (InstantiationException ex) { LOG.error("Can't make a speculation runtime estimator", ex); throw new YarnRuntimeException(ex); } catch (IllegalAccessException ex) { LOG.error("Can't make a speculation runtime estimator", ex); throw new YarnRuntimeException(ex); } catch (InvocationTargetException ex) { LOG.error("Can't make a speculation runtime estimator", ex); throw new YarnRuntimeException(ex); } catch (NoSuchMethodException ex) { LOG.error("Can't make a speculation runtime estimator", ex); throw new YarnRuntimeException(ex); } return estimator; } // This constructor is designed to be called by other constructors. // However, it's public because we do use it in the test cases. // Normally we figure out our own estimator. 
public DefaultSpeculator (Configuration conf, AppContext context, TaskRuntimeEstimator estimator, Clock clock) { super(DefaultSpeculator.class.getName()); this.conf = conf; this.context = context; this.estimator = estimator; this.clock = clock; this.eventHandler = context.getEventHandler(); this.soonestRetryAfterNoSpeculate = conf.getLong(MRJobConfig.SPECULATIVE_RETRY_AFTER_NO_SPECULATE, MRJobConfig.DEFAULT_SPECULATIVE_RETRY_AFTER_NO_SPECULATE); this.soonestRetryAfterSpeculate = conf.getLong(MRJobConfig.SPECULATIVE_RETRY_AFTER_SPECULATE, MRJobConfig.DEFAULT_SPECULATIVE_RETRY_AFTER_SPECULATE); this.proportionRunningTasksSpeculatable = conf.getDouble(MRJobConfig.SPECULATIVECAP_RUNNING_TASKS, MRJobConfig.DEFAULT_SPECULATIVECAP_RUNNING_TASKS); this.proportionTotalTasksSpeculatable = conf.getDouble(MRJobConfig.SPECULATIVECAP_TOTAL_TASKS, MRJobConfig.DEFAULT_SPECULATIVECAP_TOTAL_TASKS); this.minimumAllowedSpeculativeTasks = conf.getInt(MRJobConfig.SPECULATIVE_MINIMUM_ALLOWED_TASKS, MRJobConfig.DEFAULT_SPECULATIVE_MINIMUM_ALLOWED_TASKS); } /* ************************************************************* */ // This is the task-mongering that creates the two new threads -- one for // processing events from the event queue and one for periodically // looking for speculation opportunities @Override protected void serviceStart() throws Exception { Runnable speculationBackgroundCore = new Runnable() { @Override public void run() { while (!stopped && !Thread.currentThread().isInterrupted()) { long backgroundRunStartTime = clock.getTime(); try { int speculations = computeSpeculations(); long mininumRecomp = speculations > 0 ? soonestRetryAfterSpeculate : soonestRetryAfterNoSpeculate; long wait = Math.max(mininumRecomp, clock.getTime() - backgroundRunStartTime); if (speculations > 0) { LOG.info("We launched " + speculations + " speculations. Sleeping " + wait + " milliseconds."); } Object pollResult = scanControl.poll(wait, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { if (!stopped) { LOG.error("Background thread returning, interrupted", e); } return; } } } }; speculationBackgroundThread = new Thread (speculationBackgroundCore, "DefaultSpeculator background processing"); speculationBackgroundThread.start(); super.serviceStart(); } @Override protected void serviceStop()throws Exception { stopped = true; // this could be called before background thread is established if (speculationBackgroundThread != null) { speculationBackgroundThread.interrupt(); } super.serviceStop(); } @Override public void handleAttempt(TaskAttemptStatus status) { long timestamp = clock.getTime(); statusUpdate(status, timestamp); } // This section is not part of the Speculator interface; it's used only for // testing public boolean eventQueueEmpty() { return eventQueue.isEmpty(); } // This interface is intended to be used only for test cases. public void scanForSpeculations() { LOG.info("We got asked to run a debug speculation scan."); // debug System.out.println("We got asked to run a debug speculation scan."); System.out.println("There are " + scanControl.size() + " events stacked already."); scanControl.add(new Object()); Thread.yield(); } /* ************************************************************* */ // This section contains the code that gets run for a SpeculatorEvent private AtomicInteger containerNeed(TaskId taskID) { JobId jobID = taskID.getJobId(); TaskType taskType = taskID.getTaskType(); ConcurrentMap<JobId, AtomicInteger> relevantMap = taskType == TaskType.MAP ? 
mapContainerNeeds : reduceContainerNeeds; AtomicInteger result = relevantMap.get(jobID); if (result == null) { relevantMap.putIfAbsent(jobID, new AtomicInteger(0)); result = relevantMap.get(jobID); } return result; } private synchronized void processSpeculatorEvent(SpeculatorEvent event) { switch (event.getType()) { case ATTEMPT_STATUS_UPDATE: statusUpdate(event.getReportedStatus(), event.getTimestamp()); break; case TASK_CONTAINER_NEED_UPDATE: { AtomicInteger need = containerNeed(event.getTaskID()); need.addAndGet(event.containersNeededChange()); break; } case ATTEMPT_START: { LOG.info("ATTEMPT_START " + event.getTaskID()); estimator.enrollAttempt (event.getReportedStatus(), event.getTimestamp()); break; } case JOB_CREATE: { LOG.info("JOB_CREATE " + event.getJobID()); estimator.contextualize(getConfig(), context); break; } } } /** * Absorbs one TaskAttemptStatus * * @param reportedStatus the status report that we got from a task attempt * that we want to fold into the speculation data for this job * @param timestamp the time this status corresponds to. This matters * because statuses contain progress. */ protected void statusUpdate(TaskAttemptStatus reportedStatus, long timestamp) { String stateString = reportedStatus.taskState.toString(); TaskAttemptId attemptID = reportedStatus.id; TaskId taskID = attemptID.getTaskId(); Job job = context.getJob(taskID.getJobId()); if (job == null) { return; } Task task = job.getTask(taskID); if (task == null) { return; } estimator.updateAttempt(reportedStatus, timestamp); if (stateString.equals(TaskAttemptState.RUNNING.name())) { runningTasks.putIfAbsent(taskID, Boolean.TRUE); } else { runningTasks.remove(taskID, Boolean.TRUE); if (!stateString.equals(TaskAttemptState.STARTING.name())) { runningTaskAttemptStatistics.remove(attemptID); } } } /* ************************************************************* */ // This is the code section that runs periodically and adds speculations for // those jobs that need them. // This can return a few magic values for tasks that shouldn't speculate: // returns ON_SCHEDULE if thresholdRuntime(taskID) says that we should not // considering speculating this task // returns ALREADY_SPECULATING if that is true. This has priority. // returns TOO_NEW if our companion task hasn't gotten any information // returns PROGRESS_IS_GOOD if the task is sailing through // returns NOT_RUNNING if the task is not running // // All of these values are negative. Any value that should be allowed to // speculate is 0 or positive. 
private long speculationValue(TaskId taskID, long now) { Job job = context.getJob(taskID.getJobId()); Task task = job.getTask(taskID); Map<TaskAttemptId, TaskAttempt> attempts = task.getAttempts(); long acceptableRuntime = Long.MIN_VALUE; long result = Long.MIN_VALUE; if (!mayHaveSpeculated.contains(taskID)) { acceptableRuntime = estimator.thresholdRuntime(taskID); if (acceptableRuntime == Long.MAX_VALUE) { return ON_SCHEDULE; } } TaskAttemptId runningTaskAttemptID = null; int numberRunningAttempts = 0; for (TaskAttempt taskAttempt : attempts.values()) { if (taskAttempt.getState() == TaskAttemptState.RUNNING || taskAttempt.getState() == TaskAttemptState.STARTING) { if (++numberRunningAttempts > 1) { return ALREADY_SPECULATING; } runningTaskAttemptID = taskAttempt.getID(); long estimatedRunTime = estimator.estimatedRuntime(runningTaskAttemptID); long taskAttemptStartTime = estimator.attemptEnrolledTime(runningTaskAttemptID); if (taskAttemptStartTime > now) { // This background process ran before we could process the task // attempt status change that chronicles the attempt start return TOO_NEW; } long estimatedEndTime = estimatedRunTime + taskAttemptStartTime; long estimatedReplacementEndTime = now + estimator.estimatedNewAttemptRuntime(taskID); float progress = taskAttempt.getProgress(); TaskAttemptHistoryStatistics data = runningTaskAttemptStatistics.get(runningTaskAttemptID); if (data == null) { runningTaskAttemptStatistics.put(runningTaskAttemptID, new TaskAttemptHistoryStatistics(estimatedRunTime, progress, now)); } else { if (estimatedRunTime == data.getEstimatedRunTime() && progress == data.getProgress()) { // Previous stats are same as same stats if (data.notHeartbeatedInAWhile(now)) { // Stats have stagnated for a while, simulate heart-beat. TaskAttemptStatus taskAttemptStatus = new TaskAttemptStatus(); taskAttemptStatus.id = runningTaskAttemptID; taskAttemptStatus.progress = progress; taskAttemptStatus.taskState = taskAttempt.getState(); // Now simulate the heart-beat handleAttempt(taskAttemptStatus); } } else { // Stats have changed - update our data structure data.setEstimatedRunTime(estimatedRunTime); data.setProgress(progress); data.resetHeartBeatTime(now); } } if (estimatedEndTime < now) { return PROGRESS_IS_GOOD; } if (estimatedReplacementEndTime >= estimatedEndTime) { return TOO_LATE_TO_SPECULATE; } result = estimatedEndTime - estimatedReplacementEndTime; } } // If we are here, there's at most one task attempt. if (numberRunningAttempts == 0) { return NOT_RUNNING; } if (acceptableRuntime == Long.MIN_VALUE) { acceptableRuntime = estimator.thresholdRuntime(taskID); if (acceptableRuntime == Long.MAX_VALUE) { return ON_SCHEDULE; } } return result; } //Add attempt to a given Task. protected void addSpeculativeAttempt(TaskId taskID) { LOG.info ("DefaultSpeculator.addSpeculativeAttempt -- we are speculating " + taskID); eventHandler.handle(new TaskEvent(taskID, TaskEventType.T_ADD_SPEC_ATTEMPT)); mayHaveSpeculated.add(taskID); } @Override public void handle(SpeculatorEvent event) { processSpeculatorEvent(event); } private int maybeScheduleAMapSpeculation() { return maybeScheduleASpeculation(TaskType.MAP); } private int maybeScheduleAReduceSpeculation() { return maybeScheduleASpeculation(TaskType.REDUCE); } private int maybeScheduleASpeculation(TaskType type) { int successes = 0; long now = clock.getTime(); ConcurrentMap<JobId, AtomicInteger> containerNeeds = type == TaskType.MAP ? 
mapContainerNeeds : reduceContainerNeeds; for (ConcurrentMap.Entry<JobId, AtomicInteger> jobEntry : containerNeeds.entrySet()) { // This race conditon is okay. If we skip a speculation attempt we // should have tried because the event that lowers the number of // containers needed to zero hasn't come through, it will next time. // Also, if we miss the fact that the number of containers needed was // zero but increased due to a failure it's not too bad to launch one // container prematurely. if (jobEntry.getValue().get() > 0) { continue; } int numberSpeculationsAlready = 0; int numberRunningTasks = 0; // loop through the tasks of the kind Job job = context.getJob(jobEntry.getKey()); Map<TaskId, Task> tasks = job.getTasks(type); int numberAllowedSpeculativeTasks = (int) Math.max(minimumAllowedSpeculativeTasks, proportionTotalTasksSpeculatable * tasks.size()); TaskId bestTaskID = null; long bestSpeculationValue = -1L; // this loop is potentially pricey. // TODO track the tasks that are potentially worth looking at for (Map.Entry<TaskId, Task> taskEntry : tasks.entrySet()) { long mySpeculationValue = speculationValue(taskEntry.getKey(), now); if (mySpeculationValue == ALREADY_SPECULATING) { ++numberSpeculationsAlready; } if (mySpeculationValue != NOT_RUNNING) { ++numberRunningTasks; } if (mySpeculationValue > bestSpeculationValue) { bestTaskID = taskEntry.getKey(); bestSpeculationValue = mySpeculationValue; } } numberAllowedSpeculativeTasks = (int) Math.max(numberAllowedSpeculativeTasks, proportionRunningTasksSpeculatable * numberRunningTasks); // If we found a speculation target, fire it off if (bestTaskID != null && numberAllowedSpeculativeTasks > numberSpeculationsAlready) { addSpeculativeAttempt(bestTaskID); ++successes; } } return successes; } private int computeSpeculations() { // We'll try to issue one map and one reduce speculation per job per run return maybeScheduleAMapSpeculation() + maybeScheduleAReduceSpeculation(); } static class TaskAttemptHistoryStatistics { private long estimatedRunTime; private float progress; private long lastHeartBeatTime; public TaskAttemptHistoryStatistics(long estimatedRunTime, float progress, long nonProgressStartTime) { this.estimatedRunTime = estimatedRunTime; this.progress = progress; resetHeartBeatTime(nonProgressStartTime); } public long getEstimatedRunTime() { return this.estimatedRunTime; } public float getProgress() { return this.progress; } public void setEstimatedRunTime(long estimatedRunTime) { this.estimatedRunTime = estimatedRunTime; } public void setProgress(float progress) { this.progress = progress; } public boolean notHeartbeatedInAWhile(long now) { if (now - lastHeartBeatTime <= MAX_WAITTING_TIME_FOR_HEARTBEAT) { return false; } else { resetHeartBeatTime(now); return true; } } public void resetHeartBeatTime(long lastHeartBeatTime) { this.lastHeartBeatTime = lastHeartBeatTime; } } @VisibleForTesting public long getSoonestRetryAfterNoSpeculate() { return soonestRetryAfterNoSpeculate; } @VisibleForTesting public long getSoonestRetryAfterSpeculate() { return soonestRetryAfterSpeculate; } @VisibleForTesting public double getProportionRunningTasksSpeculatable() { return proportionRunningTasksSpeculatable; } @VisibleForTesting public double getProportionTotalTasksSpeculatable() { return proportionTotalTasksSpeculatable; } @VisibleForTesting public int getMinimumAllowedSpeculativeTasks() { return minimumAllowedSpeculativeTasks; } }
22,268
34.516746
99
java
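maybeScheduleASpeculation in DefaultSpeculator caps concurrent speculative attempts per job at the larger of a fixed minimum and two proportional limits, one over all tasks of the type and one over the tasks currently running. A worked arithmetic sketch follows; the constants are illustrative stand-ins for the SPECULATIVE_MINIMUM_ALLOWED_TASKS and SPECULATIVECAP_* settings, not necessarily their shipped defaults.

public class SpeculationCapSketch {
  public static void main(String[] args) {
    int minimumAllowedSpeculativeTasks = 10;          // illustrative
    double proportionTotalTasksSpeculatable = 0.01;   // illustrative
    double proportionRunningTasksSpeculatable = 0.1;  // illustrative
    int totalTasks = 2000;
    int runningTasks = 300;

    int allowed = (int) Math.max(minimumAllowedSpeculativeTasks,
        proportionTotalTasksSpeculatable * totalTasks);       // max(10, 20) = 20
    allowed = (int) Math.max(allowed,
        proportionRunningTasksSpeculatable * runningTasks);   // max(20, 30) = 30
    System.out.println("speculative attempts allowed now: " + allowed);
  }
}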
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/TaskSpeculationPredicate.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.speculate; import org.apache.hadoop.mapreduce.v2.api.records.JobId; import org.apache.hadoop.mapreduce.v2.api.records.TaskId; import org.apache.hadoop.mapreduce.v2.app.AppContext; import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.mapreduce.v2.app.job.Task; public class TaskSpeculationPredicate { boolean canSpeculate(AppContext context, TaskId taskID) { // This class rejects speculating any task that already has speculations, // or isn't running. // Subclasses should call TaskSpeculationPredicate.canSpeculate(...) , but // can be even more restrictive. JobId jobID = taskID.getJobId(); Job job = context.getJob(jobID); Task task = job.getTask(taskID); return task.getAttempts().size() == 1; } }
1,603
39.1
79
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DataStatistics.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.speculate; public class DataStatistics { private int count = 0; private double sum = 0; private double sumSquares = 0; public DataStatistics() { } public DataStatistics(double initNum) { this.count = 1; this.sum = initNum; this.sumSquares = initNum * initNum; } public synchronized void add(double newNum) { this.count++; this.sum += newNum; this.sumSquares += newNum * newNum; } public synchronized void updateStatistics(double old, double update) { this.sum += update - old; this.sumSquares += (update * update) - (old * old); } public synchronized double mean() { return count == 0 ? 0.0 : sum/count; } public synchronized double var() { // E(X^2) - E(X)^2 if (count <= 1) { return 0.0; } double mean = mean(); return Math.max((sumSquares/count) - mean * mean, 0.0d); } public synchronized double std() { return Math.sqrt(this.var()); } public synchronized double outlier(float sigma) { if (count != 0.0) { return mean() + std() * sigma; } return 0.0; } public synchronized double count() { return count; } public String toString() { return "DataStatistics: count is " + count + ", sum is " + sum + ", sumSquares is " + sumSquares + " mean is " + mean() + " std() is " + std(); } }
2,107
25.683544
82
java
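A short usage sketch of DataStatistics as the speculation code uses it: completed-task durations are added one by one, and outlier(sigma) = mean() + std() * sigma gives the runtime beyond which a task looks slow. The durations below are made up, and the class above is assumed to be on the classpath.

public class DataStatisticsSketch {
  public static void main(String[] args) {
    DataStatistics stats = new DataStatistics();
    stats.add(50_000);   // completed map durations in milliseconds
    stats.add(60_000);
    stats.add(70_000);
    System.out.println("mean = " + stats.mean());                // 60000.0
    System.out.println("std  = " + stats.std());                 // ~8164.97
    System.out.println("outlier(1.0) = " + stats.outlier(1.0f)); // ~68164.97
  }
}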
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/SpeculatorEvent.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.speculate; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus; import org.apache.hadoop.yarn.event.AbstractEvent; import org.apache.hadoop.mapreduce.v2.api.records.JobId; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; import org.apache.hadoop.mapreduce.v2.api.records.TaskId; public class SpeculatorEvent extends AbstractEvent<Speculator.EventType> { // valid for ATTEMPT_STATUS_UPDATE private TaskAttemptStatus reportedStatus; // valid for TASK_CONTAINER_NEED_UPDATE private TaskId taskID; private int containersNeededChange; // valid for CREATE_JOB private JobId jobID; public SpeculatorEvent(JobId jobID, long timestamp) { super(Speculator.EventType.JOB_CREATE, timestamp); this.jobID = jobID; } public SpeculatorEvent(TaskAttemptStatus reportedStatus, long timestamp) { super(Speculator.EventType.ATTEMPT_STATUS_UPDATE, timestamp); this.reportedStatus = reportedStatus; } public SpeculatorEvent(TaskAttemptId attemptID, boolean flag, long timestamp) { super(Speculator.EventType.ATTEMPT_START, timestamp); this.reportedStatus = new TaskAttemptStatus(); this.reportedStatus.id = attemptID; this.taskID = attemptID.getTaskId(); } /* * This c'tor creates a TASK_CONTAINER_NEED_UPDATE event . * We send a +1 event when a task enters a state where it wants a container, * and a -1 event when it either gets one or withdraws the request. * The per job sum of all these events is the number of containers requested * but not granted. The intent is that we only do speculations when the * speculation wouldn't compete for containers with tasks which need * to be run. */ public SpeculatorEvent(TaskId taskID, int containersNeededChange) { super(Speculator.EventType.TASK_CONTAINER_NEED_UPDATE); this.taskID = taskID; this.containersNeededChange = containersNeededChange; } public TaskAttemptStatus getReportedStatus() { return reportedStatus; } public int containersNeededChange() { return containersNeededChange; } public TaskId getTaskID() { return taskID; } public JobId getJobID() { return jobID; } }
3,055
34.534884
99
java
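The TASK_CONTAINER_NEED_UPDATE events described in the constructor comment above are summed per job by the speculator (see containerNeed in DefaultSpeculator); the running total is the number of containers requested but not yet granted. A minimal bookkeeping sketch, using plain strings in place of JobId:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;

public class ContainerNeedSketch {
  private final ConcurrentMap<String, AtomicInteger> needs = new ConcurrentHashMap<>();

  // Apply a +1 (task wants a container) or -1 (granted or withdrawn) change.
  public int update(String jobId, int containersNeededChange) {
    return needs.computeIfAbsent(jobId, k -> new AtomicInteger(0))
        .addAndGet(containersNeededChange);
  }

  public static void main(String[] args) {
    ContainerNeedSketch sketch = new ContainerNeedSketch();
    sketch.update("job_1", +1);                 // a task starts waiting
    sketch.update("job_1", +1);                 // another one
    int pending = sketch.update("job_1", -1);   // one request satisfied
    System.out.println("requested but not granted: " + pending); // 1
  }
}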
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/StartEndTimesBase.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.speculate; import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.v2.app.AppContext; import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.mapreduce.v2.app.job.Task; import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus; import org.apache.hadoop.mapreduce.v2.api.records.JobId; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState; import org.apache.hadoop.mapreduce.v2.api.records.TaskId; import org.apache.hadoop.mapreduce.v2.api.records.TaskType; abstract class StartEndTimesBase implements TaskRuntimeEstimator { static final float MINIMUM_COMPLETE_PROPORTION_TO_SPECULATE = 0.05F; static final int MINIMUM_COMPLETE_NUMBER_TO_SPECULATE = 1; protected AppContext context = null; protected final Map<TaskAttemptId, Long> startTimes = new ConcurrentHashMap<TaskAttemptId, Long>(); // XXXX This class design assumes that the contents of AppContext.getAllJobs // never changes. Is that right? // // This assumption comes in in several places, mostly in data structure that // can grow without limit if a AppContext gets new Job's when the old ones // run out. Also, these mapper statistics blocks won't cover the Job's // we don't know about. protected final Map<Job, DataStatistics> mapperStatistics = new HashMap<Job, DataStatistics>(); protected final Map<Job, DataStatistics> reducerStatistics = new HashMap<Job, DataStatistics>(); private final Map<Job, Float> slowTaskRelativeTresholds = new HashMap<Job, Float>(); protected final Set<Task> doneTasks = new HashSet<Task>(); @Override public void enrollAttempt(TaskAttemptStatus status, long timestamp) { startTimes.put(status.id,timestamp); } @Override public long attemptEnrolledTime(TaskAttemptId attemptID) { Long result = startTimes.get(attemptID); return result == null ? 
Long.MAX_VALUE : result; } @Override public void contextualize(Configuration conf, AppContext context) { this.context = context; Map<JobId, Job> allJobs = context.getAllJobs(); for (Map.Entry<JobId, Job> entry : allJobs.entrySet()) { final Job job = entry.getValue(); mapperStatistics.put(job, new DataStatistics()); reducerStatistics.put(job, new DataStatistics()); slowTaskRelativeTresholds.put (job, conf.getFloat(MRJobConfig.SPECULATIVE_SLOWTASK_THRESHOLD,1.0f)); } } protected DataStatistics dataStatisticsForTask(TaskId taskID) { JobId jobID = taskID.getJobId(); Job job = context.getJob(jobID); if (job == null) { return null; } Task task = job.getTask(taskID); if (task == null) { return null; } return task.getType() == TaskType.MAP ? mapperStatistics.get(job) : task.getType() == TaskType.REDUCE ? reducerStatistics.get(job) : null; } @Override public long thresholdRuntime(TaskId taskID) { JobId jobID = taskID.getJobId(); Job job = context.getJob(jobID); TaskType type = taskID.getTaskType(); DataStatistics statistics = dataStatisticsForTask(taskID); int completedTasksOfType = type == TaskType.MAP ? job.getCompletedMaps() : job.getCompletedReduces(); int totalTasksOfType = type == TaskType.MAP ? job.getTotalMaps() : job.getTotalReduces(); if (completedTasksOfType < MINIMUM_COMPLETE_NUMBER_TO_SPECULATE || (((float)completedTasksOfType) / totalTasksOfType) < MINIMUM_COMPLETE_PROPORTION_TO_SPECULATE ) { return Long.MAX_VALUE; } long result = statistics == null ? Long.MAX_VALUE : (long)statistics.outlier(slowTaskRelativeTresholds.get(job)); return result; } @Override public long estimatedNewAttemptRuntime(TaskId id) { DataStatistics statistics = dataStatisticsForTask(id); if (statistics == null) { return -1L; } return (long)statistics.mean(); } @Override public void updateAttempt(TaskAttemptStatus status, long timestamp) { TaskAttemptId attemptID = status.id; TaskId taskID = attemptID.getTaskId(); JobId jobID = taskID.getJobId(); Job job = context.getJob(jobID); if (job == null) { return; } Task task = job.getTask(taskID); if (task == null) { return; } Long boxedStart = startTimes.get(attemptID); long start = boxedStart == null ? Long.MIN_VALUE : boxedStart; TaskAttempt taskAttempt = task.getAttempt(attemptID); if (taskAttempt.getState() == TaskAttemptState.SUCCEEDED) { boolean isNew = false; // is this a new success? synchronized (doneTasks) { if (!doneTasks.contains(task)) { doneTasks.add(task); isNew = true; } } // It's a new completion // Note that if a task completes twice [because of a previous speculation // and a race, or a success followed by loss of the machine with the // local data] we only count the first one. if (isNew) { long finish = timestamp; if (start > 1L && finish > 1L && start <= finish) { long duration = finish - start; DataStatistics statistics = dataStatisticsForTask(taskID); if (statistics != null) { statistics.add(duration); } } } } } }
6,629
30.273585
99
java
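thresholdRuntime in StartEndTimesBase only returns a finite threshold once enough tasks of the same type have completed: at least MINIMUM_COMPLETE_NUMBER_TO_SPECULATE of them and at least MINIMUM_COMPLETE_PROPORTION_TO_SPECULATE of the type; until then every task reports Long.MAX_VALUE and nothing is speculated. A small sketch of that gate using the constants defined above:

public class SpeculationGateSketch {
  static final float MINIMUM_COMPLETE_PROPORTION_TO_SPECULATE = 0.05F;
  static final int MINIMUM_COMPLETE_NUMBER_TO_SPECULATE = 1;

  // True once there is enough completion history to trust the statistics.
  static boolean enoughHistory(int completedTasksOfType, int totalTasksOfType) {
    return completedTasksOfType >= MINIMUM_COMPLETE_NUMBER_TO_SPECULATE
        && ((float) completedTasksOfType) / totalTasksOfType
            >= MINIMUM_COMPLETE_PROPORTION_TO_SPECULATE;
  }

  public static void main(String[] args) {
    System.out.println(enoughHistory(2, 100)); // false: only 2% of the maps finished
    System.out.println(enoughHistory(5, 100)); // true: 5 completions and 5% of the type
  }
}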
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/Speculator.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.speculate; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus; import org.apache.hadoop.yarn.event.EventHandler; /** * Speculator component. Task Attempts' status updates are sent to this * component. A concrete implementation runs the speculative algorithm and * sends the TaskEventType.T_ADD_SPEC_ATTEMPT. * * An implementation also has to arrange for the jobs to be scanned from * time to time, to launch the speculations. */ public interface Speculator extends EventHandler<SpeculatorEvent> { enum EventType { ATTEMPT_STATUS_UPDATE, ATTEMPT_START, TASK_CONTAINER_NEED_UPDATE, JOB_CREATE } // This will be implemented if we go to a model where the events are // processed within the TaskAttempts' state transitions' code. public void handleAttempt(TaskAttemptStatus status); }
1,708
36.152174
99
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/LegacyTaskRuntimeEstimator.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.speculate; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicLong; import org.apache.hadoop.mapreduce.v2.api.records.JobId; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState; import org.apache.hadoop.mapreduce.v2.api.records.TaskId; import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.mapreduce.v2.app.job.Task; import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus; public class LegacyTaskRuntimeEstimator extends StartEndTimesBase { private final Map<TaskAttempt, AtomicLong> attemptRuntimeEstimates = new ConcurrentHashMap<TaskAttempt, AtomicLong>(); private final ConcurrentHashMap<TaskAttempt, AtomicLong> attemptRuntimeEstimateVariances = new ConcurrentHashMap<TaskAttempt, AtomicLong>(); @Override public void updateAttempt(TaskAttemptStatus status, long timestamp) { super.updateAttempt(status, timestamp); TaskAttemptId attemptID = status.id; TaskId taskID = attemptID.getTaskId(); JobId jobID = taskID.getJobId(); Job job = context.getJob(jobID); if (job == null) { return; } Task task = job.getTask(taskID); if (task == null) { return; } TaskAttempt taskAttempt = task.getAttempt(attemptID); if (taskAttempt == null) { return; } Long boxedStart = startTimes.get(attemptID); long start = boxedStart == null ? Long.MIN_VALUE : boxedStart; // We need to do two things. // 1: If this is a completion, we accumulate statistics in the superclass // 2: If this is not a completion, we learn more about it. // This is not a completion, but we're cooking. 
// if (taskAttempt.getState() == TaskAttemptState.RUNNING) { // See if this task is already in the registry AtomicLong estimateContainer = attemptRuntimeEstimates.get(taskAttempt); AtomicLong estimateVarianceContainer = attemptRuntimeEstimateVariances.get(taskAttempt); if (estimateContainer == null) { if (attemptRuntimeEstimates.get(taskAttempt) == null) { attemptRuntimeEstimates.put(taskAttempt, new AtomicLong()); estimateContainer = attemptRuntimeEstimates.get(taskAttempt); } } if (estimateVarianceContainer == null) { attemptRuntimeEstimateVariances.putIfAbsent(taskAttempt, new AtomicLong()); estimateVarianceContainer = attemptRuntimeEstimateVariances.get(taskAttempt); } long estimate = -1; long varianceEstimate = -1; // This code assumes that we'll never consider starting a third // speculative task attempt if two are already running for this task if (start > 0 && timestamp > start) { estimate = (long) ((timestamp - start) / Math.max(0.0001, status.progress)); varianceEstimate = (long) (estimate * status.progress / 10); } if (estimateContainer != null) { estimateContainer.set(estimate); } if (estimateVarianceContainer != null) { estimateVarianceContainer.set(varianceEstimate); } } } private long storedPerAttemptValue (Map<TaskAttempt, AtomicLong> data, TaskAttemptId attemptID) { TaskId taskID = attemptID.getTaskId(); JobId jobID = taskID.getJobId(); Job job = context.getJob(jobID); Task task = job.getTask(taskID); if (task == null) { return -1L; } TaskAttempt taskAttempt = task.getAttempt(attemptID); if (taskAttempt == null) { return -1L; } AtomicLong estimate = data.get(taskAttempt); return estimate == null ? -1L : estimate.get(); } @Override public long estimatedRuntime(TaskAttemptId attemptID) { return storedPerAttemptValue(attemptRuntimeEstimates, attemptID); } @Override public long runtimeEstimateVariance(TaskAttemptId attemptID) { return storedPerAttemptValue(attemptRuntimeEstimateVariances, attemptID); } }
4,965
31.887417
99
java
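For a running attempt, the legacy estimator extrapolates linearly: total runtime is roughly elapsed time divided by progress (with a small floor on progress), and the variance estimate is a simple fraction of that. A worked example with made-up numbers, mirroring the arithmetic in updateAttempt above:

public class LegacyEstimateSketch {
  public static void main(String[] args) {
    long start = 0L;             // attempt enrollment time, ms
    long timestamp = 120_000L;   // latest status update, ms
    float progress = 0.5f;       // 50% done

    long estimate = (long) ((timestamp - start) / Math.max(0.0001, progress));
    long varianceEstimate = (long) (estimate * progress / 10);
    System.out.println("estimated total runtime (ms): " + estimate);   // 240,000
    System.out.println("variance estimate (ms): " + varianceEstimate); // 12,000
  }
}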
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/JobStateInternal.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce.v2.app.job; public enum JobStateInternal { NEW, SETUP, INITED, RUNNING, COMMITTING, SUCCEEDED, FAIL_WAIT, FAIL_ABORT, FAILED, KILL_WAIT, KILL_ABORT, KILLED, ERROR, REBOOT }
1,046
27.297297
75
java
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ @InterfaceAudience.Private package org.apache.hadoop.mapreduce.v2.app.job; import org.apache.hadoop.classification.InterfaceAudience;
940
43.809524
75
java